Commit dc20aaa3 authored by Yury Norov, committed by Yang Yingliang

arm64: introduce is_a32_compat_{task, thread} for AArch32 compat

hulk inclusion
category: feature
bugzilla: NA
CVE: NA
---------------------------

Based on a patch by Andrew Pinski.

This patch introduces is_a32_compat_task and is_a32_compat_thread so it is
easier to tell whether a thread/task is AArch32 (a32) specific or just a
generic compat thread/task. The corresponding helpers are placed in
<asm/is_compat.h> to avoid cluttering other headers.
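For orientation only, a minimal self-contained sketch of that split follows; the mock struct thread_info, the flag-test helper and the demo main() are invented here purely for illustration and are not part of the patch:

```c
#include <stdio.h>

/* Mock of the kernel's thread-flag plumbing, for illustration only. */
#define TIF_32BIT 22
struct thread_info { unsigned long flags; };

static int test_ti_thread_flag(struct thread_info *ti, int flag)
{
	return (ti->flags >> flag) & 1;
}

/* AArch32-specific check: true only for 32-bit ARM (a32) tasks. */
static int is_a32_compat_thread(struct thread_info *ti)
{
	return test_ti_thread_flag(ti, TIF_32BIT);
}

/* Generic compat check: currently delegates to the a32 check. */
static int is_compat_thread(struct thread_info *ti)
{
	return is_a32_compat_thread(ti);
}

int main(void)
{
	struct thread_info a32 = { .flags = 1UL << TIF_32BIT };
	struct thread_info a64 = { .flags = 0 };

	printf("a32 task: compat=%d a32=%d\n",
	       is_compat_thread(&a32), is_a32_compat_thread(&a32));
	printf("a64 task: compat=%d a32=%d\n",
	       is_compat_thread(&a64), is_a32_compat_thread(&a64));
	return 0;
}
```

The point of keeping the two names separate is that callers which really mean "AArch32" stop working by accident the moment another compat ABI reuses is_compat_task().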

Some files include both <linux/compat.h> and <asm/compat.h>; this is
redundant because <linux/compat.h> already includes <asm/compat.h>.
Those includes are cleaned up here as well.
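A hedged before/after illustration of that include cleanup (no particular file is implied):

```c
/* Before: redundant, <linux/compat.h> already pulls in <asm/compat.h>. */
#include <linux/compat.h>
#include <asm/compat.h>

/* After: the generic header alone is sufficient. */
#include <linux/compat.h>
```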
Signed-off-by: Yury Norov <ynorov@caviumnetworks.com>
Signed-off-by: Andrew Pinski <Andrew.Pinski@caviumnetworks.com>
Signed-off-by: Bamvor Jian Zhang <bamv2005@gmail.com>

 Conflicts:
	arch/arm64/include/asm/processor.h
	arch/arm64/kernel/process.c
	arch/arm64/kernel/syscall.c
[wangxiongfeng: fix conflicts in arch/arm64/include/asm/processor.h
because of the following commits:
2f26fc4e0 arm64: compat: Reduce address limit
2a1d3f8a9 arm64: ptrace: Override SPSR.SS when single-stepping is enabled]
Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 6cbd06bf
@@ -25,6 +25,8 @@
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>

+#include <asm/is_compat.h>
+
 #define COMPAT_USER_HZ		100
 #ifdef __AARCH64EB__
 #define COMPAT_UTS_MACHINE	"armv8b\0\0"
@@ -224,23 +226,6 @@ struct compat_shmid64_ds {
 	compat_ulong_t __unused5;
 };

-static inline int is_compat_task(void)
-{
-	return test_thread_flag(TIF_32BIT);
-}
-
-static inline int is_compat_thread(struct thread_info *thread)
-{
-	return test_ti_thread_flag(thread, TIF_32BIT);
-}
-
-#else /* !CONFIG_COMPAT */
-
-static inline int is_compat_thread(struct thread_info *thread)
-{
-	return 0;
-}
-
 #endif /* CONFIG_COMPAT */
 #endif /* __KERNEL__ */
 #endif /* __ASM_COMPAT_H */
@@ -16,6 +16,10 @@
 #ifndef __ASM_ELF_H
 #define __ASM_ELF_H

+#ifndef __ASSEMBLY__
+#include <linux/compat.h>
+#endif
+
 #include <asm/hwcap.h>

 /*
@@ -169,13 +173,9 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 				       int uses_interp);

 /* 1GB of VA */
-#ifdef CONFIG_COMPAT
-#define STACK_RND_MASK		(test_thread_flag(TIF_32BIT) ? \
+#define STACK_RND_MASK		(is_compat_task() ? \
 					0x7ff >> (PAGE_SHIFT - 12) : \
 					0x3ffff >> (PAGE_SHIFT - 12))
-#else
-#define STACK_RND_MASK		(0x3ffff >> (PAGE_SHIFT - 12))
-#endif

 #ifdef __AARCH64EB__
 #define COMPAT_ELF_PLATFORM	("v8b")
......
@@ -54,7 +54,7 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS

 static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
 {
-	return is_compat_task();
+	return is_a32_compat_task();
 }

 #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
......
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef __ASM_IS_COMPAT_H
#define __ASM_IS_COMPAT_H
#ifndef __ASSEMBLY__

#include <linux/thread_bits.h>

#ifdef CONFIG_AARCH32_EL0

static inline int is_a32_compat_task(void)
{
	return test_thread_flag(TIF_32BIT);
}

static inline int is_a32_compat_thread(struct thread_info *thread)
{
	return test_ti_thread_flag(thread, TIF_32BIT);
}

#else

static inline int is_a32_compat_task(void)
{
	return 0;
}

static inline int is_a32_compat_thread(struct thread_info *thread)
{
	return 0;
}

#endif /* CONFIG_AARCH32_EL0 */

#ifdef CONFIG_COMPAT

static inline int is_compat_task(void)
{
	return is_a32_compat_task();
}

#endif /* CONFIG_COMPAT */

static inline int is_compat_thread(struct thread_info *thread)
{
	return is_a32_compat_thread(thread);
}

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_IS_COMPAT_H */
@@ -42,6 +42,7 @@
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
+#include <asm/is_compat.h>
 #include <asm/hw_breakpoint.h>
 #include <asm/lse.h>
 #include <asm/pgtable-hwdef.h>
@@ -62,9 +63,9 @@
 #else
 #define TASK_SIZE_32		(UL(0x100000000) - PAGE_SIZE)
 #endif /* CONFIG_ARM64_64K_PAGES */
-#define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
+#define TASK_SIZE		(is_compat_task() ? \
				TASK_SIZE_32 : TASK_SIZE_64)
-#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+#define TASK_SIZE_OF(tsk)	(is_compat_thread(tsk) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
 #else
 #define TASK_SIZE		TASK_SIZE_64
@@ -75,7 +76,7 @@
 #define STACK_TOP_MAX		TASK_SIZE_64
 #ifdef CONFIG_COMPAT
 #define AARCH32_VECTORS_BASE	0xffff0000
-#define STACK_TOP		(test_thread_flag(TIF_32BIT) ? \
+#define STACK_TOP		(is_compat_task() ? \
				AARCH32_VECTORS_BASE : STACK_TOP_MAX)
 #else
 #define STACK_TOP		STACK_TOP_MAX
@@ -153,7 +154,7 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
 #define task_user_tls(t)						\
 ({									\
	unsigned long *__tls;						\
-	if (is_compat_thread(task_thread_info(t)))			\
+	if (is_a32_compat_thread(task_thread_info(t)))			\
		__tls = &(t)->thread.uw.tp2_value;			\
	else								\
		__tls = &(t)->thread.uw.tp_value;			\
......
@@ -129,7 +129,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
  */
 static inline int syscall_get_arch(void)
 {
-	if (is_compat_task())
+	if (is_a32_compat_task())
		return AUDIT_ARCH_ARM;

	return AUDIT_ARCH_AARCH64;
......
@@ -86,7 +86,7 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define TIF_FREEZE		19
 #define TIF_RESTORE_SIGMASK	20
 #define TIF_SINGLESTEP		21
-#define TIF_32BIT		22	/* 32bit process */
+#define TIF_32BIT		22	/* AARCH32 process */
 #define TIF_SVE			23	/* Scalable Vector Extension in use */
 #define TIF_SVE_VL_INHERIT	24	/* Inherit sve_vl_onexec across exec */
 #define TIF_SSBD		25	/* Wants SSB mitigation */
......
@@ -168,7 +168,7 @@ enum hw_breakpoint_ops {
	HW_BREAKPOINT_RESTORE
 };

-static int is_compat_bp(struct perf_event *bp)
+static int is_a32_compat_bp(struct perf_event *bp)
 {
	struct task_struct *tsk = bp->hw.target;
@@ -179,7 +179,7 @@ static int is_a32_compat_bp(struct perf_event *bp)
	 * deprecated behaviour if we use unaligned watchpoints in
	 * AArch64 state.
	 */
-	return tsk && is_compat_thread(task_thread_info(tsk));
+	return tsk && is_a32_compat_thread(task_thread_info(tsk));
 }

 /**
@@ -478,7 +478,7 @@ static int arch_build_bp_info(struct perf_event *bp,
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes.
	 */
	if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
-		if (is_compat_bp(bp)) {
+		if (is_a32_compat_bp(bp)) {
			if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
			    hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
				return -EINVAL;
@@ -536,7 +536,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
	 * AArch32 tasks expect some simple alignment fixups, so emulate
	 * that here.
	 */
-	if (is_compat_bp(bp)) {
+	if (is_a32_compat_bp(bp)) {
		if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
			alignment_mask = 0x7;
		else
......
@@ -66,7 +66,7 @@ int perf_reg_validate(u64 mask)
 u64 perf_reg_abi(struct task_struct *task)
 {
-	if (is_compat_thread(task_thread_info(task)))
+	if (is_a32_compat_thread(task_thread_info(task)))
		return PERF_SAMPLE_REGS_ABI_32;
	else
		return PERF_SAMPLE_REGS_ABI_64;
......
@@ -52,7 +52,6 @@
 #include <asm/alternative.h>
 #include <asm/arch_gicv3.h>
-#include <asm/compat.h>
 #include <asm/cacheflush.h>
 #include <asm/exec.h>
 #include <asm/fpsimd.h>
@@ -305,7 +304,7 @@ static void tls_thread_flush(void)
 {
	write_sysreg(0, tpidr_el0);

-	if (is_compat_task()) {
+	if (is_a32_compat_task()) {
		current->thread.uw.tp_value = 0;

		/*
@@ -387,7 +386,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
	*task_user_tls(p) = read_sysreg(tpidr_el0);

	if (stack_start) {
-		if (is_compat_thread(task_thread_info(p)))
+		if (is_a32_compat_thread(task_thread_info(p)))
			childregs->compat_sp = stack_start;
		else
			childregs->sp = stack_start;
@@ -431,7 +430,7 @@ static void tls_thread_switch(struct task_struct *next)
 {
	tls_preserve_current_state();

-	if (is_compat_thread(task_thread_info(next)))
+	if (is_a32_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);
......
@@ -41,7 +41,6 @@
 #include <linux/tracehook.h>
 #include <linux/elf.h>

-#include <asm/compat.h>
 #include <asm/cpufeature.h>
 #include <asm/debug-monitors.h>
 #include <asm/fpsimd.h>
@@ -191,7 +190,7 @@ static void ptrace_hbptriggered(struct perf_event *bp,
	info.si_addr	= (void __user *)(bkpt->trigger);

 #ifdef CONFIG_AARCH32_EL0
-	if (is_compat_task()) {
+	if (is_a32_compat_task()) {
		int si_errno = 0;
		int i;
@@ -1616,9 +1615,9 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
-	if (is_compat_task())
+	if (is_a32_compat_task())
		return &user_aarch32_view;
-	else if (is_compat_thread(task_thread_info(task)))
+	else if (is_a32_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
 #endif
	return &user_aarch64_view;
@@ -1645,7 +1644,7 @@ static void tracehook_report_syscall(struct pt_regs *regs,
	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
-	regno = (is_compat_task() ? 12 : 7);
+	regno = (is_a32_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;
@@ -1776,7 +1775,7 @@ int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
	user_regs_reset_single_step(regs, task);

-	if (is_compat_thread(task_thread_info(task)))
+	if (is_a32_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
......
@@ -790,7 +790,7 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
 static void setup_restart_syscall(struct pt_regs *regs)
 {
-	if (is_compat_task())
+	if (is_a32_compat_task())
		a32_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
@@ -810,7 +810,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
	/*
	 * Set up the stack frame
	 */
-	if (is_compat_task()) {
+	if (is_a32_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = a32_setup_rt_frame(usig, ksig, oldset, regs);
		else
......
@@ -21,7 +21,7 @@ static long do_ni_syscall(struct pt_regs *regs, int scno)
 {
 #ifdef CONFIG_AARCH32_EL0
	long ret;
-	if (is_compat_task()) {
+	if (is_a32_compat_task()) {
		ret = a32_arm_syscall(regs, scno);
		if (ret != -ENOSYS)
			return ret;
......
@@ -18,6 +18,7 @@
  */

 #include <linux/bug.h>
+#include <linux/compat.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/kallsyms.h>
......
@@ -54,7 +54,7 @@ unsigned long arch_mmap_rnd(void)
	unsigned long rnd;

 #ifdef CONFIG_COMPAT
-	if (test_thread_flag(TIF_32BIT))
+	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
 #endif
......