openanolis / cloud-kernel
Commit afa7a17f
Authored Apr 20, 2017 by Ingo Molnar

Merge branch 'WIP.x86/process' into perf/core

Parents: e720c19e e9ea1e7f
Showing 22 changed files with 258 additions and 88 deletions (+258 -88)
arch/um/include/shared/os.h              +2   -2
arch/x86/entry/syscalls/syscall_32.tbl   +1   -0
arch/x86/include/asm/cpufeatures.h       +1   -0
arch/x86/include/asm/msr-index.h         +8   -3
arch/x86/include/asm/processor.h         +2   -0
arch/x86/include/asm/proto.h             +3   -1
arch/x86/include/asm/thread_info.h       +5   -1
arch/x86/include/asm/tlbflush.h          +10  -0
arch/x86/include/uapi/asm/prctl.h        +7   -4
arch/x86/kernel/cpu/intel.c              +32  -8
arch/x86/kernel/process.c                +115 -36
arch/x86/kernel/process_32.c             +7   -0
arch/x86/kernel/process_64.c             +32  -16
arch/x86/kernel/ptrace.c                 +4   -4
arch/x86/um/Makefile                     +1   -1
arch/x86/um/asm/ptrace.h                 +1   -1
arch/x86/um/os-Linux/prctl.c             +2   -2
arch/x86/um/syscalls_32.c                +7   -0
arch/x86/um/syscalls_64.c                +11  -9
fs/exec.c                                +1   -0
include/linux/compat.h                   +2   -0
include/linux/thread_info.h              +4   -0
arch/um/include/shared/os.h
@@ -302,8 +302,8 @@ extern int ignore_sigio_fd(int fd);
 extern void maybe_sigio_broken(int fd, int read);
 extern void sigio_broken(int fd, int read);
 
-/* sys-x86_64/prctl.c */
-extern int os_arch_prctl(int pid, int code, unsigned long *addr);
+/* prctl.c */
+extern int os_arch_prctl(int pid, int option, unsigned long *arg2);
 
 /* tty.c */
 extern int get_pty(void);
arch/x86/entry/syscalls/syscall_32.tbl
@@ -390,3 +390,4 @@
 381	i386	pkey_alloc	sys_pkey_alloc
 382	i386	pkey_free	sys_pkey_free
 383	i386	statx		sys_statx
+384	i386	arch_prctl	sys_arch_prctl	compat_sys_arch_prctl
arch/x86/include/asm/cpufeatures.h
@@ -187,6 +187,7 @@
  * Reuse free bits when adding new feature flags!
  */
 #define X86_FEATURE_RING3MWAIT	( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */
+#define X86_FEATURE_CPUID_FAULT	( 7*32+ 1) /* Intel CPUID faulting */
 #define X86_FEATURE_CPB		( 7*32+ 2) /* AMD Core Performance Boost */
 #define X86_FEATURE_EPB		( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
 #define X86_FEATURE_CAT_L3	( 7*32+ 4) /* Cache Allocation Technology L3 */
arch/x86/include/asm/msr-index.h
@@ -45,6 +45,8 @@
 #define MSR_IA32_PERFCTR1		0x000000c2
 #define MSR_FSB_FREQ			0x000000cd
 #define MSR_PLATFORM_INFO		0x000000ce
+#define MSR_PLATFORM_INFO_CPUID_FAULT_BIT	31
+#define MSR_PLATFORM_INFO_CPUID_FAULT		BIT_ULL(MSR_PLATFORM_INFO_CPUID_FAULT_BIT)
 #define MSR_PKG_CST_CONFIG_CONTROL	0x000000e2
 #define NHM_C3_AUTO_DEMOTE		(1UL << 25)
@@ -127,6 +129,7 @@
 /* DEBUGCTLMSR bits (others vary by model): */
 #define DEBUGCTLMSR_LBR			(1UL <<  0) /* last branch recording */
+#define DEBUGCTLMSR_BTF_SHIFT		1
 #define DEBUGCTLMSR_BTF			(1UL <<  1) /* single-step on branches */
 #define DEBUGCTLMSR_TR			(1UL <<  6)
 #define DEBUGCTLMSR_BTS			(1UL <<  7)
@@ -552,10 +555,12 @@
 #define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT	39
 #define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT)
 
-/* MISC_FEATURE_ENABLES non-architectural features */
-#define MSR_MISC_FEATURE_ENABLES	0x00000140
+/* MISC_FEATURES_ENABLES non-architectural features */
+#define MSR_MISC_FEATURES_ENABLES	0x00000140
 
-#define MSR_MISC_FEATURE_ENABLES_RING3MWAIT_BIT	1
+#define MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT	0
+#define MSR_MISC_FEATURES_ENABLES_CPUID_FAULT		BIT_ULL(MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT)
+#define MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT	1
 
 #define MSR_IA32_TSC_DEADLINE		0x000006E0
arch/x86/include/asm/processor.h
@@ -884,6 +884,8 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
 extern int get_tsc_mode(unsigned long adr);
 extern int set_tsc_mode(unsigned int val);
 
+DECLARE_PER_CPU(u64, msr_misc_features_shadow);
+
 /* Register/unregister a process' MPX related resource */
 #define MPX_ENABLE_MANAGEMENT()	mpx_enable_management()
 #define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()
arch/x86/include/asm/proto.h
@@ -9,6 +9,7 @@ void syscall_init(void);
 
 #ifdef CONFIG_X86_64
 void entry_SYSCALL_64(void);
+long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2);
 #endif
 
 #ifdef CONFIG_X86_32
@@ -30,6 +31,7 @@ void x86_report_nx(void);
 
 extern int reboot_force;
 
-long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
+long do_arch_prctl_common(struct task_struct *task, int option,
+			  unsigned long cpuid_enabled);
 
 #endif /* _ASM_X86_PROTO_H */
arch/x86/include/asm/thread_info.h
@@ -87,6 +87,7 @@ struct thread_info {
 #define TIF_SECCOMP		8	/* secure computing */
 #define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
 #define TIF_UPROBE		12	/* breakpointed or singlestepping */
+#define TIF_NOCPUID		15	/* CPUID is not accessible in userland */
 #define TIF_NOTSC		16	/* TSC is not accessible in userland */
 #define TIF_IA32		17	/* IA32 compatibility process */
 #define TIF_NOHZ		19	/* in adaptive nohz mode */
@@ -110,6 +111,7 @@ struct thread_info {
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
+#define _TIF_NOCPUID		(1 << TIF_NOCPUID)
 #define _TIF_NOTSC		(1 << TIF_NOTSC)
 #define _TIF_IA32		(1 << TIF_IA32)
 #define _TIF_NOHZ		(1 << TIF_NOHZ)
@@ -138,7 +140,7 @@ struct thread_info {
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW							\
-	(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
+	(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP)
 
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
@@ -239,6 +241,8 @@ static inline int arch_within_stack_frames(const void * const stack,
 extern void arch_task_cache_init(void);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 extern void arch_release_task_struct(struct task_struct *tsk);
+extern void arch_setup_new_exec(void);
+#define arch_setup_new_exec arch_setup_new_exec
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* _ASM_X86_THREAD_INFO_H */
arch/x86/include/asm/tlbflush.h
@@ -110,6 +110,16 @@ static inline void cr4_clear_bits(unsigned long mask)
 	}
 }
 
+static inline void cr4_toggle_bits(unsigned long mask)
+{
+	unsigned long cr4;
+
+	cr4 = this_cpu_read(cpu_tlbstate.cr4);
+	cr4 ^= mask;
+	this_cpu_write(cpu_tlbstate.cr4, cr4);
+	__write_cr4(cr4);
+}
+
 /* Read the CR4 shadow. */
 static inline unsigned long cr4_read_shadow(void)
 {
arch/x86/include/uapi/asm/prctl.h
 #ifndef _ASM_X86_PRCTL_H
 #define _ASM_X86_PRCTL_H
 
-#define ARCH_SET_GS 0x1001
-#define ARCH_SET_FS 0x1002
-#define ARCH_GET_FS 0x1003
-#define ARCH_GET_GS 0x1004
+#define ARCH_SET_GS		0x1001
+#define ARCH_SET_FS		0x1002
+#define ARCH_GET_FS		0x1003
+#define ARCH_GET_GS		0x1004
+
+#define ARCH_GET_CPUID		0x1011
+#define ARCH_SET_CPUID		0x1012
 
 #define ARCH_MAP_VDSO_X32	0x2001
 #define ARCH_MAP_VDSO_32	0x2002
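The two new options are driven from userspace through the existing arch_prctl() entry point (and, after this merge, through the newly wired 32-bit/compat syscall as well). Below is a minimal userspace sketch, not part of the patch: it assumes an x86-64 Linux host whose headers export these constants and a CPU with CPUID faulting (otherwise ARCH_SET_CPUID fails with ENODEV), and it invokes the syscall via syscall(2) since glibc does not provide a dedicated wrapper.

/* Hypothetical demo: query, then disable, CPUID access for this task. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>		/* ARCH_GET_CPUID / ARCH_SET_CPUID */

int main(void)
{
	/* Return value is 1 when CPUID works in userspace, 0 when it faults. */
	long enabled = syscall(SYS_arch_prctl, ARCH_GET_CPUID, 0);
	printf("CPUID currently enabled: %ld\n", enabled);

	/* arg2 == 0 requests CPUID faulting; a later CPUID then raises SIGSEGV. */
	if (syscall(SYS_arch_prctl, ARCH_SET_CPUID, 0) != 0)
		perror("ARCH_SET_CPUID");

	return 0;
}

Per the process.c changes in this merge, the setting is per task and arch_setup_new_exec() re-enables CPUID on execve() if it had been disabled.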
arch/x86/kernel/cpu/intel.c
@@ -90,16 +90,12 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
 		return;
 	}
 
-	if (ring3mwait_disabled) {
-		msr_clear_bit(MSR_MISC_FEATURE_ENABLES,
-			      MSR_MISC_FEATURE_ENABLES_RING3MWAIT_BIT);
+	if (ring3mwait_disabled)
 		return;
-	}
 
-	msr_set_bit(MSR_MISC_FEATURE_ENABLES,
-		    MSR_MISC_FEATURE_ENABLES_RING3MWAIT_BIT);
 	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
+	this_cpu_or(msr_misc_features_shadow,
+		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);
 
 	if (c == &boot_cpu_data)
 		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
@@ -488,6 +484,34 @@ static void intel_bsp_resume(struct cpuinfo_x86 *c)
 	init_intel_energy_perf(c);
 }
 
+static void init_cpuid_fault(struct cpuinfo_x86 *c)
+{
+	u64 msr;
+
+	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
+		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
+			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
+	}
+}
+
+static void init_intel_misc_features(struct cpuinfo_x86 *c)
+{
+	u64 msr;
+
+	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
+		return;
+
+	/* Clear all MISC features */
+	this_cpu_write(msr_misc_features_shadow, 0);
+
+	/* Check features and update capabilities and shadow control bits */
+	init_cpuid_fault(c);
+	probe_xeon_phi_r3mwait(c);
+
+	msr = this_cpu_read(msr_misc_features_shadow);
+	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
+}
+
 static void init_intel(struct cpuinfo_x86 *c)
 {
 	unsigned int l2 = 0;
@@ -602,7 +626,7 @@ static void init_intel(struct cpuinfo_x86 *c)
 	init_intel_energy_perf(c);
 
-	probe_xeon_phi_r3mwait(c);
+	init_intel_misc_features(c);
 }
 
 #ifdef CONFIG_X86_32
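init_intel_misc_features() establishes a per-CPU software shadow of MSR_MISC_FEATURES_ENABLES: the shadow starts at zero, the probe helpers OR their bits into it, and the combined value is written to the MSR once; later runtime toggles (CPUID faulting) read-modify-write the shadow instead of re-reading hardware. The standalone toy below only illustrates that shadow-register pattern; the names, bit numbers, and helpers are invented for the demo and are not kernel API.

#include <stdio.h>

#define DEMO_CPUID_FAULT_BIT	0	/* demo stand-ins for the MSR bit numbers */
#define DEMO_RING3MWAIT_BIT	1

static unsigned long long shadow;	/* software copy; per CPU in the kernel */
static unsigned long long fake_msr;	/* stands in for the hardware register  */

static void write_msr_demo(unsigned long long val)
{
	fake_msr = val;			/* the kernel would use wrmsrl() here */
}

int main(void)
{
	/* Boot-time: start clean, let the probes set their bits, sync once. */
	shadow = 0;
	shadow |= 1ULL << DEMO_RING3MWAIT_BIT;	/* like probe_xeon_phi_r3mwait() */
	write_msr_demo(shadow);

	/* Runtime: toggle one bit via the shadow, write the whole value back. */
	shadow |= 1ULL << DEMO_CPUID_FAULT_BIT;	/* like set_cpuid_faulting(true) */
	write_msr_demo(shadow);

	printf("msr now %#llx\n", fake_msr);	/* prints 0x3 */
	return 0;
}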
arch/x86/kernel/process.c
@@ -37,6 +37,7 @@
 #include <asm/vm86.h>
 #include <asm/switch_to.h>
 #include <asm/desc.h>
+#include <asm/prctl.h>
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -124,11 +125,6 @@ void flush_thread(void)
 	fpu__clear(&tsk->thread.fpu);
 }
 
-static void hard_disable_TSC(void)
-{
-	cr4_set_bits(X86_CR4_TSD);
-}
-
 void disable_TSC(void)
 {
 	preempt_disable();
@@ -137,15 +133,10 @@ void disable_TSC(void)
 	 * Must flip the CPU state synchronously with
 	 * TIF_NOTSC in the current running context.
 	 */
-	hard_disable_TSC();
+	cr4_set_bits(X86_CR4_TSD);
 	preempt_enable();
 }
 
-static void hard_enable_TSC(void)
-{
-	cr4_clear_bits(X86_CR4_TSD);
-}
-
 static void enable_TSC(void)
 {
 	preempt_disable();
@@ -154,7 +145,7 @@ static void enable_TSC(void)
 	 * Must flip the CPU state synchronously with
 	 * TIF_NOTSC in the current running context.
 	 */
-	hard_enable_TSC();
+	cr4_clear_bits(X86_CR4_TSD);
 	preempt_enable();
 }
 
@@ -182,54 +173,129 @@ int set_tsc_mode(unsigned int val)
 	return 0;
 }
 
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-		      struct tss_struct *tss)
+DEFINE_PER_CPU(u64, msr_misc_features_shadow);
+
+static void set_cpuid_faulting(bool on)
 {
-	struct thread_struct *prev, *next;
+	u64 msrval;
 
-	prev = &prev_p->thread;
-	next = &next_p->thread;
+	msrval = this_cpu_read(msr_misc_features_shadow);
+	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
+	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
+	this_cpu_write(msr_misc_features_shadow, msrval);
+	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
+}
 
-	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
-	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
-		unsigned long debugctl = get_debugctlmsr();
+static void disable_cpuid(void)
+{
+	preempt_disable();
+	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
+		/*
+		 * Must flip the CPU state synchronously with
+		 * TIF_NOCPUID in the current running context.
+		 */
+		set_cpuid_faulting(true);
+	}
+	preempt_enable();
+}
 
-		debugctl &= ~DEBUGCTLMSR_BTF;
-		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
-			debugctl |= DEBUGCTLMSR_BTF;
+static void enable_cpuid(void)
+{
+	preempt_disable();
+	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
+		/*
+		 * Must flip the CPU state synchronously with
+		 * TIF_NOCPUID in the current running context.
		 */
+		set_cpuid_faulting(false);
+	}
+	preempt_enable();
+}
 
-		update_debugctlmsr(debugctl);
+static int get_cpuid_mode(void)
+{
+	return !test_thread_flag(TIF_NOCPUID);
+}
+
+static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
+{
+	if (!static_cpu_has(X86_FEATURE_CPUID_FAULT))
+		return -ENODEV;
+
+	if (cpuid_enabled)
+		enable_cpuid();
+	else
+		disable_cpuid();
+
+	return 0;
+}
+
+/*
+ * Called immediately after a successful exec.
+ */
+void arch_setup_new_exec(void)
+{
+	/* If cpuid was previously disabled for this task, re-enable it. */
+	if (test_thread_flag(TIF_NOCPUID))
+		enable_cpuid();
+}
+
+static inline void switch_to_bitmap(struct tss_struct *tss,
+				    struct thread_struct *prev,
+				    struct thread_struct *next,
+				    unsigned long tifp, unsigned long tifn)
+{
+	if (tifn & _TIF_IO_BITMAP) {
+		/*
+		 * Copy the relevant range of the IO bitmap.
+		 * Normally this is 128 bytes or less:
+		 */
+		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
+		       max(prev->io_bitmap_max, next->io_bitmap_max));
+		/*
+		 * Make sure that the TSS limit is correct for the CPU
+		 * to notice the IO bitmap.
+		 */
+		refresh_tss_limit();
+	} else if (tifp & _TIF_IO_BITMAP) {
+		/*
+		 * Clear any possible leftover bits:
+		 */
+		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
 	}
+}
 
-	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
-	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
-		/* prev and next are different */
-		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
-			hard_disable_TSC();
-		else
-			hard_enable_TSC();
-	}
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+		      struct tss_struct *tss)
+{
+	struct thread_struct *prev, *next;
+	unsigned long tifp, tifn;
 
-	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
-		/*
-		 * Copy the relevant range of the IO bitmap.
-		 * Normally this is 128 bytes or less:
-		 */
-		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
-		       max(prev->io_bitmap_max, next->io_bitmap_max));
+	prev = &prev_p->thread;
+	next = &next_p->thread;
 
-		/*
-		 * Make sure that the TSS limit is correct for the CPU
-		 * to notice the IO bitmap.
-		 */
-		refresh_tss_limit();
-	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
-		/*
-		 * Clear any possible leftover bits:
-		 */
-		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
-	}
+	tifn = READ_ONCE(task_thread_info(next_p)->flags);
+	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
+	switch_to_bitmap(tss, prev, next, tifp, tifn);
+
 	propagate_user_return_notify(prev_p, next_p);
+
+	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
+	    arch_has_block_step()) {
+		unsigned long debugctl, msk;
+
+		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+		debugctl &= ~DEBUGCTLMSR_BTF;
+		msk = tifn & _TIF_BLOCKSTEP;
+		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
+		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+	}
+
+	if ((tifp ^ tifn) & _TIF_NOTSC)
+		cr4_toggle_bits(X86_CR4_TSD);
+
+	if ((tifp ^ tifn) & _TIF_NOCPUID)
+		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
 }
 
 /*
@@ -550,3 +616,16 @@ unsigned long get_wchan(struct task_struct *p)
 	put_task_stack(p);
 	return ret;
 }
+
+long do_arch_prctl_common(struct task_struct *task, int option,
+			  unsigned long cpuid_enabled)
+{
+	switch (option) {
+	case ARCH_GET_CPUID:
+		return get_cpuid_mode();
+	case ARCH_SET_CPUID:
+		return set_cpuid_mode(task, cpuid_enabled);
+	}
+
+	return -EINVAL;
+}
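The rewritten __switch_to_xtra() snapshots both tasks' flag words once and then uses plain bit arithmetic instead of repeated test_tsk_thread_flag() calls. The standalone sketch below illustrates the two idioms; TIF_NOTSC (16) and DEBUGCTLMSR_BTF_SHIFT (1) match values visible elsewhere in this diff, while TIF_BLOCKSTEP's value is not shown here, so 25 is only an assumed stand-in for the demo.

#include <stdio.h>

#define TIF_BLOCKSTEP		25			/* assumed value, demo only */
#define _TIF_BLOCKSTEP		(1UL << TIF_BLOCKSTEP)
#define _TIF_NOTSC		(1UL << 16)		/* TIF_NOTSC == 16 in this diff */
#define DEBUGCTLMSR_BTF_SHIFT	1			/* added to msr-index.h in this diff */

int main(void)
{
	unsigned long tifp = _TIF_NOTSC;			/* outgoing task's flags */
	unsigned long tifn = _TIF_NOTSC | _TIF_BLOCKSTEP;	/* incoming task's flags */

	/* XOR is non-zero only for flags that change across the switch,
	 * so CR4.TSD is toggled only when TIF_NOTSC actually differs. */
	printf("toggle TSD:     %d\n", !!((tifp ^ tifn) & _TIF_NOTSC));		/* 0 */

	/* "set on either side" is the condition for touching DEBUGCTL at all. */
	printf("touch DEBUGCTL: %d\n", !!((tifp | tifn) & _TIF_BLOCKSTEP));	/* 1 */

	/* Branch-free move of the incoming TIF_BLOCKSTEP bit into the BTF position. */
	unsigned long msk = tifn & _TIF_BLOCKSTEP;
	printf("BTF bits:       %#lx\n",
	       (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT);		/* 0x2 */
	return 0;
}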
arch/x86/kernel/process_32.c
@@ -37,6 +37,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/kdebug.h>
+#include <linux/syscalls.h>
 
 #include <asm/pgtable.h>
 #include <asm/ldt.h>
@@ -56,6 +57,7 @@
 #include <asm/switch_to.h>
 #include <asm/vm86.h>
 #include <asm/intel_rdt.h>
+#include <asm/proto.h>
 
 void __show_regs(struct pt_regs *regs, int all)
 {
@@ -304,3 +306,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	return prev_p;
 }
+
+SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
+{
+	return do_arch_prctl_common(current, option, arg2);
+}
arch/x86/kernel/process_64.c
@@ -37,6 +37,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/ftrace.h>
+#include <linux/syscalls.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -204,7 +205,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 					(struct user_desc __user *)tls, 0);
 		else
 #endif
-			err = do_arch_prctl(p, ARCH_SET_FS, tls);
+			err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
 		if (err)
 			goto out;
 	}
@@ -547,70 +548,72 @@ static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
 }
 #endif
 
-long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
+long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
 {
 	int ret = 0;
 	int doit = task == current;
 	int cpu;
 
-	switch (code) {
+	switch (option) {
 	case ARCH_SET_GS:
-		if (addr >= TASK_SIZE_MAX)
+		if (arg2 >= TASK_SIZE_MAX)
 			return -EPERM;
 		cpu = get_cpu();
 		task->thread.gsindex = 0;
-		task->thread.gsbase = addr;
+		task->thread.gsbase = arg2;
 		if (doit) {
 			load_gs_index(0);
-			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
+			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, arg2);
 		}
 		put_cpu();
 		break;
 	case ARCH_SET_FS:
 		/* Not strictly needed for fs, but do it for symmetry
 		   with gs */
-		if (addr >= TASK_SIZE_MAX)
+		if (arg2 >= TASK_SIZE_MAX)
 			return -EPERM;
 		cpu = get_cpu();
 		task->thread.fsindex = 0;
-		task->thread.fsbase = addr;
+		task->thread.fsbase = arg2;
 		if (doit) {
 			/* set the selector to 0 to not confuse __switch_to */
 			loadsegment(fs, 0);
-			ret = wrmsrl_safe(MSR_FS_BASE, addr);
+			ret = wrmsrl_safe(MSR_FS_BASE, arg2);
 		}
 		put_cpu();
 		break;
 	case ARCH_GET_FS: {
 		unsigned long base;
+
 		if (doit)
 			rdmsrl(MSR_FS_BASE, base);
 		else
 			base = task->thread.fsbase;
-		ret = put_user(base, (unsigned long __user *)addr);
+		ret = put_user(base, (unsigned long __user *)arg2);
 		break;
 	}
 	case ARCH_GET_GS: {
 		unsigned long base;
+
 		if (doit)
 			rdmsrl(MSR_KERNEL_GS_BASE, base);
 		else
 			base = task->thread.gsbase;
-		ret = put_user(base, (unsigned long __user *)addr);
+		ret = put_user(base, (unsigned long __user *)arg2);
 		break;
 	}
 
 #ifdef CONFIG_CHECKPOINT_RESTORE
 # ifdef CONFIG_X86_X32_ABI
 	case ARCH_MAP_VDSO_X32:
-		return prctl_map_vdso(&vdso_image_x32, addr);
+		return prctl_map_vdso(&vdso_image_x32, arg2);
 # endif
 # if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
 	case ARCH_MAP_VDSO_32:
-		return prctl_map_vdso(&vdso_image_32, addr);
+		return prctl_map_vdso(&vdso_image_32, arg2);
 # endif
 	case ARCH_MAP_VDSO_64:
-		return prctl_map_vdso(&vdso_image_64, addr);
+		return prctl_map_vdso(&vdso_image_64, arg2);
 #endif
 
 	default:
@@ -621,10 +624,23 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 	return ret;
 }
 
-long sys_arch_prctl(int code, unsigned long addr)
+SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
+{
+	long ret;
+
+	ret = do_arch_prctl_64(current, option, arg2);
+	if (ret == -EINVAL)
+		ret = do_arch_prctl_common(current, option, arg2);
+
+	return ret;
+}
+
+#ifdef CONFIG_IA32_EMULATION
+COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
 {
-	return do_arch_prctl(current, code, addr);
+	return do_arch_prctl_common(current, option, arg2);
 }
+#endif
 
 unsigned long KSTK_ESP(struct task_struct *task)
 {
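With this split, the 64-bit SYSCALL_DEFINE2(arch_prctl) tries do_arch_prctl_64() first and, on -EINVAL, falls through to do_arch_prctl_common(), so the long-standing FS/GS options and the new CPUID options share one syscall. A hedged userspace sketch (again via syscall(2) on an assumed x86-64 host; not part of the patch) that exercises one of the classic options:

/* Hypothetical demo: read this thread's FS base via ARCH_GET_FS. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>

int main(void)
{
	unsigned long fsbase = 0;

	/* Handled by do_arch_prctl_64(): the base is written through arg2. */
	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) == 0)
		printf("FS base: %#lx\n", fsbase);
	else
		perror("ARCH_GET_FS");

	return 0;
}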
arch/x86/kernel/ptrace.c
@@ -396,12 +396,12 @@ static int putreg(struct task_struct *child,
 		if (value >= TASK_SIZE_MAX)
 			return -EIO;
 		/*
-		 * When changing the segment base, use do_arch_prctl
+		 * When changing the segment base, use do_arch_prctl_64
 		 * to set either thread.fs or thread.fsindex and the
 		 * corresponding GDT slot.
 		 */
 		if (child->thread.fsbase != value)
-			return do_arch_prctl(child, ARCH_SET_FS, value);
+			return do_arch_prctl_64(child, ARCH_SET_FS, value);
 		return 0;
 	case offsetof(struct user_regs_struct,gs_base):
 		/*
@@ -410,7 +410,7 @@ static int putreg(struct task_struct *child,
 		if (value >= TASK_SIZE_MAX)
 			return -EIO;
 		if (child->thread.gsbase != value)
-			return do_arch_prctl(child, ARCH_SET_GS, value);
+			return do_arch_prctl_64(child, ARCH_SET_GS, value);
 		return 0;
 #endif
 	}
@@ -869,7 +869,7 @@ long arch_ptrace(struct task_struct *child, long request,
 	   Works just like arch_prctl, except that the arguments
 	   are reversed. */
 	case PTRACE_ARCH_PRCTL:
-		ret = do_arch_prctl(child, data, addr);
+		ret = do_arch_prctl_64(child, data, addr);
 		break;
 #endif
arch/x86/um/Makefile
@@ -16,7 +16,7 @@ obj-y = bug.o bugs_$(BITS).o delay.o fault.o ldt.o \
 
 ifeq ($(CONFIG_X86_32),y)
 
-obj-y += checksum_32.o
+obj-y += checksum_32.o syscalls_32.o
 obj-$(CONFIG_ELF_CORE) += elfcore.o
 
 subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o
arch/x86/um/asm/ptrace.h
@@ -78,7 +78,7 @@ static inline int ptrace_set_thread_area(struct task_struct *child, int idx,
 	return -ENOSYS;
 }
 
-extern long arch_prctl(struct task_struct *task, int code,
+extern long arch_prctl(struct task_struct *task, int option,
 		       unsigned long __user *addr);
 
 #endif
arch/x86/um/os-Linux/prctl.c
@@ -6,7 +6,7 @@
 #include <sys/ptrace.h>
 #include <asm/ptrace.h>
 
-int os_arch_prctl(int pid, int code, unsigned long *addr)
+int os_arch_prctl(int pid, int option, unsigned long *arg2)
 {
-	return ptrace(PTRACE_ARCH_PRCTL, pid, (unsigned long) addr, code);
+	return ptrace(PTRACE_ARCH_PRCTL, pid, (unsigned long) arg2, option);
 }
arch/x86/um/syscalls_32.c
new file mode 100644

+#include <linux/syscalls.h>
+#include <os.h>
+
+SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
+{
+	return -EINVAL;
+}
arch/x86/um/syscalls_64.c
@@ -7,13 +7,15 @@
 
 #include <linux/sched.h>
 #include <linux/sched/mm.h>
+#include <linux/syscalls.h>
 #include <linux/uaccess.h>
 #include <asm/prctl.h> /* XXX This should get the constants from libc */
 #include <os.h>
 
-long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
+long arch_prctl(struct task_struct *task, int option,
+		unsigned long __user *arg2)
 {
-	unsigned long *ptr = addr, tmp;
+	unsigned long *ptr = arg2, tmp;
 	long ret;
 	int pid = task->mm->context.id.u.pid;
 
@@ -30,7 +32,7 @@ long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
 	 * arch_prctl is run on the host, then the registers are read
 	 * back.
 	 */
-	switch (code) {
+	switch (option) {
 	case ARCH_SET_FS:
 	case ARCH_SET_GS:
 		ret = restore_registers(pid, &current->thread.regs.regs);
@@ -50,11 +52,11 @@ long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
 		ptr = &tmp;
 	}
 
-	ret = os_arch_prctl(pid, code, ptr);
+	ret = os_arch_prctl(pid, option, ptr);
 	if (ret)
 		return ret;
 
-	switch (code) {
+	switch (option) {
 	case ARCH_SET_FS:
 		current->thread.arch.fs = (unsigned long) ptr;
 		ret = save_registers(pid, &current->thread.regs.regs);
@@ -63,19 +65,19 @@ long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
 		ret = save_registers(pid, &current->thread.regs.regs);
 		break;
 	case ARCH_GET_FS:
-		ret = put_user(tmp, addr);
+		ret = put_user(tmp, arg2);
 		break;
 	case ARCH_GET_GS:
-		ret = put_user(tmp, addr);
+		ret = put_user(tmp, arg2);
 		break;
 	}
 
 	return ret;
 }
 
-long sys_arch_prctl(int code, unsigned long addr)
+SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
 {
-	return arch_prctl(current, code, (unsigned long __user *) addr);
+	return arch_prctl(current, option, (unsigned long __user *) arg2);
 }
 
 void arch_switch_to(struct task_struct *to)
fs/exec.c
@@ -1320,6 +1320,7 @@ void setup_new_exec(struct linux_binprm * bprm)
 	else
 		set_dumpable(current->mm, suid_dumpable);
 
+	arch_setup_new_exec();
 	perf_event_exec();
 	__set_task_comm(current, kbasename(bprm->filename), true);
include/linux/compat.h
@@ -723,6 +723,8 @@ asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
 asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
 					 int, const char __user *);
 
+asmlinkage long compat_sys_arch_prctl(int option, unsigned long arg2);
+
 /*
  * For most but not all architectures, "am I in a compat syscall?" and
  * "am I a compat task?" are the same question. For architectures on which
include/linux/thread_info.h
@@ -101,6 +101,10 @@ static inline void check_object_size(const void *ptr, unsigned long n,
 { }
 #endif /* CONFIG_HARDENED_USERCOPY */
 
+#ifndef arch_setup_new_exec
+static inline void arch_setup_new_exec(void) { }
+#endif
+
 #endif	/* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */