Commit d2af330f (unverified)
Authored on Apr 28, 2022 by openharmony_ci; committed via Gitee on Apr 28, 2022

!281 Thread stack naming
Merge pull request !281 from fangting10/master

Parents: 0d0bd42d, 21ec438b

Showing 2 changed files with 435 additions and 0 deletions (+435 -0):
  musl_src.gni                                    +1    -0
  porting/linux/user/src/thread/pthread_create.c  +434  -0
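The patch names each thread's stack and guard-page mappings with prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...), so they become identifiable in /proc/<pid>/maps as [anon:stack:<tid>] and [anon:guard:<tid>]. As a minimal standalone sketch of the same technique (not part of this commit; it assumes a kernel built with anonymous-VMA naming support, e.g. CONFIG_ANON_VMA_NAME, and supplies the kernel UAPI constant values as fallbacks for older headers):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>

/* Fallbacks for older headers; values match the kernel UAPI. */
#ifndef PR_SET_VMA
#define PR_SET_VMA 0x53564d41
#endif
#ifndef PR_SET_VMA_ANON_NAME
#define PR_SET_VMA_ANON_NAME 0
#endif

int main(void)
{
    size_t len = 4096 * 4;
    char *p = mmap(0, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    /* Tag the anonymous mapping; it then shows up in /proc/self/maps
     * as "[anon:demo]". Fails with EINVAL on kernels built without
     * anonymous-VMA naming support. */
    if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, len, "demo"))
        perror("prctl(PR_SET_VMA)");

    /* Print the matching maps entry so the name is visible. */
    char line[256];
    FILE *f = fopen("/proc/self/maps", "r");
    while (f && fgets(line, sizeof line, f))
        if (strstr(line, "[anon:demo]")) fputs(line, stdout);
    if (f) fclose(f);
    munmap(p, len);
    return 0;
}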
musl_src.gni @ d2af330f

@@ -1854,6 +1854,7 @@ musl_src_porting_file = [
   "src/exit/atexit.c",
   "crt/arm/crti.s",
   "crt/aarch64/crti.s",
+  "src/thread/pthread_create.c",
 ]
 musl_inc_hook_files = [
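Presumably, musl_src_porting_file is the GN list of upstream musl sources that OpenHarmony overrides: adding src/thread/pthread_create.c here makes the build pick up the new copy under porting/linux/user/ instead of the stock musl file.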
porting/linux/user/src/thread/pthread_create.c (new file, mode 0 → 100644) @ d2af330f
#define _GNU_SOURCE
#define ANON_STACK_NAME_SIZE 50
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include "lock.h"
#include <sys/mman.h>
#include <string.h>
#include <stddef.h>
#include <stdarg.h>
#include <sys/prctl.h>

void log_print(const char *info, ...)
{
    va_list ap;
    va_start(ap, info);
    vfprintf(stdout, info, ap);
    va_end(ap);
}

static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
weak_alias(dummy_0, __dl_thread_cleanup);
weak_alias(dummy_0, __membarrier_init);

static int tl_lock_count;
static int tl_lock_waiters;

void __tl_lock(void)
{
    int tid = __pthread_self()->tid;
    int val = __thread_list_lock;
    if (val == tid) {
        tl_lock_count++;
        return;
    }
    while ((val = a_cas(&__thread_list_lock, 0, tid)))
        __wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
}

void __tl_unlock(void)
{
    if (tl_lock_count) {
        tl_lock_count--;
        return;
    }
    a_store(&__thread_list_lock, 0);
    if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}

void __tl_sync(pthread_t td)
{
    a_barrier();
    int val = __thread_list_lock;
    if (!val) return;
    __wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
    if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}

_Noreturn void __pthread_exit(void *result)
{
    pthread_t self = __pthread_self();
    sigset_t set;

    self->canceldisable = 1;
    self->cancelasync = 0;
    self->result = result;

    while (self->cancelbuf) {
        void (*f)(void *) = self->cancelbuf->__f;
        void *x = self->cancelbuf->__x;
        self->cancelbuf = self->cancelbuf->__next;
        f(x);
    }

    __pthread_tsd_run_dtors();

    /* Access to target the exiting thread with syscalls that use
     * its kernel tid is controlled by killlock. For detached threads,
     * any use past this point would have undefined behavior, but for
     * joinable threads it's a valid usage that must be handled. */
    LOCK(self->killlock);

    /* The thread list lock must be AS-safe, and thus requires
     * application signals to be blocked before it can be taken. */
    __block_app_sigs(&set);
    __tl_lock();

    /* If this is the only thread in the list, don't proceed with
     * termination of the thread, but restore the previous lock and
     * signal state to prepare for exit to call atexit handlers. */
    if (self->next == self) {
        __tl_unlock();
        __restore_sigs(&set);
        UNLOCK(self->killlock);
        exit(0);
    }

    /* At this point we are committed to thread termination. Unlink
     * the thread from the list. This change will not be visible
     * until the lock is released, which only happens after SYS_exit
     * has been called, via the exit futex address pointing at the lock. */
    libc.threads_minus_1--;
    self->next->prev = self->prev;
    self->prev->next = self->next;
    self->prev = self->next = self;

    /* Process robust list in userspace to handle non-pshared mutexes
     * and the detached thread case where the robust list head will
     * be invalid when the kernel would process it. */
    __vm_lock();
    volatile void *volatile *rp;
    while ((rp = self->robust_list.head) && rp != &self->robust_list.head) {
        pthread_mutex_t *m = (void *)((char *)rp
            - offsetof(pthread_mutex_t, _m_next));
        int waiters = m->_m_waiters;
        int priv = (m->_m_type & 128) ^ 128;
        self->robust_list.pending = rp;
        self->robust_list.head = *rp;
        int cont = a_swap(&m->_m_lock, 0x40000000);
        self->robust_list.pending = 0;
        if (cont < 0 || waiters)
            __wake(&m->_m_lock, 1, priv);
    }
    __vm_unlock();

    __do_orphaned_stdio_locks();
    __dl_thread_cleanup();

    /* This atomic potentially competes with a concurrent pthread_detach
     * call; the loser is responsible for freeing thread resources. */
    int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);

    if (state == DT_DETACHED && self->map_base) {
        /* Detached threads must block even implementation-internal
         * signals, since they will not have a stack in their last
         * moments of existence. */
        __block_all_sigs(&set);

        /* Robust list will no longer be valid, and was already
         * processed above, so unregister it with the kernel. */
        if (self->robust_list.off)
            __syscall(SYS_set_robust_list, 0, 3*sizeof(long));

        /* Since __unmapself bypasses the normal munmap code path,
         * explicitly wait for vmlock holders first. */
        __vm_wait();

        /* The following call unmaps the thread's stack mapping
         * and then exits without touching the stack. */
        __unmapself(self->map_base, self->map_size);
    }

    /* Wake any joiner. */
    __wake(&self->detach_state, 1, 1);

    /* After the kernel thread exits, its tid may be reused. Clear it
     * to prevent inadvertent use and inform functions that would use
     * it that it's no longer available. */
    self->tid = 0;
    UNLOCK(self->killlock);

    for (;;) __syscall(SYS_exit, 0);
}

void __do_cleanup_push(struct __ptcb *cb)
{
    struct pthread *self = __pthread_self();
    cb->__next = self->cancelbuf;
    self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
    __pthread_self()->cancelbuf = cb->__next;
}

struct start_args {
    void *(*start_func)(void *);
    void *start_arg;
    volatile int control;
    unsigned long sig_mask[_NSIG/8/sizeof(long)];
};

static int start(void *p)
{
    struct start_args *args = p;
    int state = args->control;
    if (state) {
        if (a_cas(&args->control, 1, 2) == 1)
            __wait(&args->control, 0, 2, 1);
        if (args->control) {
            __syscall(SYS_set_tid_address, &args->control);
            for (;;) __syscall(SYS_exit, 0);
        }
    }
    __syscall(SYS_rt_sigprocmask, SIG_SETMASK, &args->sig_mask, 0, _NSIG/8);
    __pthread_exit(args->start_func(args->start_arg));
    return 0;
}

static int start_c11(void *p)
{
    struct start_args *args = p;
    int (*start)(void *) = (int (*)(void *))args->start_func;
    __pthread_exit((void *)(uintptr_t)start(args->start_arg));
    return 0;
}

#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);

static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

static void init_file_lock(FILE *f)
{
    if (f && f->lock < 0) f->lock = 0;
}

int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp,
                     void *(*entry)(void *), void *restrict arg)
{
    int ret, c11 = (attrp == __ATTRP_C11_THREAD);
    size_t size, guard, size_len;
    struct pthread *self, *new;
    unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit, *start_addr;
    unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
        | CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
        | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
    pthread_attr_t attr = { 0 };
    sigset_t set;

    if (!libc.can_do_threads) return ENOSYS;
    self = __pthread_self();
    if (!libc.threaded) {
        for (FILE *f = *__ofl_lock(); f; f = f->next)
            init_file_lock(f);
        __ofl_unlock();
        init_file_lock(__stdin_used);
        init_file_lock(__stdout_used);
        init_file_lock(__stderr_used);
        __syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
        self->tsd = (void **)__pthread_tsd_main;
        __membarrier_init();
        libc.threaded = 1;
    }
    if (attrp && !c11) attr = *attrp;

    __acquire_ptc();
    if (!attrp || c11) {
        attr._a_stacksize = __default_stacksize;
        attr._a_guardsize = __default_guardsize;
    }

    if (attr._a_stackaddr) {
        size_t need = libc.tls_size + __pthread_tsd_size;
        size = attr._a_stacksize;
        stack = (void *)(attr._a_stackaddr & -16);
        stack_limit = (void *)(attr._a_stackaddr - size);
        /* Use application-provided stack for TLS only when
         * it does not take more than ~12% or 2k of the
         * application's stack space. */
        if (need < size/8 && need < 2048) {
            tsd = stack - __pthread_tsd_size;
            stack = tsd - libc.tls_size;
            memset(stack, 0, need);
        } else {
            size = ROUND(need);
        }
        guard = 0;
    } else {
        guard = ROUND(attr._a_guardsize);
        size = guard + ROUND(attr._a_stacksize
            + libc.tls_size + __pthread_tsd_size);
    }

    if (!tsd) {
        if (guard) {
            map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
            if (map == MAP_FAILED) goto fail;
            if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
                && errno != ENOSYS) {
                __munmap(map, size);
                goto fail;
            }
            char guard_name[ANON_STACK_NAME_SIZE];
            snprintf(guard_name, ANON_STACK_NAME_SIZE, "guard:%d", __pthread_self()->tid);
            prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map, guard, guard_name);
            start_addr = map + guard;
            size_len = size - guard;
        } else {
            map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
            if (map == MAP_FAILED) goto fail;
            start_addr = map;
            size_len = size;
        }
        tsd = map + size - __pthread_tsd_size;
        if (!stack) {
            stack = tsd - libc.tls_size;
            stack_limit = map + guard;
        }
        char name[ANON_STACK_NAME_SIZE];
        snprintf(name, ANON_STACK_NAME_SIZE, "stack:%d", __pthread_self()->tid);
        prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start_addr, size_len, name);
    }

    new = __copy_tls(tsd - libc.tls_size);
    new->map_base = map;
    new->map_size = size;
    new->stack = stack;
    new->stack_size = stack - stack_limit;
    new->guard_size = guard;
    new->self = new;
    new->tsd = (void *)tsd;
    new->locale = &libc.global_locale;
    if (attr._a_detach) {
        new->detach_state = DT_DETACHED;
    } else {
        new->detach_state = DT_JOINABLE;
    }
    new->robust_list.head = &new->robust_list.head;
    new->CANARY = self->CANARY;
    new->sysinfo = self->sysinfo;

    /* Setup argument structure for the new thread on its stack.
     * It's safe to access from the caller only until the thread
     * list is unlocked. */
    stack -= (uintptr_t)stack % sizeof(uintptr_t);
    stack -= sizeof(struct start_args);
    struct start_args *args = (void *)stack;
    args->start_func = entry;
    args->start_arg = arg;
    args->control = attr._a_sched ? 1 : 0;

    /* Application signals (but not the synccall signal) must be
     * blocked before the thread list lock can be taken, to ensure
     * that the lock is AS-safe. */
    __block_app_sigs(&set);

    /* Ensure SIGCANCEL is unblocked in new thread. This requires
     * working with a copy of the set so we can restore the
     * original mask in the calling thread. */
    memcpy(&args->sig_mask, &set, sizeof args->sig_mask);
    args->sig_mask[(SIGCANCEL-1)/8/sizeof(long)] &=
        ~(1UL << ((SIGCANCEL-1) % (8*sizeof(long))));

    __tl_lock();
    libc.threads_minus_1++;
    ret = __clone((c11 ? start_c11 : start), stack, flags, args,
        &new->tid, TP_ADJ(new), &__thread_list_lock);

    /* All clone failures translate to EAGAIN. If explicit scheduling
     * was requested, attempt it before unlocking the thread list so
     * that the failed thread is never exposed and so that we can
     * clean up all transient resource usage before returning. */
    if (ret < 0) {
        ret = -EAGAIN;
    } else if (attr._a_sched) {
        ret = __syscall(SYS_sched_setscheduler,
            new->tid, attr._a_policy, &attr._a_prio);
        if (a_swap(&args->control, ret ? 3 : 0) == 2)
            __wake(&args->control, 1, 1);
        if (ret)
            __wait(&args->control, 0, 3, 0);
    }

    if (ret >= 0) {
        new->next = self->next;
        new->prev = self;
        new->next->prev = new;
        new->prev->next = new;
    } else {
        libc.threads_minus_1--;
    }
    __tl_unlock();
    __restore_sigs(&set);
    __release_ptc();

    if (ret < 0) {
        if (map) __munmap(map, size);
        return -ret;
    }

    *res = new;
    return 0;
fail:
    __release_ptc();
    return EAGAIN;
}

weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);

struct pthread *__pthread_list_find(pthread_t thread_id, const char *info)
{
    struct pthread *thread = (struct pthread *)thread_id;
    if (NULL == thread) {
        log_print("invalid pthread_t (0) passed to %s\n", info);
        return NULL;
    }

    struct pthread *self = __pthread_self();
    if (thread == self) {
        return thread;
    }
    struct pthread *t = self;
    t = t->next;
    while (t != self) {
        if (t == thread) return thread;
        t = t->next;
    }
    log_print("invalid pthread_t %p passed to %s\n", thread, info);
    return NULL;
}

pid_t __pthread_gettid_np(pthread_t t)
{
    __tl_lock();
    struct pthread *thread = __pthread_list_find(t, "pthread_gettid_np");
    __tl_unlock();
    return thread ? thread->tid : -1;
}
weak_alias(__pthread_gettid_np, pthread_gettid_np);