未验证 提交 1cb19e59 编写于 作者: B Bernard Xiong 提交者: GitHub

Merge pull request #5071 from liukangcc/master

[update] support armclang and c++11.
......@@ -4,4 +4,16 @@ config RT_USING_CPLUSPLUS
bool "Support C++ features"
default n
if RT_USING_CPLUSPLUS
config RT_USING_CPLUSPLUS11
bool "Enable c++11 feature support"
default n
select RT_USING_LIBC
select RT_USING_DFS
select RT_USING_PTHREADS
select RT_USING_RTC
endif
endmenu
# RT-Thread building script for component
from building import *

Import('rtconfig')

cwd = GetCurrentDir()
src = Glob('*.cpp') + Glob('*.c')
CPPPATH = [cwd]

# Pull in the C++11 support layer, plus the toolchain-specific glue code.
# (The original script re-evaluated GetDepend three times and returned the
# group twice; both redundancies are removed here.)
if GetDepend('RT_USING_CPLUSPLUS11'):
    src += Glob('cpp11/*.cpp') + Glob('cpp11/*.c')
    if rtconfig.PLATFORM == 'armclang':
        src += Glob('cpp11/armclang/*.cpp') + Glob('cpp11/armclang/*.c')
        CPPPATH += [cwd + '/cpp11/armclang']
    elif rtconfig.PLATFORM == 'gcc':
        src += Glob('cpp11/gcc/*.cpp') + Glob('cpp11/gcc/*.c')
        CPPPATH += [cwd + '/cpp11/gcc']

group = DefineGroup('CPlusPlus', src, depend = ['RT_USING_CPLUSPLUS'], CPPPATH = CPPPATH)

Return('group')
\ No newline at end of file
# C++ 11 support for RT-Thread
## Features
Here are the C++11 threading features supported by RT-Thread.
- Atomic.
- Conditional variables.
- Clocks.
- Future.
- Mutexes.
- Threads.
- TLS.
## How To Use
Note that using C++11 in RT-Thread requires modifying some files in the toolchain. Back up the toolchain before modifying it.
1. Enable c++11 support
![](figures/Snipaste_2021-09-02_16-00-09.png)
2. Download toolchain GCC 10.2.1:
```shell
gcc version 10.2.1 20201103 (release) (GNU Arm Embedded Toolchain 10-2020-q4-major)
```
3. Delete the following files:
```shell
rm -f toolchain/arm-none-eabi/include/c++/10.2.1/thread
rm -f toolchain/arm-none-eabi/include/c++/10.2.1/mutex
rm -f toolchain/arm-none-eabi/include/c++/10.2.1/condition_variable
rm -f toolchain/arm-none-eabi/include/c++/10.2.1/future
rm -f toolchain/arm-none-eabi/include/pthread.h
```
4. Clear the contents of the following files and keep them to prevent compilation failures:
```shell
toolchain/arm-none-eabi/include/sys/_pthreadtypes.h
```
5. Update `rtconfig.py` file. add compilation parameters:
```shell
CXXFLAGS = CFLAGS + ' -std=c++11 -fabi-version=0 -MMD -MP -MF'
```
# cpp 11 support for rt-thread
## 特性
下面是 RT-Thread 支持的 C++ 11 线程特性。
- Atomic.
- Conditional variables.
- Clocks.
- Future.
- Mutexes.
- Threads.
- TLS.
## 如何使用
请注意,在 RT-Thread 中使用 C++ 11,需要修改工具链中的部分文件。请在修改之前,备份好工具链。
1. 使能 c++11
![](figures/Snipaste_2021-09-02_16-00-09.png)
2. 下载 GCC 工具链
```shell
gcc version 10.2.1 20201103 (release) (GNU Arm Embedded Toolchain 10-2020-q4-major)
```
3. 删除下面的文件
```shell
rm -f toolchain/arm-none-eabi/include/c++/10.2.1/thread
rm -f toolchain/arm-none-eabi/include/c++/10.2.1/mutex
rm -f toolchain/arm-none-eabi/include/c++/10.2.1/condition_variable
rm -f toolchain/arm-none-eabi/include/c++/10.2.1/future
rm -f toolchain/arm-none-eabi/include/pthread.h
```
4. 请清除下面文件的内容,保留文件避免编译失败
```shell
toolchain/arm-none-eabi/include/sys/_pthreadtypes.h
```
5. 更新 `rtconfig.py` 文件,添加 c++ 编译参数:
```shell
CXXFLAGS = CFLAGS + ' -std=c++11 -fabi-version=0 -MMD -MP -MF'
```
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-04-27 flybreak the first version.
*/
#include <arm-tpl.h>
#include <sys/time.h>
#include <rtthread.h>
/* Map the C++ TPL realtime clock onto the C library wall clock.
 * Whole-second resolution only: tv_nsec is always reported as 0. */
extern "C" int __ARM_TPL_clock_realtime(__ARM_TPL_timespec_t* __ts)
{
    /* Fix: use time_t instead of unsigned int so the epoch value is not
     * truncated on targets where time_t is wider than 32 bits. */
    time_t t = std::time(nullptr);

    __ts->tv_sec = t;
    __ts->tv_nsec = 0;
    return 0;
}
/* Monotonic clock for the TPL layer, derived from the kernel tick counter. */
extern "C" int __ARM_TPL_clock_monotonic(__ARM_TPL_timespec_t* __ts)
{
    unsigned int ticks = rt_tick_get();

    __ts->tv_sec  = ticks / RT_TICK_PER_SECOND;
    __ts->tv_nsec = (ticks % RT_TICK_PER_SECOND) * NANOSECOND_PER_TICK;
    return 0;
}
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-04-27 flybreak the first version.
*/
#include <arm-tpl.h>
#include "tpl.h"
#include <new>
#include <cstdint>
#include <stdatomic.h>
/* Build the two semaphores plus the internal mutex used by the
 * condition-variable emulation; any allocation failure is fatal. */
arm_tpl_cv::arm_tpl_cv()
{
    s = rt_sem_create("semxs", 0, RT_IPC_FLAG_PRIO);
    if (s == nullptr)
        RT_ASSERT(0);

    h = rt_sem_create("semxh", 0, RT_IPC_FLAG_PRIO);
    if (h == nullptr)
    {
        rt_sem_delete(s);
        RT_ASSERT(0);
    }

    x = rt_mutex_create("mutx", RT_IPC_FLAG_PRIO);
    if (x != nullptr)
        return;   /* fully constructed */

    /* Mutex creation failed: release what was already created. */
    rt_sem_delete(s);
    rt_sem_delete(h);
    RT_ASSERT(0);
}
/* Release the IPC objects (reverse order of creation).
 * NOTE(review): assumes no thread is still blocked in wait()/timedwait()
 * when the object is destroyed — confirm with callers. */
arm_tpl_cv::~arm_tpl_cv()
{
    rt_mutex_delete(x);
    rt_sem_delete(h);
    rt_sem_delete(s);
}
/* Block the calling thread on this condition variable.
 *
 * Protocol: register as a waiter by posting 's' under the internal mutex
 * 'x', drop the caller's lock, sleep on 'h' until signal()/broadcast()
 * hands out a wake token, then re-acquire the caller's lock.
 *
 * Fix: the original's recursive/non-recursive branches were byte-for-byte
 * identical, so the redundant if/else pairs have been removed. */
void arm_tpl_cv::wait(rt_mutex_t lock, bool recursive)
{
    (void)recursive;   /* same code path either way */

    /* Register as a waiter. */
    while (rt_mutex_take(x, ARM_TPL_MAX_DELAY) != 0);
    rt_sem_release(s);
    rt_mutex_release(x);

    /* Release the caller's lock while sleeping. */
    rt_mutex_release(lock);

    /* Sleep until a wake token arrives. */
    while (rt_sem_take(h, ARM_TPL_MAX_DELAY) != 0);

    /* Re-acquire the caller's lock before returning. */
    while (rt_mutex_take(lock, ARM_TPL_MAX_DELAY) != 0);
}
/* Timed variant of wait(); timeout_ms is a relative timeout.
 * Returns 0 when a wake token was received within the timeout; on timeout
 * it returns -1 if this thread's waiter registration was already consumed,
 * or 1 after it deregisters itself.
 *
 * Fix: the original's recursive/non-recursive branches were byte-for-byte
 * identical, so the redundant if/else pairs have been removed. */
int arm_tpl_cv::timedwait(rt_mutex_t lock, bool recursive, unsigned int timeout_ms)
{
    (void)recursive;   /* same code path either way */
    int result = 0;

    /* Register as a waiter under the internal mutex. */
    while (rt_mutex_take(x, ARM_TPL_MAX_DELAY) != 0);
    rt_sem_release(s);
    rt_mutex_release(x);

    /* Release the caller's lock while sleeping. */
    rt_mutex_release(lock);

    if (rt_sem_take(h, rt_tick_from_millisecond(timeout_ms)) != 0)
    {
        /* Timed out: check (under 'x') whether a wake slipped in anyway. */
        while (rt_mutex_take(x, ARM_TPL_MAX_DELAY) != 0);
        if (rt_sem_take(h, 0) != 0)
        {
            /* No late wake token; try to take back our waiter token. */
            if (rt_sem_take(s, 0) != 0)
                result = -1;
            else
                result = 1;
        }
        rt_mutex_release(x);
    }

    /* Re-acquire the caller's lock before returning. */
    while (rt_mutex_take(lock, ARM_TPL_MAX_DELAY) != 0);
    return result;
}
/* Wake exactly one registered waiter, if any: consume one waiter token
 * from 's' and hand out one wake token on 'h', all under the internal
 * mutex 'x'. */
void arm_tpl_cv::signal()
{
    while (rt_mutex_take(x, ARM_TPL_MAX_DELAY) != 0);
    if (rt_sem_take(s, 0) == 0)
        rt_sem_release(h);
    rt_mutex_release(x);
}
/* Wake every currently-registered waiter: convert each waiter token on
 * 's' into a wake token on 'h', all under the internal mutex 'x'. */
void arm_tpl_cv::broadcast()
{
    while (rt_mutex_take(x, ARM_TPL_MAX_DELAY) != 0);

    auto waiters = s->value;   /* snapshot the current waiter count */
    while (waiters-- > 0)
    {
        while (rt_sem_take(s, ARM_TPL_MAX_DELAY) != 0);
        rt_sem_release(h);
    }

    rt_mutex_release(x);
}
/* Lazily create the arm_tpl_cv object behind a TPL condvar handle.
 * Thread-safe via compare-and-swap: the loser of a creation race deletes
 * its own object.  Returns 0 on success (or already created), -1 on
 * allocation failure. */
static int check_create(volatile __ARM_TPL_condvar_t *__vcv)
{
    if (__vcv->data != 0)
        return 0;   /* already initialised */

    uintptr_t desired = reinterpret_cast<uintptr_t>(new arm_tpl_cv());
    if (desired == 0)
        return -1;

    uintptr_t expected = 0;
    /* Another thread may have installed its object first; discard ours. */
    if (!atomic_compare_exchange_strong(&__vcv->data, &expected, desired))
        delete reinterpret_cast<arm_tpl_cv *>(desired);

    return 0;
}
/* C entry point: block on the condvar, releasing/re-taking the caller's
 * TPL mutex around the sleep. */
extern "C" int __ARM_TPL_condvar_wait(__ARM_TPL_condvar_t *__cv, __ARM_TPL_mutex_t *__m)
{
    volatile __ARM_TPL_condvar_t *vcv = __cv;

    if (check_create(vcv) != 0)
        return -1;

    auto *tm = (struct arm_tpl_mutex_struct *)(__m->data);
    ((arm_tpl_cv *)vcv->data)->wait(tm->mutex, tm->type == RECURSIVE);
    return 0;
}
/* C entry point: timed wait on the condvar with an absolute realtime
 * deadline.  Returns 0 on wake or timeout-with-deregistration, -1 on
 * error or plain timeout (mirroring timedwait()'s result). */
extern "C" int __ARM_TPL_condvar_timedwait(__ARM_TPL_condvar_t *__cv,
                                           __ARM_TPL_mutex_t *__m,
                                           __ARM_TPL_timespec_t *__ts)
{
    volatile __ARM_TPL_condvar_t *__vcv = __cv;
    if (check_create(__vcv) != 0)
        return -1;

    __ARM_TPL_timespec_t now;
    if (__ARM_TPL_clock_realtime(&now) != 0)
        return -1;

    struct arm_tpl_mutex_struct *tmutex = (struct arm_tpl_mutex_struct *)(__m->data);

    /* Convert the absolute deadline to a relative timeout.  Fix: compute
     * in signed 64-bit and clamp at 0 — the original unsigned arithmetic
     * wrapped an already-expired deadline into a near-infinite wait. */
    long long timeout_ms = (long long)(__ts->tv_sec - now.tv_sec) * 1000
                         + ((long long)__ts->tv_nsec - now.tv_nsec) / 1000000;
    if (timeout_ms < 0)
        timeout_ms = 0;

    if (((arm_tpl_cv *)__vcv->data)->timedwait(tmutex->mutex,
                                               tmutex->type == RECURSIVE,
                                               (unsigned int)timeout_ms) < 0)
        return -1;
    return 0;
}
/* C entry point: wake one waiter.  An uninitialised condvar (data == 0)
 * trivially has no waiters, so nothing needs to be created. */
extern "C" int __ARM_TPL_condvar_signal(__ARM_TPL_condvar_t *__cv)
{
    volatile __ARM_TPL_condvar_t *__vcv = __cv;
    if (__vcv->data != 0)
        ((arm_tpl_cv *) __vcv->data)->signal();
    return 0;
}
/* C entry point: wake all waiters.  An uninitialised condvar (data == 0)
 * trivially has no waiters, so nothing needs to be created. */
extern "C" int __ARM_TPL_condvar_broadcast(__ARM_TPL_condvar_t *__cv)
{
    volatile __ARM_TPL_condvar_t *__vcv = __cv;
    if (__vcv->data != 0)
        ((arm_tpl_cv *) __vcv->data)->broadcast();
    return 0;
}
/* Free the lazily-created condvar object, if any, and reset the handle.
 * NOTE(review): not safe against concurrent wait/signal on the same
 * handle — presumably the caller guarantees exclusive access; confirm. */
extern "C" int __ARM_TPL_condvar_destroy(__ARM_TPL_condvar_t *__cv)
{
    volatile __ARM_TPL_condvar_t *__vcv = __cv;
    if (__vcv->data != 0)
    {
        delete (arm_tpl_cv *) __vcv->data;
        __vcv->data = 0;
    }
    return 0;
}
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-04-27 flybreak the first version.
*/
#include <arm-tpl.h>
/* One-shot initialisation hook used by the TPL (std::call_once support).
 * NOTE(review): the flag check/update below is not atomic — two threads
 * racing here could both run __init_routine.  Confirm whether the upper
 * layer serialises calls; if not, this needs an atomic exchange. */
extern "C" int __ARM_TPL_execute_once(__ARM_TPL_exec_once_flag *__flag,
                                      void (*__init_routine)(void))
{
    if (*__flag == 0)
    {
        __init_routine();
        *__flag = 1;
    }
    return 0;
}
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-04-27 flybreak the first version.
*/
#include <arm-tpl.h>
#include <cstdint>
#include <stdatomic.h>
#include "tpl.h"
/* Lazily allocate the RT-Thread mutex behind a TPL mutex handle.
 * Thread-safe via compare-and-swap: the loser of a creation race deletes
 * its own allocation.  Returns 0 on success (or already created), -1 on
 * allocation failure.
 *
 * Fix: the original created exactly the same mutex in both the recursive
 * and non-recursive branches, so the duplicate branch was removed; the
 * requested flavour is still recorded in 'type' for the TPL layer. */
static int check_create(volatile __ARM_TPL_mutex_t *__vm, bool recursive = false)
{
    if (__vm->data != 0)
        return 0;   /* already initialised */

    arm_tpl_mutex_struct *mutex_p =
        (arm_tpl_mutex_struct *)rt_malloc(sizeof(arm_tpl_mutex_struct));
    if (mutex_p == nullptr)
        return -1;

    mutex_p->mutex = rt_mutex_create("mutexx", RT_IPC_FLAG_PRIO);
    if (mutex_p->mutex == nullptr)
    {
        rt_free(mutex_p);
        return -1;
    }
    mutex_p->type = recursive ? RECURSIVE : NORMAL;

    uintptr_t mut_null = 0;
    uintptr_t mut_new = reinterpret_cast<uintptr_t>(mutex_p);
    /* Lost the race: another thread installed its mutex first. */
    if (!atomic_compare_exchange_strong(&__vm->data, &mut_null, mut_new))
    {
        rt_mutex_delete(mutex_p->mutex);
        rt_free(mutex_p);
    }
    return 0;
}
/* Take the underlying RT-Thread mutex with the given tick timeout.
 * Returns 0 on success, -1 on failure/timeout.
 * Fix: NORMAL and RECURSIVE used byte-identical code paths in the
 * original; the dead if/else was collapsed. */
static int mutexLock(arm_tpl_mutex_struct *mutex_p, rt_tick_t timeOut)
{
    return (rt_mutex_take(mutex_p->mutex, timeOut) == 0) ? 0 : -1;
}
/* Release the underlying RT-Thread mutex; always reports success.
 * Fix: NORMAL and RECURSIVE used byte-identical code paths in the
 * original; the dead if/else was collapsed. */
static int mutexUnlock(arm_tpl_mutex_struct *mutex_p)
{
    rt_mutex_release(mutex_p->mutex);
    return 0;
}
/* Initialise a TPL recursive mutex handle.  Allocation is done eagerly
 * here via check_create, which records the RECURSIVE flavour. */
extern "C" int __ARM_TPL_recursive_mutex_init(__ARM_TPL_mutex_t *__m)
{
    volatile __ARM_TPL_mutex_t *__vm = __m;
    return check_create(__vm, true);
}
/* Lock the TPL mutex, creating it on first use.  Retries until the lock
 * is actually obtained; returns -1 only if creation fails. */
extern "C" int __ARM_TPL_mutex_lock(__ARM_TPL_mutex_t *__m)
{
    volatile __ARM_TPL_mutex_t *vm = __m;

    if (check_create(vm))
        return -1;

    while (mutexLock((arm_tpl_mutex_struct *)(vm->data), ARM_TPL_MAX_DELAY) != 0)
        ;
    return 0;
}
/* Try to take the TPL mutex without blocking (timeout 0), creating it on
 * first use.  Returns 0 on success, -1 if held or creation failed. */
extern "C" int __ARM_TPL_mutex_trylock(__ARM_TPL_mutex_t *__m)
{
    volatile __ARM_TPL_mutex_t *__vm = __m;
    if (check_create(__vm))
        return -1;
    return mutexLock((arm_tpl_mutex_struct *)(__vm->data), 0);
}
/* Release the TPL mutex.
 * NOTE(review): dereferences __vm->data without a null check — unlocking
 * a never-locked handle (data == 0) would crash; confirm callers. */
extern "C" int __ARM_TPL_mutex_unlock(__ARM_TPL_mutex_t *__m)
{
    volatile __ARM_TPL_mutex_t *__vm = __m;
    return mutexUnlock((arm_tpl_mutex_struct *)(__vm->data));
}
/* Destroy the TPL mutex: delete the RT-Thread mutex, free the wrapper
 * struct, and reset the handle.  A never-created handle is a no-op. */
extern "C" int __ARM_TPL_mutex_destroy(__ARM_TPL_mutex_t *__m)
{
    volatile __ARM_TPL_mutex_t *vm = __m;

    if (vm->data == 0)
        return 0;

    arm_tpl_mutex_struct *mp = (arm_tpl_mutex_struct *)(vm->data);
    rt_mutex_delete(mp->mutex);
    rt_free((void *)mp);
    vm->data = 0;
    return 0;
}
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-04-27 flybreak the first version.
*/
#include <arm-tpl.h>
#include "tpl.h"
#include <cstdio>
#include <pthread.h>
/* Create a TPL thread on top of the pthread layer.
 * On success the heap-allocated pthread_t handle is stored in __t->data
 * and 0 is returned; -1 on failure.
 * TODO: the handle is never freed after join/detach — known leak
 * inherited from the original implementation. */
extern "C" int __ARM_TPL_thread_create(__ARM_TPL_thread_t *__t,
                                       void *(*__func)(void *),
                                       void *__arg)
{
    pthread_t *pid = (pthread_t *)rt_malloc(sizeof(pthread_t));
    if (pid == nullptr)
        return -1;

    if (pthread_create(pid, RT_NULL, __func, __arg) == 0)
    {
        __t->data = (std::uintptr_t)pid;
        return 0;
    }

    /* Fix: do not leak the handle when thread creation fails. */
    rt_free(pid);
    return -1;
}
/* Three-way comparison of two TPL thread ids: 1 / -1 / 0. */
extern "C" int __ARM_TPL_thread_id_compare(__ARM_TPL_thread_id __tid1,
                                           __ARM_TPL_thread_id __tid2)
{
    if (__tid1 == __tid2)
        return 0;
    return (__tid1 > __tid2) ? 1 : -1;
}
/* Return the calling thread's id (the pthread_t value itself). */
extern "C" __ARM_TPL_thread_id __ARM_TPL_thread_get_current_id()
{
    return (__ARM_TPL_thread_id)pthread_self();
}
/* Extract the pthread id from the heap pthread_t handle stored in
 * __t->data by __ARM_TPL_thread_create. */
extern "C" __ARM_TPL_thread_id __ARM_TPL_thread_get_id(
    const __ARM_TPL_thread_t *__t)
{
    return (__ARM_TPL_thread_id)((*(pthread_t *)__t->data));
}
/* Join the thread; the thread's return value is discarded.
 * NOTE(review): the heap pthread_t in __t->data is never freed here —
 * known leak (see __ARM_TPL_thread_create). */
extern "C" int __ARM_TPL_thread_join(__ARM_TPL_thread_t *__t)
{
    pthread_join((*(pthread_t *)__t->data), RT_NULL);
    return 0;
}
/* Detach the thread so its resources are reclaimed on exit.
 * NOTE(review): the heap pthread_t in __t->data is never freed here —
 * known leak (see __ARM_TPL_thread_create). */
extern "C" int __ARM_TPL_thread_detach(__ARM_TPL_thread_t *__t)
{
    pthread_detach((*(pthread_t *)__t->data));
    return 0;
}
/* Yield the processor to another ready RT-Thread task. */
extern "C" void __ARM_TPL_thread_yield()
{
    rt_thread_yield();
}
extern "C" int __ARM_TPL_thread_nanosleep(const __ARM_TPL_timespec_t *__req,
__ARM_TPL_timespec_t *__rem)
{
return nanosleep(__req, rem);
}
/* Concurrency hint for std::thread::hardware_concurrency().
 * Hard-coded to 1 — presumably targeting single-core MCUs; revisit for
 * SMP configurations. */
extern "C" unsigned __ARM_TPL_thread_hw_concurrency()
{
    return 1;
}
/* Create a TLS key with an optional per-thread destructor.
 * Returns 0 and stores the key in *__key on success, -1 on failure. */
extern "C" int __ARM_TPL_tls_create(__ARM_TPL_tls_key *__key,
                                    void (*__at_exit)(void *))
{
    pthread_key_t key;

    if (pthread_key_create(&key, __at_exit) != 0)
        return -1;

    *__key = key;
    return 0;
}
/* Fetch this thread's value for the given TLS key (null if unset). */
extern "C" void *__ARM_TPL_tls_get(__ARM_TPL_tls_key __key)
{
    return pthread_getspecific(__key);
}
/* Store this thread's value for the given TLS key.
 * Returns 0 on success, -1 on failure. */
extern "C" int __ARM_TPL_tls_set(__ARM_TPL_tls_key __key, void *__p)
{
    return (pthread_setspecific(__key, __p) == 0) ? 0 : -1;
}
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-04-27 flybreak the first version.
*/
/* Internal definitions shared by the C++ TPL (thread porting layer) glue
 * code; C translation units only see the init hook. */
#pragma once

#ifndef __cplusplus
void ARMTPLInit();
#else

#include <rtthread.h>

/* Timeout (ticks) used when retrying blocking IPC calls in the glue. */
#define ARM_TPL_MAX_DELAY 1000
/* Stack size constant for TPL-created threads (not used in this chunk). */
#define ARM_TPL_THREAD_STACK_SIZE 4096

/* Mutex flavour requested by the TPL, recorded alongside the mutex. */
enum arm_tpl_mutex_type
{
    NORMAL,
    RECURSIVE,
};

/* Backing storage referenced by a __ARM_TPL_mutex_t handle. */
struct arm_tpl_mutex_struct
{
    rt_mutex_t mutex;        /* underlying RT-Thread mutex */
    arm_tpl_mutex_type type; /* NORMAL or RECURSIVE */
};

/* Bookkeeping for a TPL thread (task plus join/detach rendezvous). */
struct arm_tpl_thread_struct
{
    rt_thread_t task;
    void *(*func)(void *);   /* caller-supplied thread entry */
    void *arg;               /* argument passed to func */
    rt_sem_t join_sem;
    rt_sem_t detach_sem;
};

/* Condition-variable emulation built from two counting semaphores and a
 * mutex protecting the wait/signal handshake (see the .cpp for the
 * protocol). */
class arm_tpl_cv
{
public:
    arm_tpl_cv();
    ~arm_tpl_cv();
    void wait(rt_mutex_t lock, bool recursive);
    int timedwait(rt_mutex_t lock, bool recursive, unsigned int timeout_ms);
    void signal();
    void broadcast();
private:
    rt_sem_t s;   /* counts registered waiters */
    rt_sem_t h;   /* wake tokens handed out by signal()/broadcast() */
    rt_mutex_t x; /* serialises the wait/signal handshake */
};
#endif
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-04-27 peterfan Add copyright header.
*/
#include <rthw.h>
#include <stdint.h>
#include <stdbool.h>
/*
* override gcc builtin atomic function for std::atomic<int64_t>, std::atomic<uint64_t>
* @see https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
*/
/* 64-bit atomic load emulation: serialise with the global interrupt
 * lock.  'memorder' is ignored.  NOTE(review): interrupt masking only
 * protects against local preemption — not SMP-safe; confirm targets. */
uint64_t __atomic_load_8(volatile void *ptr, int memorder)
{
    volatile uint64_t *p = (volatile uint64_t *)ptr;
    uint64_t value;
    register rt_base_t level;

    level = rt_hw_interrupt_disable();
    value = *p;
    rt_hw_interrupt_enable(level);

    return value;
}
/* 64-bit atomic store emulation under the interrupt lock; 'memorder'
 * is ignored. */
void __atomic_store_8(volatile void *ptr, uint64_t val, int memorder)
{
    volatile uint64_t *val_ptr = (volatile uint64_t *)ptr;
    register rt_base_t level;
    level = rt_hw_interrupt_disable();
    *val_ptr = val;
    rt_hw_interrupt_enable(level);
}
/* 64-bit atomic exchange emulation: store 'val' and return the previous
 * value, all under the interrupt lock; 'memorder' is ignored. */
uint64_t __atomic_exchange_8(volatile void *ptr, uint64_t val, int memorder)
{
    volatile uint64_t *val_ptr = (volatile uint64_t *)ptr;
    register rt_base_t level;
    uint64_t tmp;
    level = rt_hw_interrupt_disable();
    tmp = *val_ptr;
    *val_ptr = val;
    rt_hw_interrupt_enable(level);
    return tmp;
}
/* 64-bit compare-exchange emulation.  On mismatch the current value is
 * written back through 'expected', matching the GCC builtin contract;
 * 'weak' and the memory orders are ignored. */
bool __atomic_compare_exchange_8(volatile void *ptr, volatile void *expected, uint64_t desired, bool weak, int success_memorder, int failure_memorder)
{
    volatile uint64_t *val_ptr = (volatile uint64_t *)ptr;
    volatile uint64_t *expected_ptr = (volatile uint64_t *)expected;
    register rt_base_t level;
    bool exchanged;
    level = rt_hw_interrupt_disable();
    if (*val_ptr == *expected_ptr)
    {
        *val_ptr = desired;
        exchanged = true;
    }
    else
    {
        /* Report the observed value back to the caller. */
        *expected_ptr = *val_ptr;
        exchanged = false;
    }
    rt_hw_interrupt_enable(level);
    return exchanged;
}
/* Generator for the __atomic_fetch_<op>_8 family: applies 'val' with the
 * given operator under the interrupt lock and returns the OLD value;
 * 'memorder' is ignored. */
#define __atomic_fetch_op_8(OPNAME, OP) \
uint64_t __atomic_fetch_##OPNAME##_8(volatile void *ptr, uint64_t val, int memorder) {\
    volatile uint64_t* val_ptr = (volatile uint64_t*)ptr;\
    register rt_base_t level;\
    uint64_t tmp;\
    level = rt_hw_interrupt_disable();\
    tmp = *val_ptr;\
    *val_ptr OP##= val;\
    rt_hw_interrupt_enable(level);\
    return tmp;\
}

/* Instantiations (leading spaces on "and"/"or" kept as in the original). */
__atomic_fetch_op_8(add, +)
__atomic_fetch_op_8(sub, -)
__atomic_fetch_op_8( and, &)
__atomic_fetch_op_8( or, |)
__atomic_fetch_op_8(xor, ^)
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-04-27 peterfan Add copyright header.
*/
/* ===---------- emutls.c - Implements __emutls_get_address ---------------===
*
* The LLVM Compiler Infrastructure
*
* This file is dual licensed under the MIT and the University of Illinois Open
* Source Licenses. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*/
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#define COMPILE_TIME_ASSERT(x)
extern int pthread_key_create(pthread_key_t *key, void (*destructor)(void *));
extern int pthread_key_delete(pthread_key_t key);
extern void *pthread_getspecific(pthread_key_t key);
extern int pthread_setspecific(pthread_key_t key, const void *value);
/* Default is not to use posix_memalign, so systems like Android
* can use thread local data without heavier POSIX memory allocators.
*/
#ifndef EMUTLS_USE_POSIX_MEMALIGN
#define EMUTLS_USE_POSIX_MEMALIGN 0
#endif
/* For every TLS variable xyz,
* there is one __emutls_control variable named __emutls_v.xyz.
* If xyz has non-zero initial value, __emutls_v.xyz's "value"
* will point to __emutls_t.xyz, which has the initial value.
*/
/* Per-TLS-variable control block emitted by the compiler (one
 * __emutls_v.xyz instance per TLS variable xyz). */
typedef struct __emutls_control
{
    size_t size; /* size of the object in bytes */
    size_t align; /* alignment of the object in bytes */
    union
    {
        uintptr_t index; /* data[index-1] is the object address */
        void *address; /* object address, when in single thread env */
    } object;
    void *value; /* null or non-zero initial value for the object */
} __emutls_control;
/* Allocate 'size' bytes aligned to 'align'.  The non-posix_memalign path
 * over-allocates and stashes the raw malloc pointer one slot before the
 * aligned base so emutls_memalign_free can recover it. */
static __inline void *emutls_memalign_alloc(size_t align, size_t size)
{
    void *base;
#if EMUTLS_USE_POSIX_MEMALIGN
    if (posix_memalign(&base, align, size) != 0)
        abort();
#else
#define EXTRA_ALIGN_PTR_BYTES (align - 1 + sizeof(void *))
    char *object;
    if ((object = malloc(EXTRA_ALIGN_PTR_BYTES + size)) == NULL)
        abort();
    base = (void *)(((uintptr_t)(object + EXTRA_ALIGN_PTR_BYTES)) & ~(uintptr_t)(align - 1));
    /* Remember the original malloc pointer for later free(). */
    ((void **)base)[-1] = object;
#endif
    return base;
}
/* Free a block obtained from emutls_memalign_alloc. */
static __inline void emutls_memalign_free(void *base)
{
#if EMUTLS_USE_POSIX_MEMALIGN
    free(base);
#else
    /* The mallocated address is in ((void**)base)[-1] */
    free(((void **)base)[-1]);
#endif
}
/* Emulated TLS objects are always allocated at run-time. */
/* Allocate and initialise one TLS object described by 'control':
 * zero-filled, or copied from control->value when an initialiser exists. */
static __inline void *emutls_allocate_object(__emutls_control *control)
{
    /* Use standard C types, check with gcc's emutls.o. */
    typedef unsigned int gcc_word __attribute__((mode(word)));
    typedef unsigned int gcc_pointer __attribute__((mode(pointer)));
    COMPILE_TIME_ASSERT(sizeof(size_t) == sizeof(gcc_word));
    COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(gcc_pointer));
    COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(void *));
    size_t size = control->size;
    size_t align = control->align;
    /* Object alignment is at least pointer-sized (the slot before the
     * base stores the raw allocation pointer). */
    if (align < sizeof(void *))
        align = sizeof(void *);
    /* Make sure that align is power of 2. */
    if ((align & (align - 1)) != 0)
        abort();
    void *base = emutls_memalign_alloc(align, size);
    if (control->value)
        memcpy(base, control->value, size);
    else
        memset(base, 0, size);
    return base;
}
/* Serialises index assignment across threads. */
static pthread_mutex_t emutls_mutex = PTHREAD_MUTEX_INITIALIZER;
static size_t emutls_num_object = 0; /* number of allocated TLS objects */

/* Per-thread array of object addresses, indexed by (index - 1). */
typedef struct emutls_address_array
{
    uintptr_t size; /* number of elements in the 'data' array */
    void *data[];
} emutls_address_array;

/* TLS key whose destructor frees this thread's objects at thread exit. */
static pthread_key_t emutls_pthread_key;
/* Thread-exit destructor: free every object this thread allocated, then
 * the address array itself. */
static void emutls_key_destructor(void *ptr)
{
    emutls_address_array *array = (emutls_address_array *)ptr;
    uintptr_t i;

    for (i = 0; i < array->size; ++i)
    {
        void *obj = array->data[i];
        if (obj)
            emutls_memalign_free(obj);
    }

    free(ptr);
}
/* One-time setup: create the process-wide TLS key (via pthread_once). */
static void emutls_init(void)
{
    if (pthread_key_create(&emutls_pthread_key, emutls_key_destructor) != 0)
        abort();
}
/* Returns control->object.index; set index if not allocated yet. */
static __inline uintptr_t emutls_get_index(__emutls_control *control)
{
uintptr_t index = __atomic_load_n(&control->object.index, __ATOMIC_ACQUIRE);
if (!index)
{
static pthread_once_t once = PTHREAD_ONCE_INIT;
pthread_once(&once, emutls_init);
pthread_mutex_lock(&emutls_mutex);
index = control->object.index;
if (!index)
{
index = ++emutls_num_object;
__atomic_store_n(&control->object.index, index, __ATOMIC_RELEASE);
}
pthread_mutex_unlock(&emutls_mutex);
}
return index;
}
/* Updates newly allocated thread local emutls_address_array. */
/* Record the array size and publish the (re)allocated array into this
 * thread's TLS slot; aborts if the allocation failed (array == NULL). */
static __inline void emutls_check_array_set_size(emutls_address_array *array,
                                                 uintptr_t size)
{
    if (array == NULL)
        abort();
    array->size = size;
    pthread_setspecific(emutls_pthread_key, (void *)array);
}
/* Returns the new 'data' array size, number of elements,
* which must be no smaller than the given index.
*/
/* Number of 'data' elements needed to hold an entry for 'index':
 * one extra slot is reserved for the size field, and the total
 * allocation is rounded up to a multiple of 16 pointers. */
static __inline uintptr_t emutls_new_data_array_size(uintptr_t index)
{
    uintptr_t total = index + 1;   /* +1 for the size slot */
    return ((total + 15) & ~((uintptr_t)15)) - 1;
}
/* Returns the thread local emutls_address_array.
* Extends its size if necessary to hold address at index.
*/
/* Return this thread's address array, allocating or growing it (and
 * zero-filling the new tail) so it can hold an entry for 'index'. */
static __inline emutls_address_array *
emutls_get_address_array(uintptr_t index)
{
    emutls_address_array *array = pthread_getspecific(emutls_pthread_key);
    if (array == NULL)
    {
        uintptr_t new_size = emutls_new_data_array_size(index);
        /* +1 element: the leading slot holds the size field. */
        array = calloc(new_size + 1, sizeof(void *));
        emutls_check_array_set_size(array, new_size);
    }
    else if (index > array->size)
    {
        uintptr_t orig_size = array->size;
        uintptr_t new_size = emutls_new_data_array_size(index);
        array = realloc(array, (new_size + 1) * sizeof(void *));
        /* On realloc failure array is NULL and check_array_set_size
         * aborts below. */
        if (array)
            memset(array->data + orig_size, 0,
                   (new_size - orig_size) * sizeof(void *));
        emutls_check_array_set_size(array, new_size);
    }
    return array;
}
/* Entry point generated for every emulated-TLS access: map the control
 * block to this thread's lazily-allocated object instance. */
void *__emutls_get_address(void *control)
{
    uintptr_t index = emutls_get_index((__emutls_control *)control);
    emutls_address_array *array = emutls_get_address_array(index);
    if (array->data[index - 1] == NULL)
        array->data[index - 1] = emutls_allocate_object((__emutls_control *)control);
    return array->data[index - 1];
}
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-04-27 flybreak the first version.
*/
#pragma once
#include <cstdlib>
#include <system_error>
#include <chrono>
#include <ratio>
#include <rtthread.h>
#define RT_USING_CPP_EXCEPTION
/* Report a failed system call: throw std::system_error when C++
 * exceptions are enabled, otherwise abort.  RT_USING_CPP_EXCEPTION is
 * force-defined just above, so the throwing path is taken in this build. */
inline void throw_system_error(int err, const char *what_msg)
{
#ifdef RT_USING_CPP_EXCEPTION
    throw std::system_error(std::error_code(err, std::system_category()), what_msg);
#else
    (void)err;
    (void)what_msg;
    ::abort();
#endif
}
/* Clock backed by the kernel tick counter, period 1/RT_TICK_PER_SECOND.
 * NOTE(review): the standard Clock requirement is named 'is_steady';
 * this class exposes 'is_ready' instead — looks like a typo, but renaming
 * would change the interface, so it is only flagged here.  now() is
 * declared here and implemented elsewhere. */
class tick_clock
{
public:
    typedef clock_t rep;
    typedef std::ratio<1, RT_TICK_PER_SECOND> period;
    typedef std::chrono::duration<tick_clock::rep, tick_clock::period> duration;
    typedef std::chrono::time_point<tick_clock> time_point;
    constexpr static bool is_ready = true;
    static time_point now();
};
/* Wall-clock time with nanosecond duration type; now() is declared here
 * and implemented elsewhere. */
class real_time_clock
{
public:
    typedef std::chrono::nanoseconds duration;
    typedef duration::rep rep;
    typedef duration::period period;
    typedef std::chrono::time_point<real_time_clock, duration> time_point;
    static constexpr bool is_steady = true;
    static time_point
    now() noexcept;
};
#pragma once
#if __cplusplus < 201103L
#error "C++ version lower than C++11"
#endif
#include <pthread.h>
#include <system_error>
#include <chrono>
#include <utility>
#include <functional>
#include <memory>
#include "__utils.h"
#include "mutex"
#define rt_cpp_cond_var pthread_cond_t
namespace std
{
enum class cv_status
{
no_timeout,
timeout
};
/* Minimal std::condition_variable built directly on a pthread condvar.
 * Mirrors the standard interface: wait/wait_until/wait_for plus
 * notify_one/notify_all; native_handle() exposes the pthread_cond_t. */
class condition_variable
{
public:
    typedef rt_cpp_cond_var *native_handle_type;

    condition_variable(const condition_variable &) = delete;
    condition_variable &operator=(const condition_variable &) = delete;
    condition_variable() = default;

    ~condition_variable()
    {
        pthread_cond_destroy(&_m_cond);
    }

    /* Defined out of line; throws system_error on pthread failure. */
    void wait(unique_lock<mutex> &lock);

    void notify_one() noexcept
    {
        pthread_cond_signal(&_m_cond);
    }

    void notify_all() noexcept
    {
        pthread_cond_broadcast(&_m_cond);
    }

    template <class Predicate>
    void wait(unique_lock<mutex> &lock, Predicate pred)
    {
        while (!pred())
            wait(lock);
    }

    template <class Clock, class Duration>
    cv_status wait_until(unique_lock<mutex> &lock,
                         const chrono::time_point<Clock, Duration> &abs_time)
    {
        if (!lock.owns_lock())
            /* Fix: error message said "wailt_until". */
            throw_system_error((int)errc::operation_not_permitted,
                               "condition_variable::wait_until: waiting on unlocked lock");
        /* Split the absolute time into whole seconds plus leftover ns for
         * pthread_cond_timedwait.  NOTE(review): assumes Clock's epoch
         * matches the pthread wait clock — confirm. */
        auto secs = chrono::time_point_cast<chrono::seconds>(abs_time);
        auto nano_secs = chrono::duration_cast<chrono::nanoseconds>(abs_time - secs);
        struct timespec c_abs_time = {static_cast<time_t>(secs.time_since_epoch().count()),
                                      static_cast<long>(nano_secs.count())};
        pthread_cond_timedwait(&_m_cond, lock.mutex()->native_handle(), &c_abs_time);
        /* Timeout is judged by re-reading the clock rather than the
         * pthread return code, so spurious wakeups classify correctly. */
        return (Clock::now() < abs_time) ? cv_status::no_timeout : cv_status::timeout;
    }

    template <class Clock, class Duration, class Predicate>
    bool wait_until(unique_lock<mutex> &lock,
                    const chrono::time_point<Clock, Duration> &abs_time,
                    Predicate pred)
    {
        while (!pred())
            if (wait_until(lock, abs_time) == cv_status::timeout)
                return pred();
        return true;
    }

    template <class Rep, class Period>
    cv_status wait_for(unique_lock<mutex> &lock,
                       const chrono::duration<Rep, Period> &rel_time)
    {
        return wait_until(lock, real_time_clock::now() + rel_time);
    }

    template <class Rep, class Period, class Predicate>
    bool wait_for(unique_lock<mutex> &lock,
                  const chrono::duration<Rep, Period> &rel_time,
                  Predicate pred)
    {
        return wait_until(lock, real_time_clock::now() + rel_time, std::move(pred));
    }

    native_handle_type native_handle()
    {
        return &_m_cond;
    }

private:
    rt_cpp_cond_var _m_cond = PTHREAD_COND_INITIALIZER;
};
// Lockable is only required to have `lock()` and `unlock()`
/* condition_variable_any: wraps a plain condition_variable plus an
 * internal mutex so any Lockable type can be waited on.  The internal
 * mutex is held via shared_ptr so a wait in flight keeps it alive. */
class condition_variable_any
{
private:
    condition_variable _m_cond;
    shared_ptr<mutex> _m_mtx;

    // so that Lockable automatically unlocks when waiting and locks after waiting
    /* RAII helper: unlocks on construction, re-locks on destruction. */
    template <class Lockable>
    struct unlocker
    {
        Lockable &_m_lock;
        explicit unlocker(Lockable &lk)
            : _m_lock(lk)
        {
            _m_lock.unlock();
        }
        ~unlocker()
        {
            _m_lock.lock();
        }
        unlocker(const unlocker &) = delete;
        unlocker &operator=(const unlocker &) = delete;
    };

public:
    condition_variable_any() : _m_mtx(std::make_shared<mutex>()) {}
    ~condition_variable_any() = default;
    condition_variable_any(const condition_variable_any &) = delete;
    condition_variable_any &operator=(const condition_variable_any &) = delete;

    /* Notifications take the internal mutex, serialising against the
     * lock handover performed inside wait(). */
    void notify_one() noexcept
    {
        lock_guard<mutex> lk(*_m_mtx);
        _m_cond.notify_one();
    }
    void notify_all() noexcept
    {
        lock_guard<mutex> lk(*_m_mtx);
        _m_cond.notify_all();
    }

    template <class Lock>
    void wait(Lock &lock)
    {
        shared_ptr<mutex> mut = _m_mtx;   /* keep the mutex alive */
        unique_lock<mutex> lk(*mut);
        unlocker<Lock> auto_lk(lock); // unlock here
        /* Ownership moves to lk2 so its unlock on wake happens before
         * auto_lk's destructor re-locks the caller's Lockable. */
        unique_lock<mutex> lk2(std::move(lk));
        _m_cond.wait(lk2);
    } // mut.unlock(); lock.lock();

    template <class Lock, class Predicate>
    void wait(Lock &lock, Predicate pred)
    {
        while (!pred())
            wait(lock);
    }

    template <class Lock, class Clock, class Duration>
    cv_status wait_until(Lock &lock,
                         const chrono::time_point<Clock, Duration> &abs_time)
    {
        shared_ptr<mutex> mut = _m_mtx;
        unique_lock<mutex> lk(*mut);
        unlocker<Lock> auto_lk(lock); // unlock here
        unique_lock<mutex> lk2(std::move(lk));
        return _m_cond.wait_until(lk2, abs_time);
    }

    template <class Lock, class Clock, class Duration, class Predicate>
    bool wait_until(Lock &lock,
                    const chrono::time_point<Clock, Duration> &abs_time,
                    Predicate pred)
    {
        while (!pred())
            if (wait_until(lock, abs_time) == cv_status::timeout)
                return pred();
        return true;
    }

    template <class Lock, class Rep, class Period>
    cv_status wait_for(Lock &lock,
                       const chrono::duration<Rep, Period> &rel_time)
    {
        return wait_until(lock, real_time_clock::now() + rel_time);
    }

    template <class Lock, class Rep, class Period, class Predicate>
    bool wait_for(Lock &lock,
                  const chrono::duration<Rep, Period> &rel_time,
                  Predicate pred)
    {
        return wait_until(lock, real_time_clock::now() + rel_time, std::move(pred));
    }
};
void notify_all_at_thread_exit(condition_variable &cond, unique_lock<mutex> lk);
} // namespace std
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-04-27 flybreak the first version.
*/
#include "condition_variable"
namespace std
{
/* Block on the pthread condvar; a non-zero pthread error is converted
 * into a std::system_error via throw_system_error. */
void condition_variable::wait(unique_lock<mutex>& lock)
{
    const int err = pthread_cond_wait(&_m_cond, lock.mutex()->native_handle());

    if (err != 0)
    {
        throw_system_error(err, "condition_variable::wait: failed to wait on a condition");
    }
}
/* Fallback implementation: without TLS-based deferral the lock is
 * released and waiters are notified immediately, not at thread exit —
 * weaker than the standard's contract. */
void notify_all_at_thread_exit(condition_variable& cond, unique_lock<mutex> lk)
{
    // TLS currently not available
    mutex* mut = lk.release();
    mut->unlock();
    cond.notify_all();
}
} // namespace std
#pragma once
#if __cplusplus < 201103L
#error "C++ version lower than C++11"
#endif
#include <mutex>
#include <condition_variable>
#include <memory>
#include <chrono>
#include <cassert>
namespace std {
enum class future_status {
ready,
timeout,
deferred
};
namespace detail {
/* Type-erased result slot shared between promise and future.
 * 'v_' holds the produced value (heap-allocated by the derived class),
 * 'd_' knows how to delete it, and m_/c_ implement the ready-wait. */
class shared_state_base {
protected:
    typedef void (*deleter_fn)(void *v);
    using scoped_lock = std::lock_guard<std::mutex>;
    using unique_lock = std::unique_lock<std::mutex>;
public:
    explicit shared_state_base(deleter_fn d) : v_(nullptr), d_(d), valid_(true) {}
    /* Deleter tolerates null: delete on a null pointer is a no-op. */
    ~shared_state_base() { d_(v_); }
    /* Non-copyable, non-movable — shared only through shared_ptr. */
    shared_state_base(shared_state_base &&other) = delete;
    shared_state_base(const shared_state_base &other) = delete;
    shared_state_base &operator=(shared_state_base &&other) = delete;
    shared_state_base &operator=(const shared_state_base &other) = delete;

    /* Block until a value has been stored. */
    void wait() {
        unique_lock lock(m_);
        c_.wait(lock, [this] { return has_value(); });
    }

    /* Bounded waits mapping directly onto std::future_status. */
    template <class Rep, class Period>
    std::future_status
    wait_for(const std::chrono::duration<Rep, Period> &rel_time) {
        unique_lock lock(m_);
        if (c_.wait_for(lock, rel_time, [this] { return has_value(); })) {
            return std::future_status::ready;
        }
        return std::future_status::timeout;
    }

    template <class Clock, class Duration>
    std::future_status
    wait_until(const std::chrono::time_point<Clock, Duration> &abs_time) {
        unique_lock lock(m_);
        if (c_.wait_until(lock, abs_time, [this] { return has_value(); })) {
            return std::future_status::ready;
        }
        return std::future_status::timeout;
    }

protected:
    /* "ready" is encoded as v_ != nullptr; callers must hold m_. */
    bool has_value() { return v_ != nullptr; }

protected:
    std::mutex m_;
    std::condition_variable c_;
    void *v_;       /* the stored result, or nullptr while pending */
    deleter_fn d_;  /* how to destroy v_ */
    bool valid_;    /* cleared after get() retrieves the value */
};
/* Concrete result slot for type R.  The value is heap-allocated by set()
 * and returned by reference from get(); get() also clears valid_ so a
 * second retrieval trips the assert. */
template <typename R>
class shared_state: public shared_state_base {
public:
    shared_state() :shared_state_base(default_deleter_) {}
    ~shared_state() {}

    /* Blocks until set(); the object remains owned by the state and is
     * destroyed with it. */
    R &get() {
        wait();
        scoped_lock lock(m_);
        assert(valid_);
        valid_ = false;
        return *(static_cast<R *>(v_));
    }

    /* Store a copy of v and wake one waiter; double-set asserts. */
    void set(const R &v) {
        scoped_lock lock(m_);
        assert(!has_value());
        v_ = new R(v);
        valid_ = true;
        c_.notify_one();
    }

    /* Move v into the state and wake one waiter; double-set asserts. */
    void set(R &&v) {
        scoped_lock lock(m_);
        assert(!has_value());
        v_ = new R(std::move(v));
        valid_ = true;
        c_.notify_one();
    }

    bool valid() {
        scoped_lock lock(m_);
        return valid_;
    }

private:
    static void default_deleter_(void *v) { delete static_cast<R *>(v); }
};
} // namespace detail
/* Placeholder: shared_future is an empty stub (future::share() returns a
 * default-constructed instance with no state). */
template <typename R>
class shared_future {
};
/* Minimal std::future over a shared_ptr'd shared_state.  Move-only,
 * mirroring the standard interface.
 * NOTE(review): get()/valid()/wait() dereference state_ without a check,
 * so calling them on a default-constructed or moved-from future crashes
 * instead of throwing future_error. */
template <typename R>
class future {
    using state_type = std::shared_ptr<detail::shared_state<R>>;
public:
    future() {}
    explicit future(const state_type &state) : state_(state) {}
    future(future &&other) noexcept: state_(std::move(other.state_)) {
        other.state_.reset();
    }
    future(const future &other) = delete;
    ~future() {}
    future &operator=(future &&other) noexcept {
        if (&other != this) {
            state_ = std::move(other.state_);
            other.state_.reset();
        }
        return *this;
    }
    future &operator=(const future &other) = delete;
    void swap(future &other) noexcept {
        std::swap(state_, other.state_);
    }
    /* shared_future is a stub, so this returns an empty placeholder. */
    std::shared_future<R> share() noexcept { return std::shared_future<R>(); }
    /* Blocks until the promise supplies a value (copy handed out). */
    R get() { return state_->get(); }
    bool valid() const noexcept { return state_->valid(); }
    void wait() const { state_->wait(); }
    template <class Rep, class Period>
    std::future_status
    wait_for(const std::chrono::duration<Rep, Period> &rel_time) const {
        return state_->wait_for(rel_time);
    }
    template <class Clock, class Duration>
    std::future_status
    wait_until(const std::chrono::time_point<Clock, Duration> &abs_time) const {
        return state_->wait_until(abs_time);
    }
private:
    state_type state_;
};
/* void specialisation: the result is carried internally as an int whose
 * value is discarded by get().  Same caveats as the primary template
 * (state_ is dereferenced unchecked). */
template <>
class future<void> {
    using state_type = std::shared_ptr<detail::shared_state<int>>;
public:
    future() {}
    explicit future(const state_type &state) : state_(state) {}
    future(future &&other) noexcept: state_(std::move(other.state_)) {
        other.state_.reset();
    }
    future(const future &other) = delete;
    ~future() {}
    future &operator=(future &&other) noexcept {
        if (&other != this) {
            state_ = std::move(other.state_);
            other.state_.reset();
        }
        return *this;
    }
    future &operator=(const future &other) = delete;
    void swap(future &other) noexcept {
        std::swap(state_, other.state_);
    }
    /* shared_future is a stub, so this returns an empty placeholder. */
    std::shared_future<void> share() noexcept { return std::shared_future<void>(); }
    /* Blocks until ready; the internal int result is discarded. */
    void get() { state_->get(); }
    bool valid() const noexcept { return state_->valid(); }
    void wait() const { state_->wait(); }
    template <class Rep, class Period>
    std::future_status
    wait_for(const std::chrono::duration<Rep, Period> &rel_time) const {
        return state_->wait_for(rel_time);
    }
    template <class Clock, class Duration>
    std::future_status
    wait_until(const std::chrono::time_point<Clock, Duration> &abs_time) const {
        return state_->wait_until(abs_time);
    }
private:
    state_type state_;
};
template <typename R>
class promise {
using state_type = std::shared_ptr<detail::shared_state<R>>;
public:
promise() : state_(new detail::shared_state<R>()) {}
promise(promise &&other) noexcept: state_(std::move(other.state_)) {
other.state_.reset();
}
promise(const promise &other) = delete;
~promise() {}
promise &operator=(promise &&other) noexcept {
if (&other != this) {
state_ = std::move(other.state_);
other.state_.reset();
}
return *this;
}
promise &operator=(const promise &other) = delete;
void swap(promise &other) noexcept {
std::swap(state_, other.state_);
}
std::future<R> get_future() { return std::future<R>(state_); }
void set_value(const R &value) { state_->set(value); }
void set_value(R &&value) { state_->set(std::move(value)); }
void set_value_at_thread_exit(const R &value);
void set_value_at_thread_exit(R &&value);
void set_exception(std::exception_ptr p);
void set_exception_at_thread_exit(std::exception_ptr p);
private:
state_type state_;
};
template <>
class promise<void> {
using state_type = std::shared_ptr<detail::shared_state<int>>;
public:
promise() : state_(new detail::shared_state<int>()) {}
promise(promise &&other) noexcept: state_(std::move(other.state_)) {
other.state_.reset();
}
promise(const promise &other) = delete;
~promise() {}
promise &operator=(promise &&other) noexcept {
if (&other != this) {
state_ = std::move(other.state_);
other.state_.reset();
}
return *this;
}
promise &operator=(const promise &other) = delete;
void swap(promise &other) noexcept {
std::swap(state_, other.state_);
}
std::future<void> get_future() { return std::future<void>(state_); }
void set_value() { state_->set(0); }
void set_value_at_thread_exit();
void set_exception(std::exception_ptr p);
void set_exception_at_thread_exit(std::exception_ptr p);
private:
state_type state_;
};
// ADL swap for std::future: exchanges the two handles' shared states.
template <class R>
void swap(std::future<R> &lhs, std::future<R> &rhs) noexcept {
    rhs.swap(lhs);
}
// ADL swap for std::promise: exchanges the two handles' shared states.
template <class R>
void swap(std::promise<R> &lhs, std::promise<R> &rhs) noexcept {
    rhs.swap(lhs);
}
} // namespace std
#pragma once
#if __cplusplus < 201103L
#error "C++ version lower than C++11"
#endif
//#if defined(RT_USING_LIBC) && defined(RT_USING_PTHREADS)
#include <pthread.h>
#include <system_error>
#include <chrono>
#include <utility>
#include <functional>
#include "__utils.h"
#define rt_cpp_mutex_t pthread_mutex_t
namespace std
{
// Base class on which to build std::mutex and std::timed_mutex
class __mutex_base
{
protected:
typedef rt_cpp_mutex_t __native_type;
__native_type _m_mutex = PTHREAD_MUTEX_INITIALIZER;
constexpr __mutex_base() noexcept = default;
__mutex_base(const __mutex_base&) = delete;
__mutex_base& operator=(const __mutex_base&) = delete;
};
// std::mutex built on the POSIX mutex owned by __mutex_base.
// Non-recursive: locking it twice from the same thread is undefined
// behaviour, exactly as for the standard type.
class mutex : private __mutex_base
{
public:
    typedef __native_type* native_handle_type;

    constexpr mutex() = default;
    ~mutex() = default;
    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    // Blocks until the mutex is acquired; failure is reported through
    // the shared throw_system_error() helper.
    void lock()
    {
        const int rc = pthread_mutex_lock(&_m_mutex);
        if (rc != 0)
        {
            throw_system_error(rc, "mutex:lock failed.");
        }
    }

    // Non-blocking acquire: true when the lock was taken.
    bool try_lock() noexcept
    {
        return pthread_mutex_trylock(&_m_mutex) == 0;
    }

    // unlock() must not throw, so any pthread error is ignored here.
    void unlock() noexcept
    {
        (void)pthread_mutex_unlock(&_m_mutex);
    }

    native_handle_type native_handle()
    {
        return &_m_mutex;
    }
};
// Initialise *m as a PTHREAD_MUTEX_RECURSIVE mutex.
// Returns 0 on success; otherwise the first pthread error encountered
// (the attribute-destroy error is only reported when all else succeeded).
inline int __rt_cpp_recursive_mutex_init(rt_cpp_mutex_t* m)
{
    pthread_mutexattr_t attr;
    int res = pthread_mutexattr_init(&attr);
    if (res)
        return res;
    res = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    if (res == 0)
        res = pthread_mutex_init(m, &attr);
    // The attribute object is temporary; always destroy it before return.
    const int destroy_err = pthread_mutexattr_destroy(&attr);
    return res ? res : destroy_err;
}
// Common storage for std::recursive_mutex.  Unlike __mutex_base the
// recursive attribute cannot be expressed with a static initialiser, so
// the constructor must call into pthread and may therefore throw.
class __recursive_mutex_base
{
protected:
    typedef rt_cpp_mutex_t __native_type;
    __native_type _m_recursive_mutex;
    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base()
    {
        // Runtime init is required to set PTHREAD_MUTEX_RECURSIVE.
        int err = __rt_cpp_recursive_mutex_init(&_m_recursive_mutex);
        if (err)
            throw_system_error(err, "Recursive mutex failed to construct");
    }
    ~__recursive_mutex_base()
    {
        // Destructors must not throw; the return value is ignored.
        pthread_mutex_destroy(&_m_recursive_mutex);
    }
};
// std::recursive_mutex: the owning thread may lock repeatedly and must
// unlock once per successful lock.  Built on the recursive POSIX mutex
// initialised by __recursive_mutex_base.
class recursive_mutex : private __recursive_mutex_base
{
public:
    typedef __native_type* native_handle_type;

    recursive_mutex() = default;
    ~recursive_mutex() = default;
    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    // Blocks until acquired (or re-acquired by the owning thread).
    void lock()
    {
        const int rc = pthread_mutex_lock(&_m_recursive_mutex);
        if (rc != 0)
            throw_system_error(rc, "recursive_mutex::lock failed");
    }

    // Non-blocking acquire: true when the lock (level) was taken.
    bool try_lock() noexcept
    {
        return pthread_mutex_trylock(&_m_recursive_mutex) == 0;
    }

    // unlock() must not throw, so any pthread error is ignored here.
    void unlock() noexcept
    {
        (void)pthread_mutex_unlock(&_m_recursive_mutex);
    }

    native_handle_type native_handle()
    {
        return &_m_recursive_mutex;
    }
};
#ifdef RT_PTHREAD_TIMED_MUTEX
class timed_mutex;
class recursive_timed_mutex;
#endif // RT_PTHREAD_TIMED_MUTEX
// Tag types selecting the locking policy of lock_guard/unique_lock:
//   defer_lock  - reference the mutex but take no lock on construction
//   try_to_lock - attempt a non-blocking lock on construction
//   adopt_lock  - the caller already holds the mutex; just adopt it
struct defer_lock_t {};
struct try_to_lock_t {};
struct adopt_lock_t {};
constexpr defer_lock_t defer_lock { };
constexpr try_to_lock_t try_to_lock { };
constexpr adopt_lock_t adopt_lock { };

// Scoped RAII lock owner: locks (or adopts) on entry and always unlocks
// on destruction.  Non-copyable; holds the mutex by reference.
template <class Mutex>
class lock_guard
{
public:
    typedef Mutex mutex_type;

    // Acquire the mutex for the lifetime of the guard.
    explicit lock_guard(mutex_type& m) : device(m) { device.lock(); }

    // Adopt a mutex the caller has already locked; no lock() call here.
    lock_guard(mutex_type& m, adopt_lock_t) noexcept : device(m)
    { }

    ~lock_guard()
    { device.unlock(); }

    lock_guard(lock_guard const&) = delete;
    lock_guard& operator=(lock_guard const&) = delete;

private:
    mutex_type& device;
};
// Movable mutex-ownership wrapper (std::unique_lock).  Tracks both the
// mutex address (pm) and whether this object currently owns a lock on it
// (owns); the destructor only unlocks when owning.  Misuse (locking with
// no mutex, double lock, unlocking when not owned) is reported through
// throw_system_error with the matching errc code.
template <class Mutex>
class unique_lock
{
public:
    typedef Mutex mutex_type;
    // Default state: references no mutex, owns nothing.
    unique_lock() noexcept : pm(nullptr), owns(false) { }
    // Lock immediately (blocking).
    explicit unique_lock(mutex_type& m)
    : pm(std::addressof(m)), owns(false)
    {
        lock();
        owns = true;
    }
    // Reference the mutex but defer locking (defer_lock tag).
    unique_lock(mutex_type& m, defer_lock_t) noexcept
    : pm(std::addressof(m)), owns(false)
    { }
    // Attempt a non-blocking lock; owns records the outcome.
    unique_lock(mutex_type& m, try_to_lock_t) noexcept
    : pm(std::addressof(m)), owns(pm->try_lock())
    { }
    // Adopt a lock the caller already holds.
    unique_lock(mutex_type& m, adopt_lock_t) noexcept
    : pm(std::addressof(m)), owns(true)
    { }
    // any lock-involving timed mutex API is currently only for custom implementations
    // the standard ones are not available
    template <class Clock, class Duration>
    unique_lock(mutex_type& m, const chrono::time_point<Clock, Duration>& abs_time) noexcept
    : pm(std::addressof(m)), owns(pm->try_lock_until(abs_time))
    { }
    template <class Rep, class Period>
    unique_lock(mutex_type& m, const chrono::duration<Rep, Period>& rel_time) noexcept
    : pm(std::addressof(m)), owns(pm->try_lock_for(rel_time))
    { }
    // Unlock on destruction only if a lock is still owned.
    ~unique_lock()
    {
        if (owns)
            unlock();
    }
    unique_lock(unique_lock const&) = delete;
    unique_lock& operator=(unique_lock const&) = delete;
    // Move: steal the source's mutex pointer and ownership flag.
    unique_lock(unique_lock&& u) noexcept
    : pm(u.pm), owns(u.owns)
    {
        u.pm = nullptr;
        u.owns = false;
    }
    // Move-assign: drop any currently owned lock, then take over u's
    // state via the move-construct + swap idiom.
    unique_lock& operator=(unique_lock&& u) noexcept
    {
        if (owns)
            unlock();
        unique_lock(std::move(u)).swap(*this);
        u.pm = nullptr;
        u.owns = false;
        return *this;
    }
    // Blocking lock; diagnoses "no mutex" and "already locked" misuse.
    void lock()
    {
        if (!pm)
            throw_system_error(int(errc::operation_not_permitted),
            "unique_lock::lock: references null mutex");
        else if (owns)
            throw_system_error(int(errc::resource_deadlock_would_occur),
            "unique_lock::lock: already locked" );
        else {
            pm->lock();
            owns = true;
        }
    }
    // Non-blocking lock; returns (and records) whether it succeeded.
    bool try_lock()
    {
        if (!pm)
            throw_system_error(int(errc::operation_not_permitted),
            "unique_lock::try_lock: references null mutex");
        else if (owns)
            throw_system_error(int(errc::resource_deadlock_would_occur),
            "unique_lock::try_lock: already locked" );
        else {
            owns = pm->try_lock();
        }
        return owns;
    }
    // Timed variants; only usable with mutex types that provide
    // try_lock_for/try_lock_until (see the note on the timed ctors).
    template <class Rep, class Period>
    bool try_lock_for(const chrono::duration<Rep, Period>& rel_time)
    {
        if (!pm)
            throw_system_error(int(errc::operation_not_permitted),
            "unique_lock::try_lock_for: references null mutex");
        else if (owns)
            throw_system_error(int(errc::resource_deadlock_would_occur),
            "unique_lock::try_lock_for: already locked");
        else {
            owns = pm->try_lock_for(rel_time);
        }
        return owns;
    }
    template <class Clock, class Duration>
    bool try_lock_until(const chrono::time_point<Clock, Duration>& abs_time)
    {
        if (!pm)
            throw_system_error(int(errc::operation_not_permitted),
            "unique_lock::try_lock_until: references null mutex");
        else if (owns)
            throw_system_error(int(errc::resource_deadlock_would_occur),
            "unique_lock::try_lock_until: already locked");
        else {
            owns = pm->try_lock_until(abs_time);
        }
        return owns;
    }
    // Unlock; throws when no lock is owned (matching the standard).
    void unlock()
    {
        if (!owns)
            throw_system_error(int(errc::operation_not_permitted),
            "unique_lock::unlock: not locked");
        else {
            pm->unlock();
            owns = false;
        }
    }
    void swap(unique_lock& u) noexcept
    {
        std::swap(pm, u.pm);
        std::swap(owns, u.owns);
    }
    // Give up ownership WITHOUT unlocking; returns the mutex pointer.
    mutex_type *release() noexcept
    {
        mutex_type* ret_mutex = pm;
        pm = nullptr;
        owns = false;
        return ret_mutex;
    }
    bool owns_lock() const noexcept
    { return owns; }
    explicit operator bool() const noexcept
    { return owns_lock(); }
    mutex_type* mutex() const noexcept
    { return pm; }
private:
    mutex_type *pm;    // referenced mutex, may be null
    bool owns;         // true while this object holds a lock on *pm
};
// ADL swap for unique_lock: exchanges mutex pointer and ownership flag.
template <class Mutex>
void swap(unique_lock<Mutex>& x, unique_lock<Mutex>& y)
{
    y.swap(x);
}
template <class L0, class L1>
int try_lock(L0& l0, L1& l1)
{
unique_lock<L0> u0(l0, try_to_lock); // try to lock the first Lockable
// using unique_lock since we don't want to unlock l0 manually if l1 fails to lock
if (u0.owns_lock())
{
if (l1.try_lock()) // lock the second one
{
u0.release(); // do not let RAII of a unique_lock unlock l0
return -1;
}
else
return 1;
}
return 0;
}
// Variadic std::try_lock: locks l0 non-blocking, then recurses on the
// rest.  Returns -1 when every lockable was acquired, otherwise the
// 0-based index of the first failure (anything locked earlier is
// released again through unique_lock's RAII).
template <class L0, class L1, class L2, class... L3>
int try_lock(L0& l0, L1& l1, L2& l2, L3&... l3)
{
    int r = 0;
    unique_lock<L0> u0(l0, try_to_lock);
    // automatically unlock is done through RAII of unique_lock
    if (u0.owns_lock())
    {
        r = try_lock(l1, l2, l3...);
        if (r == -1)
            u0.release(); // full success: caller keeps all locks
        else
            ++r; // shift the failing index to account for l0 at slot 0
    }
    return r;
}
// Deadlock-avoidance engine behind the variadic std::lock.  `i` is the
// index (into l0, l1, l2, l3...) of the lockable that failed last time;
// that one is acquired with a BLOCKING lock first, then the others are
// tried non-blocking.  Rotating the blocking lock this way guarantees
// progress without imposing a global lock order on callers.
template <class L0, class L1, class L2, class ...L3>
void
__lock_first(int i, L0& l0, L1& l1, L2& l2, L3&... l3)
{
    while (true)
    {
        // we first lock the one that is the most difficult to lock
        switch (i)
        {
        case 0:
        {
            unique_lock<L0> u0(l0);
            i = try_lock(l1, l2, l3...);
            if (i == -1)
            {
                u0.release(); // everything locked; hand over to caller
                return;
            }
        }
        // u0 released l0 here; retry, blocking on the lock that failed.
        ++i;
        sched_yield();
        break;
        case 1:
        {
            unique_lock<L1> u1(l1);
            // Note the rotated order: l0 is tried LAST here.
            i = try_lock(l2, l3..., l0);
            if (i == -1)
            {
                u1.release();
                return;
            }
        }
        if (i == sizeof...(L3) + 1) // all except l0 are locked
            i = 0;
        else
            i += 2; // since i was two-based above
        sched_yield();
        break;
        default:
            // Rotate the argument list so the failing lockable becomes
            // case 0/1 of a recursive call.
            __lock_first(i - 2, l2, l3..., l0, l1);
            return;
        }
    }
}
template <class L0, class L1>
void lock(L0& l0, L1& l1)
{
while (true)
{
{
unique_lock<L0> u0(l0);
if (l1.try_lock())
{
u0.release();
break;
}
}
sched_yield();
// wait and try the other way
{
unique_lock<L1> u1(l1);
if (l0.try_lock())
{
u1.release();
break;
}
}
sched_yield();
}
}
// Variadic std::lock: delegates to __lock_first, starting by blocking
// on the first lockable (index 0).
template <class L0, class L1, class... L2>
void lock(L0& l0, L1& l1, L2&... l2)
{
    __lock_first(0, l0, l1, l2...);
}
// std::once_flag: per-call-site state for call_once, wrapping a
// statically initialised pthread_once_t.  Must outlive every call_once
// that uses it; non-copyable by design.
struct once_flag
{
    constexpr once_flag() noexcept = default;
    once_flag(const once_flag&) = delete;
    once_flag& operator=(const once_flag&) = delete;
    // call_once needs direct access to _m_once for pthread_once().
    template <class Callable, class... Args>
    friend void call_once(once_flag& flag, Callable&& func, Args&&... args);
private:
    pthread_once_t _m_once = PTHREAD_ONCE_INIT;
};
// Support machinery shared with mutex.cpp: pthread_once only accepts a
// plain function pointer, so the bound functor is smuggled through the
// global `once_functor` (serialised by get_once_mutex) and invoked by
// the extern "C" trampoline once_proxy().
mutex& get_once_mutex();
extern function<void()> once_functor;
extern void set_once_functor_lock_ptr(unique_lock<mutex>*);
extern "C" void once_proxy(); // passed into pthread_once
template <class Callable, class... Args>
void call_once(once_flag& flag, Callable&& func, Args&&... args)
{
    // use a lock to ensure the call to the functor
    // is exclusive to only the first calling thread
    unique_lock<mutex> functor_lock(get_once_mutex());
    auto call_wrapper = std::bind(std::forward<Callable>(func), std::forward<Args>(args)...);
    once_functor = [&]() { call_wrapper(); };
    set_once_functor_lock_ptr(&functor_lock); // so as to unlock when actually calling
    int err = pthread_once(&flag._m_once, &once_proxy);
    // If once_proxy ran it already unlocked functor_lock and cleared the
    // global pointer; otherwise (flag already used) clear it ourselves.
    if (functor_lock)
        set_once_functor_lock_ptr(nullptr);
    if (err)
        throw_system_error(err, "call_once failed");
}
}
//#endif // (RT_USING_LIBC) && (RT_USING_PTHREADS)
\ No newline at end of file
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-04-27 flybreak the first version.
*/
#include "mutex"
namespace std
{
// use a set of global and static objects
// a proxy function to pthread_once
function<void()> once_functor;
// Meyers-singleton mutex serialising all call_once invocations; a local
// static avoids global construction-order issues.
mutex& get_once_mutex()
{
    static mutex once_mutex;
    return once_mutex;
}
// Slot holding the lock owned by the thread currently inside call_once;
// once_proxy uses it to unlock before running the user functor.
inline unique_lock<mutex>*& get_once_functor_lock_ptr()
{
    static unique_lock<mutex>* once_functor_mutex_ptr = nullptr;
    return once_functor_mutex_ptr;
}
// Publishes (or clears, with nullptr) the call_once lock for once_proxy.
void set_once_functor_lock_ptr(unique_lock<mutex>* m_ptr)
{
    get_once_functor_lock_ptr() = m_ptr;
}
extern "C"
{
    // Trampoline handed to pthread_once; runs at most once per flag.
    // Precondition: call_once has stored the functor in once_functor and
    // its lock in the get_once_functor_lock_ptr() slot.
    void once_proxy()
    {
        // need to first transfer the functor's ownership so as to call it
        function<void()> once_call = std::move(once_functor);
        // no need to hold the lock anymore
        unique_lock<mutex>* lock_ptr = get_once_functor_lock_ptr();
        get_once_functor_lock_ptr() = nullptr;
        lock_ptr->unlock();
        // Run the user functor outside the lock so nested call_once works.
        once_call();
    }
}
}
#pragma once
#if __cplusplus < 201103L
#error "C++ version lower than C++11"
#endif
//#if defined(RT_USING_LIBC) && defined(RT_USING_PTHREADS)
#include <unistd.h>
#include <pthread.h>
#include <sched.h>
#include <rtthread.h>
#include <cstddef>
#include <cerrno>
#include <ostream>
#include <functional>
#include <utility>
#include <chrono>
#include <memory>
#define rt_cpp_thread_t pthread_t
#ifndef PTHREAD_NUM_MAX
#define PTHREAD_NUM_MAX 32
#endif
#define CPP_UNJOINABLE_THREAD PTHREAD_NUM_MAX
namespace std
{
#define __STDCPP_THREADS__ __cplusplus
// std::thread built on RT-Thread's pthread layer.  The native handle is
// a pthread_t, which on this platform is a small table index; the
// out-of-range value CPP_UNJOINABLE_THREAD marks "no thread".
class thread
{
public:
    typedef rt_cpp_thread_t native_handle_type;
    struct invoker_base;
    typedef shared_ptr<invoker_base> invoker_base_ptr;
    // Thread identifier: a thin wrapper around the native handle.
    class id
    {
        // basically a wrapper around native_handle_type
        native_handle_type __cpp_thread_t;
    public:
        // Default id == "not a thread" (compares equal to thread::id()).
        id() noexcept : __cpp_thread_t(CPP_UNJOINABLE_THREAD) {}
        explicit id(native_handle_type hid)
        : __cpp_thread_t(hid) {}
    private:
        friend class thread;
        friend class hash<thread::id>;
        friend bool operator==(thread::id x, thread::id y) noexcept;
        friend bool operator<(thread::id x, thread::id y) noexcept;
        template <class charT, class traits>
        friend basic_ostream<charT, traits>&
        operator<<(basic_ostream<charT, traits>& out, thread::id id);
    };
    thread() noexcept = default;
    thread(const thread&) = delete;
    thread& operator=(const thread&) = delete;
    // Defined in thread.cpp: terminates if still joinable, as required.
    ~thread();
    // Launch: bind f(args...) into a heap-allocated invoker and start a
    // pthread running it (see start_thread / execute_native_thread_routine).
    template <class F, class ...Args>
    explicit thread(F&& f, Args&&... args)
    {
        start_thread(make_invoker_ptr(std::bind(
        std::forward<F>(f),
        std::forward<Args>(args)...
        )));
    }
    // Move: the source is left default-constructed (not joinable).
    thread(thread&& t) noexcept
    {
        swap(t);
    }
    // Move-assign onto a joinable thread terminates, per the standard.
    thread& operator=(thread&& t) noexcept
    {
        if (joinable())
            terminate();
        swap(t);
        return *this;
    }
    // member functions
    void swap(thread& t) noexcept
    {
        std::swap(_m_thr, t._m_thr);
    }
    // Joinable while the handle is a valid table index; ids >=
    // PTHREAD_NUM_MAX (CPP_UNJOINABLE_THREAD) mean detached/joined/empty.
    bool joinable() const noexcept
    {
        return (_m_thr.__cpp_thread_t < PTHREAD_NUM_MAX);
    }
    void join();
    void detach();
    id get_id() const noexcept { return _m_thr; }
    native_handle_type native_handle() { return _m_thr.__cpp_thread_t; }
    // static members
    static unsigned hardware_concurrency() noexcept;
private:
    id _m_thr;
    void start_thread(invoker_base_ptr b);
public:
    // Type-erased holder for the bound entry functor.  this_ptr keeps
    // the invoker alive until the new thread takes ownership of it.
    struct invoker_base
    {
        invoker_base_ptr this_ptr;
        virtual ~invoker_base() = default;
        virtual void invoke() = 0;
    };
    template<typename Callable>
    struct invoker : public invoker_base
    {
        Callable func;
        invoker(Callable&& F) : func(std::forward<Callable>(F)) { }
        void invoke() { func(); }
    };
    template <typename Callable>
    shared_ptr<invoker<Callable>> make_invoker_ptr(Callable&& F)
    {
        return std::make_shared<invoker<Callable>>(std::forward<Callable>(F));
    }
};
// ADL swap for std::thread: exchanges the two thread handles.
inline void swap(thread& x, thread& y) noexcept
{
    y.swap(x);
}
// Total order over thread ids, derived from the raw handle value.
// Only == and < touch the handle; the rest are defined in terms of them.
inline bool operator==(thread::id x, thread::id y) noexcept
{
    // From POSIX for pthread_equal:
    //"If either t1 or t2 are not valid thread IDs, the behavior is undefined."
    // Here ids are plain integers, so direct comparison is well defined
    // (and makes the "not a thread" sentinel compare equal to itself).
    return x.__cpp_thread_t == y.__cpp_thread_t;
}
inline bool operator!=(thread::id x, thread::id y) noexcept
{
    return !(x == y);
}
inline bool operator<(thread::id x, thread::id y) noexcept
{
    return x.__cpp_thread_t < y.__cpp_thread_t;
}
inline bool operator<=(thread::id x, thread::id y) noexcept
{
    return !(y < x);
}
inline bool operator>(thread::id x, thread::id y) noexcept
{
    return !(x <= y);
}
inline bool operator>=(thread::id x, thread::id y) noexcept
{
    return !(x < y);
}
// Stream insertion for thread::id; prints a readable placeholder for the
// "not a thread" sentinel instead of its raw numeric value.
template <class charT, class traits>
inline basic_ostream<charT, traits>&
operator<<(basic_ostream<charT, traits>& out, thread::id id)
{
    if (id == thread::id()) // id is invalid, representing no pthread
        out << "thread::id of a non-executing thread";
    else
        out << id.__cpp_thread_t;
    return out;
}
// std::hash specialisation so thread::id can key unordered containers;
// delegates to the hash of the underlying native handle.
template <>
struct hash<thread::id>
{
    typedef size_t result_type;
    typedef thread::id argument_type;
    size_t operator()(const thread::id& id) const noexcept
    {
        return hash<rt_cpp_thread_t>()(id.__cpp_thread_t);
    }
};
// Free functions about the calling thread (std::this_thread).
namespace this_thread
{
    // Identifier of the calling thread, wrapping pthread_self().
    inline thread::id get_id() noexcept
    {
        return thread::id(pthread_self());
    }
    // Offer the processor to another ready thread.
    inline void yield() noexcept
    {
        sched_yield();
    }
    // Sleep for AT LEAST rel_time.  rt_thread_mdelay() only offers
    // millisecond resolution, so the duration is rounded UP to the next
    // millisecond.  (duration_cast truncates: previously a 1.5 ms request
    // slept 1 ms and a sub-millisecond request did not sleep at all,
    // violating the standard's "at least rel_time" guarantee.)
    template <class Rep, class Period>
    inline void sleep_for(const chrono::duration<Rep, Period>& rel_time)
    {
        if (rel_time <= rel_time.zero()) // zero/negative: nothing to do
            return;
        auto milli_secs = chrono::duration_cast<chrono::milliseconds>(rel_time);
        if (milli_secs < rel_time) // truncated -> round up one millisecond
            ++milli_secs;
        // the precision is limited by rt-thread thread API
        rt_thread_mdelay(milli_secs.count());
    }
    // Sleep until abs_time on Clock.  Best effort: converted once to a
    // relative delay, so later clock adjustments are not tracked.
    template <class Clock, class Duration>
    inline void sleep_until(const chrono::time_point<Clock, Duration>& abs_time)
    {
        auto now = Clock::now();
        if (abs_time > now)
            sleep_for(abs_time - now);
    }
}
}
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-04-27 flybreak the first version.
*/
#include "thread"
#include "__utils.h"
#define _RT_NPROCS 0
namespace std
{
extern "C"
{
    // pthread entry stub: receives a raw invoker_base*, takes ownership
    // of the invoker (see start_thread) and runs the bound functor.
    static void* execute_native_thread_routine(void *p)
    {
        thread::invoker_base* t = static_cast<thread::invoker_base*>(p);
        thread::invoker_base_ptr local;
        local.swap(t->this_ptr); // tranfer the ownership of the invoker into the thread entry
        local->invoke();
        // local's destruction here frees the invoker after the functor ran.
        return NULL;
    }
}
// Creates the underlying pthread.  The invoker keeps itself alive via a
// self-referencing shared_ptr (this_ptr) until the new thread's entry
// stub claims it; on creation failure the cycle is broken explicitly so
// the invoker is freed before the error is thrown.
void thread::start_thread(invoker_base_ptr b)
{
    auto raw_ptr = b.get();
    // transfer the ownership of the invoker to the new thread
    raw_ptr->this_ptr = std::move(b);
    int err = pthread_create(&_m_thr.__cpp_thread_t, NULL,
    &execute_native_thread_routine, raw_ptr);
    if (err)
    {
        raw_ptr->this_ptr.reset(); // break the self-reference: no thread took it
        throw_system_error(err, "Failed to create a thread");
    }
}
// Standard behaviour: destroying a thread that was neither joined nor
// detached calls std::terminate.
thread::~thread()
{
    if (joinable()) // when either not joined or not detached
        terminate();
}
// Blocks until the thread finishes.  Joining a non-joinable thread
// throws (EINVAL); on success the handle is reset to "not a thread".
void thread::join()
{
    int err = EINVAL;
    if (joinable())
        err = pthread_join(native_handle(), NULL);
    if (err)
    {
        throw_system_error(err, "thread::join failed");
    }
    _m_thr = id();
}
// Releases the thread to run independently.  Detaching a non-joinable
// thread throws (EINVAL); on success the handle becomes "not a thread".
void thread::detach()
{
    int err = EINVAL;
    if (joinable())
        err = pthread_detach(native_handle());
    if (err)
    {
        throw_system_error(err, "thread::detach failed");
    }
    _m_thr = id();
}
// TODO: not yet actually implemented.
// The standard states that the returned value should only be considered a hint.
// _RT_NPROCS is currently hard-wired to 0, i.e. "unknown"; the clamp
// below only guards against a future negative definition.
unsigned thread::hardware_concurrency() noexcept
{
    int __n = _RT_NPROCS;
    if (__n < 0)
        __n = 0;
    return __n;
}
}
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-04-27 flybreak the first version.
*/
#include "__utils.h"
#include <sys/time.h>
// Current time of the OS-tick based clock (declared in __utils.h),
// expressed directly in ticks as returned by clock().
tick_clock::time_point tick_clock::now()
{
    tick_clock::rep cur_tk = clock();
    tick_clock::duration cur_time(cur_tk);
    return tick_clock::time_point(cur_time);
}
// Wall-clock time from CLOCK_REALTIME, combining the seconds and
// nanoseconds fields of the timespec into one duration.
// NOTE(review): the clock_gettime() return value is not checked --
// on failure tp would be used uninitialised; confirm RTC availability.
real_time_clock::time_point real_time_clock::now() noexcept
{
    timespec tp;
    clock_gettime(CLOCK_REALTIME, &tp);
    return time_point(duration(std::chrono::seconds(tp.tv_sec))
    + std::chrono::nanoseconds(tp.tv_nsec));
}
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-04-27 flybreak the first version.
*/
#include <pthread.h>
#include <cstdlib>
typedef void (*destructor) (void *);

// Minimal __cxa_thread_atexit fallback for toolchains whose C library
// does not supply one: each registration burns one pthread TLS key whose
// per-thread destructor runs dtor(obj) at thread exit.
// Known limitations (unchanged here): keys are never released, the
// number of thread_local objects is capped by the key limit, and
// dso_symbol (used for shared-object unload ordering) is ignored.
extern "C"
int __cxa_thread_atexit_impl(destructor dtor, void* obj, void* dso_symbol)
{
    (void)dso_symbol; // unload ordering is not supported on this target
    pthread_key_t key_tmp;
    if (pthread_key_create(&key_tmp, dtor) != 0)
        abort();
    // FIX: the pthread_setspecific result used to be ignored; on failure
    // the destructor would silently never run.  Fail hard like the
    // key-creation path instead.
    if (pthread_setspecific(key_tmp, obj) != 0)
        abort();
    return 0;
}
#if defined(__GNUC__) && !defined(__ARMCC_VERSION)/*GCC*/
#include <cxxabi.h>
// GCC's libstdc++ emits calls to __cxxabiv1::__cxa_thread_atexit for
// thread_local destructors; forward those to the fallback above.
// (armclang resolves the symbol differently, hence the guard.)
extern"C"
int __cxxabiv1::__cxa_thread_atexit(destructor dtor, void *obj, void *dso_handle)
{
    return __cxa_thread_atexit_impl(dtor, obj, dso_handle);
}
#endif
......@@ -27,6 +27,7 @@ typedef signed int ssize_t; /* Used for a count of bytes or an error
#else
typedef long signed int ssize_t; /* Used for a count of bytes or an error indication. */
#endif
typedef long suseconds_t; /* microseconds. */
typedef unsigned long useconds_t; /* microseconds (unsigned) */
typedef unsigned long dev_t;
......
......@@ -48,9 +48,10 @@ rt_int8_t rt_tz_is_dst(void);
#ifndef _TIMEVAL_DEFINED
#define _TIMEVAL_DEFINED
#if !(defined(_WIN32))
struct timeval {
long tv_sec; /* seconds */
long tv_usec; /* and microseconds */
struct timeval
{
time_t tv_sec; /* seconds */
suseconds_t tv_usec; /* and microseconds */
};
#endif
#endif /* _TIMEVAL_DEFINED */
......
......@@ -90,9 +90,30 @@ void _pthread_data_destroy(pthread_t pth)
{
RT_DECLARE_SPINLOCK(pth_lock);
extern _pthread_key_data_t _thread_keys[PTHREAD_KEY_MAX];
_pthread_data_t *ptd = _pthread_get_data(pth);
if (ptd)
{
/* destruct thread local key */
if (ptd->tls != RT_NULL)
{
void *data;
rt_uint32_t index;
for (index = 0; index < PTHREAD_KEY_MAX; index ++)
{
if (_thread_keys[index].is_used)
{
data = ptd->tls[index];
if (data && _thread_keys[index].destructor)
_thread_keys[index].destructor(data);
}
}
/* release tls area */
rt_free(ptd->tls);
ptd->tls = RT_NULL;
}
/* remove from pthread table */
rt_hw_spin_lock(&pth_lock);
pth_table[pth] = NULL;
......@@ -103,13 +124,17 @@ void _pthread_data_destroy(pthread_t pth)
rt_sem_delete(ptd->joinable_sem);
/* release thread resource */
if (ptd->attr.stackaddr == RT_NULL && ptd->tid->stack_addr != RT_NULL)
if (ptd->attr.stackaddr == RT_NULL)
{
/* release thread allocated stack */
rt_free(ptd->tid->stack_addr);
if (ptd->tid)
{
rt_free(ptd->tid->stack_addr);
}
}
/* clean stack addr pointer */
ptd->tid->stack_addr = RT_NULL;
if (ptd->tid)
ptd->tid->stack_addr = RT_NULL;
/*
* if this thread create the local thread data,
......@@ -221,6 +246,12 @@ int pthread_create(pthread_t *pid,
pthread_attr_init(&ptd->attr);
}
if (ptd->attr.stacksize == 0)
{
ret = EINVAL;
goto __exit;
}
rt_snprintf(name, sizeof(name), "pth%02d", pthread_number ++);
/* pthread is a static thread object */
......@@ -269,7 +300,7 @@ int pthread_create(pthread_t *pid,
/* initial this pthread to system */
if (rt_thread_init(ptd->tid, name, pthread_entry_stub, ptd,
stack, ptd->attr.stacksize,
ptd->attr.schedparam.sched_priority, 5) != RT_EOK)
ptd->attr.schedparam.sched_priority, 20) != RT_EOK)
{
ret = EINVAL;
goto __exit;
......@@ -460,7 +491,7 @@ void pthread_exit(void *value)
if (_thread_keys[index].is_used)
{
data = ptd->tls[index];
if (data)
if (data && _thread_keys[index].destructor)
_thread_keys[index].destructor(data);
}
}
......@@ -507,14 +538,21 @@ int pthread_kill(pthread_t thread, int sig)
{
#ifdef RT_USING_SIGNALS
_pthread_data_t *ptd;
int ret;
ptd = _pthread_get_data(thread);
if (ptd)
{
return rt_thread_kill(ptd->tid, sig);
ret = rt_thread_kill(ptd->tid, sig);
if (ret == -RT_EINVAL)
{
return EINVAL;
}
return ret;
}
return EINVAL;
return ESRCH;
#else
return ENOSYS;
#endif
......@@ -707,3 +745,4 @@ int pthread_cancel(pthread_t thread)
return 0;
}
RTM_EXPORT(pthread_cancel);
......@@ -265,6 +265,11 @@ int pthread_barrier_init(pthread_barrier_t *barrier,
int pthread_barrier_wait(pthread_barrier_t *barrier);
int pthread_setspecific(pthread_key_t key, const void *value);
void *pthread_getspecific(pthread_key_t key);
int pthread_key_create(pthread_key_t *key, void (*destructor)(void *));
int pthread_key_delete(pthread_key_t key);
#ifdef __cplusplus
}
#endif
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册