Unverified commit 51943b6f authored by slguan, committed by GitHub

Merge pull request #684 from taosdata/feature/newtimer

new timer implementation
......@@ -21,40 +21,41 @@ extern "C" {
#endif
typedef void *tmr_h;
typedef void (*TAOS_TMR_CALLBACK)(void *, void *);
extern uint32_t tmrDebugFlag;
extern int taosTmrThreads;
#define tmrError(...) \
if (tmrDebugFlag & DEBUG_ERROR) { \
do { if (tmrDebugFlag & DEBUG_ERROR) { \
tprintf("ERROR TMR ", tmrDebugFlag, __VA_ARGS__); \
}
} } while(0)
#define tmrWarn(...) \
if (tmrDebugFlag & DEBUG_WARN) { \
do { if (tmrDebugFlag & DEBUG_WARN) { \
tprintf("WARN TMR ", tmrDebugFlag, __VA_ARGS__); \
}
} } while(0)
#define tmrTrace(...) \
if (tmrDebugFlag & DEBUG_TRACE) { \
do { if (tmrDebugFlag & DEBUG_TRACE) { \
tprintf("TMR ", tmrDebugFlag, __VA_ARGS__); \
}
} } while(0)
#define MAX_NUM_OF_TMRCTL 512
#define MAX_NUM_OF_TMRCTL 32
#define MSECONDS_PER_TICK 5
void *taosTmrInit(int maxTmr, int resolution, int longest, char *label);
void *taosTmrInit(int maxTmr, int resolution, int longest, const char *label);
tmr_h taosTmrStart(void (*fp)(void *, void *), int mseconds, void *param1, void *handle);
tmr_h taosTmrStart(TAOS_TMR_CALLBACK fp, int mseconds, void *param, void *handle);
void taosTmrStop(tmr_h tmrId);
bool taosTmrStop(tmr_h tmrId);
void taosTmrStopA(tmr_h *timerId);
bool taosTmrStopA(tmr_h *timerId);
void taosTmrReset(void (*fp)(void *, void *), int mseconds, void *param1, void *handle, tmr_h *pTmrId);
bool taosTmrReset(TAOS_TMR_CALLBACK fp, int mseconds, void *param, void *handle, tmr_h *pTmrId);
void taosTmrCleanUp(void *handle);
void taosTmrList(void *handle);
#ifdef __cplusplus
}
#endif
......
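As a usage illustration (not part of this patch): a minimal sketch of the revised timer API above, assuming ttimer.h is on the include path; the timing values, the "demo" label, and the callback name are illustrative.
// Sketch only: exercises taosTmrInit/taosTmrStart/taosTmrStop with the new
// TAOS_TMR_CALLBACK type and the bool return values; parameter values are illustrative.
#include <stdbool.h>
#include <stdio.h>
#include "ttimer.h"
static void onTimeout(void *param, void *tmrId) {
  // TAOS_TMR_CALLBACK receives the user parameter and the handle of the firing timer.
  printf("timer %p fired, param=%s\n", tmrId, (char *)param);
}
int main(void) {
  // Arguments: maxTmr, resolution (ms), longest (ms), label.
  void *tmrCtrl = taosTmrInit(1024, MSECONDS_PER_TICK, 60000, "demo");
  tmr_h id = taosTmrStart(onTimeout, 100, "hello", tmrCtrl);
  // The new API reports whether the timer was cancelled before its callback ran.
  bool cancelled = taosTmrStop(id);
  printf("cancelled before firing: %d\n", cancelled);
  taosTmrCleanUp(tmrCtrl);
  return 0;
}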
......@@ -55,10 +55,44 @@
#define taosWriteSocket(fd, buf, len) write(fd, buf, len)
#define taosReadSocket(fd, buf, len) read(fd, buf, len)
#define atomic_load_8(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_16(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_32(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_64(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_ptr(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_store_8(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_store_16(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_store_32(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_store_64(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_store_ptr(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_exchange_8(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_exchange_16(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_exchange_32(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_exchange_64(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_exchange_ptr(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST)
// TODO: update the prefix of the macros below to 'atomic', as '__' is reserved by the compiler,
// and GCC suggests that new code use the '__atomic' builtins instead of the '__sync' builtins.
#define __sync_val_compare_and_swap_64 __sync_val_compare_and_swap
#define __sync_val_compare_and_swap_32 __sync_val_compare_and_swap
#define __sync_val_compare_and_swap_16 __sync_val_compare_and_swap
#define __sync_val_compare_and_swap_8 __sync_val_compare_and_swap
#define __sync_val_compare_and_swap_ptr __sync_val_compare_and_swap
#define __sync_add_and_fetch_64 __sync_add_and_fetch
#define __sync_add_and_fetch_32 __sync_add_and_fetch
#define __sync_add_and_fetch_16 __sync_add_and_fetch
#define __sync_add_and_fetch_8 __sync_add_and_fetch
#define __sync_add_and_fetch_ptr __sync_add_and_fetch
#define __sync_sub_and_fetch_64 __sync_sub_and_fetch
#define __sync_sub_and_fetch_32 __sync_sub_and_fetch
#define __sync_sub_and_fetch_16 __sync_sub_and_fetch
#define __sync_sub_and_fetch_8 __sync_sub_and_fetch
#define __sync_sub_and_fetch_ptr __sync_sub_and_fetch
int32_t __sync_val_load_32(int32_t *ptr);
void __sync_val_restore_32(int32_t *ptr, int32_t newval);
......
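As a usage illustration (not part of this patch): a minimal sketch of the atomic_load/atomic_store/atomic_exchange and __sync_add_and_fetch macros above, all of which expand to GCC builtins with sequentially consistent ordering; the os header that defines them is assumed to be included.
// Sketch only: a shared counter plus a hand-off flag built on the macros above.
#include <stdint.h>
static int64_t counter = 0;
static int32_t ready = 0;
void producer_step(void) {
  __sync_add_and_fetch_64(&counter, 1);  // atomic increment; returns the new value
  atomic_store_32(&ready, 1);            // sequentially consistent store
}
int64_t consumer_poll(void) {
  if (atomic_load_32(&ready) == 0) return -1;  // sequentially consistent load
  atomic_exchange_32(&ready, 0);               // consume the flag; returns the old value
  return atomic_load_64(&counter);
}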
......@@ -71,14 +71,43 @@ extern "C" {
#define taosWriteSocket(fd, buf, len) write(fd, buf, len)
#define taosReadSocket(fd, buf, len) read(fd, buf, len)
#define atomic_load_8(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_16(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_32(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_64(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_ptr(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_store_8(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_store_16(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_store_32(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_store_64(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_store_ptr(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_exchange_8(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_exchange_16(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_exchange_32(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_exchange_64(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST)
#define atomic_exchange_ptr(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST)
// TODO: update the prefix of the macros below to 'atomic', as '__' is reserved by the compiler,
// and GCC suggests that new code use the '__atomic' builtins instead of the '__sync' builtins.
#define __sync_val_compare_and_swap_64 __sync_val_compare_and_swap
#define __sync_val_compare_and_swap_32 __sync_val_compare_and_swap
#define __sync_val_compare_and_swap_16 __sync_val_compare_and_swap
#define __sync_val_compare_and_swap_8 __sync_val_compare_and_swap
#define __sync_val_compare_and_swap_ptr __sync_val_compare_and_swap
#define __sync_add_and_fetch_64 __sync_add_and_fetch
#define __sync_add_and_fetch_32 __sync_add_and_fetch
#define __sync_add_and_fetch_16 __sync_add_and_fetch
#define __sync_add_and_fetch_8 __sync_add_and_fetch
#define __sync_add_and_fetch_ptr __sync_add_and_fetch
#define __sync_sub_and_fetch_64 __sync_sub_and_fetch
#define __sync_sub_and_fetch_32 __sync_sub_and_fetch
#define __sync_sub_and_fetch_16 __sync_sub_and_fetch
#define __sync_sub_and_fetch_8 __sync_sub_and_fetch
#define __sync_sub_and_fetch_ptr __sync_sub_and_fetch
int32_t __sync_val_load_32(int32_t *ptr);
void __sync_val_restore_32(int32_t *ptr, int32_t newval);
......
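This header repeats the same macro surface for another platform build. As an illustration of the compare-and-swap entry points (not part of this patch), a sketch of a retry loop follows; the helper name atomic_store_max_32 is hypothetical.
// Sketch only: atomically raise *ptr to at least val using the CAS macro above.
#include <stdint.h>
void atomic_store_max_32(int32_t *ptr, int32_t val) {
  int32_t cur = atomic_load_32(ptr);
  while (cur < val) {
    // __sync_val_compare_and_swap_32 returns the value that was actually in *ptr;
    // if it differs from cur, another thread updated it first, so retry with the new value.
    int32_t seen = __sync_val_compare_and_swap_32(ptr, cur, val);
    if (seen == cur) break;
    cur = seen;
  }
}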
......@@ -29,6 +29,7 @@
#include <math.h>
#include <string.h>
#include <assert.h>
#include <intrin.h>
#ifdef __cplusplus
extern "C" {
......@@ -78,12 +79,75 @@ extern "C" {
#define taosWriteSocket(fd, buf, len) send(fd, buf, len, 0)
#define taosReadSocket(fd, buf, len) recv(fd, buf, len, 0)
int32_t __sync_val_compare_and_swap_32(int32_t *ptr, int32_t oldval, int32_t newval);
int32_t __sync_add_and_fetch_32(int32_t *ptr, int32_t val);
int32_t __sync_sub_and_fetch_32(int32_t *ptr, int32_t val);
int64_t __sync_val_compare_and_swap_64(int64_t *ptr, int64_t oldval, int64_t newval);
int64_t __sync_add_and_fetch_64(int64_t *ptr, int64_t val);
int64_t __sync_sub_and_fetch_64(int64_t *ptr, int64_t val);
#if defined(_M_ARM) || defined(_M_ARM64)
#define atomic_load_8(ptr) __iso_volatile_load8((const volatile __int8*)(ptr))
#define atomic_load_16(ptr) __iso_volatile_load16((const volatile __int16*)(ptr))
#define atomic_load_32(ptr) __iso_volatile_load32((const volatile __int32*)(ptr))
#define atomic_load_64(ptr) __iso_volatile_load64((const volatile __int64*)(ptr))
#define atomic_store_8(ptr, val) __iso_volatile_store8((volatile __int8*)(ptr), (__int8)(val))
#define atomic_store_16(ptr, val) __iso_volatile_store16((volatile __int16*)(ptr), (__int16)(val))
#define atomic_store_32(ptr, val) __iso_volatile_store32((volatile __int32*)(ptr), (__int32)(val))
#define atomic_store_64(ptr, val) __iso_volatile_store64((volatile __int64*)(ptr), (__int64)(val))
#ifdef _M_ARM64
#define atomic_load_ptr atomic_load_64
#define atomic_store_ptr atomic_store_64
#else
#define atomic_load_ptr atomic_load_32
#define atomic_store_ptr atomic_store_32
#endif
#else
#define atomic_load_8(ptr) (*(char volatile*)(ptr))
#define atomic_load_16(ptr) (*(short volatile*)(ptr))
#define atomic_load_32(ptr) (*(long volatile*)(ptr))
#define atomic_load_64(ptr) (*(__int64 volatile*)(ptr))
#define atomic_load_ptr(ptr) (*(void* volatile*)(ptr))
#define atomic_store_8(ptr, val) ((*(char volatile*)(ptr)) = (char)(val))
#define atomic_store_16(ptr, val) ((*(short volatile*)(ptr)) = (short)(val))
#define atomic_store_32(ptr, val) ((*(long volatile*)(ptr)) = (long)(val))
#define atomic_store_64(ptr, val) ((*(__int64 volatile*)(ptr)) = (__int64)(val))
#define atomic_store_ptr(ptr, val) ((*(void* volatile*)(ptr)) = (void*)(val))
#endif
#define atomic_exchange_8(ptr, val) _InterlockedExchange8((char volatile*)(ptr), (char)(val))
#define atomic_exchange_16(ptr, val) _InterlockedExchange16((short volatile*)(ptr), (short)(val))
#define atomic_exchange_32(ptr, val) _InterlockedExchange((long volatile*)(ptr), (long)(val))
#define atomic_exchange_64(ptr, val) _InterlockedExchange64((__int64 volatile*)(ptr), (__int64)(val))
#define atomic_exchange_ptr(ptr, val) _InterlockedExchangePointer((void* volatile*)(ptr), (void*)(val))
#define __sync_val_compare_and_swap_8(ptr, oldval, newval) _InterlockedCompareExchange8((char volatile*)(ptr), (char)(newval), (char)(oldval))
#define __sync_val_compare_and_swap_16(ptr, oldval, newval) _InterlockedCompareExchange16((short volatile*)(ptr), (short)(newval), (short)(oldval))
#define __sync_val_compare_and_swap_32(ptr, oldval, newval) _InterlockedCompareExchange((long volatile*)(ptr), (long)(newval), (long)(oldval))
#define __sync_val_compare_and_swap_64(ptr, oldval, newval) _InterlockedCompareExchange64((__int64 volatile*)(ptr), (__int64)(newval), (__int64)(oldval))
#define __sync_val_compare_and_swap_ptr(ptr, oldval, newval) _InterlockedCompareExchangePointer((void* volatile*)(ptr), (void*)(newval), (void*)(oldval))
char interlocked_add_8(char volatile *ptr, char val);
short interlocked_add_16(short volatile *ptr, short val);
long interlocked_add_32(long volatile *ptr, long val);
__int64 interlocked_add_64(__int64 volatile *ptr, __int64 val);
#define __sync_add_and_fetch_8(ptr, val) interlocked_add_8((char volatile*)(ptr), (char)(val))
#define __sync_add_and_fetch_16(ptr, val) interlocked_add_16((short volatile*)(ptr), (short)(val))
#define __sync_add_and_fetch_32(ptr, val) interlocked_add_32((long volatile*)(ptr), (long)(val))
#define __sync_add_and_fetch_64(ptr, val) interlocked_add_64((__int64 volatile*)(ptr), (__int64)(val))
#ifdef _WIN64
#define __sync_add_and_fetch_ptr __sync_add_and_fetch_64
#else
#define __sync_add_and_fetch_ptr __sync_add_and_fetch_32
#endif
#define __sync_sub_and_fetch_8(ptr, val) __sync_add_and_fetch_8((ptr), -(val))
#define __sync_sub_and_fetch_16(ptr, val) __sync_add_and_fetch_16((ptr), -(val))
#define __sync_sub_and_fetch_32(ptr, val) __sync_add_and_fetch_32((ptr), -(val))
#define __sync_sub_and_fetch_64(ptr, val) __sync_add_and_fetch_64((ptr), -(val))
#define __sync_sub_and_fetch_ptr(ptr, val) __sync_add_and_fetch_ptr((ptr), -(val))
int32_t __sync_val_load_32(int32_t *ptr);
void __sync_val_restore_32(int32_t *ptr, int32_t newval);
......
......@@ -43,8 +43,11 @@ void taosResetPthread(pthread_t *thread) {
}
int64_t taosGetPthreadId() {
pthread_t id = pthread_self();
return (int64_t)id.p;
#ifdef PTW32_VERSION
return pthread_getw32threadid_np(pthread_self());
#else
return (int64_t)pthread_self();
#endif
}
int taosSetSockOpt(int socketfd, int level, int optname, void *optval, int optlen) {
......@@ -63,28 +66,21 @@ int taosSetSockOpt(int socketfd, int level, int optname, void *optval, int optle
return setsockopt(socketfd, level, optname, optval, optlen);
}
int32_t __sync_val_compare_and_swap_32(int32_t *ptr, int32_t oldval, int32_t newval) {
return InterlockedCompareExchange(ptr, newval, oldval);
}
int32_t __sync_add_and_fetch_32(int32_t *ptr, int32_t val) {
return InterlockedAdd(ptr, val);
}
int32_t __sync_sub_and_fetch_32(int32_t *ptr, int32_t val) {
return InterlockedAdd(ptr, -val);
char interlocked_add_8(char volatile* ptr, char val) {
return _InterlockedExchangeAdd8(ptr, val) + val;
}
int64_t __sync_val_compare_and_swap_64(int64_t *ptr, int64_t oldval, int64_t newval) {
return InterlockedCompareExchange64(ptr, newval, oldval);
short interlocked_add_16(short volatile* ptr, short val) {
return _InterlockedExchangeAdd16(ptr, val) + val;
}
int64_t __sync_add_and_fetch_64(int64_t *ptr, int64_t val) {
return InterlockedAdd64(ptr, val);
long interlocked_add_32(long volatile* ptr, long val) {
return _InterlockedExchangeAdd(ptr, val) + val;
}
int64_t __sync_sub_and_fetch_64(int64_t *ptr, int64_t val) {
return InterlockedAdd64(ptr, -val);
__int64 interlocked_add_64(__int64 volatile* ptr, __int64 val) {
return _InterlockedExchangeAdd64(ptr, val) + val;
}
int32_t __sync_val_load_32(int32_t *ptr) {
......
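A note on the wrappers above (not part of the patch): the _InterlockedExchangeAdd intrinsics return the value the target held before the addition, while the GCC-style __sync_add_and_fetch_* must return the value after it, which is why each wrapper adds val back. A small sketch, assuming the Windows os header above is included:
// Sketch only: illustrates the add-and-fetch semantics of the wrappers.
#include <assert.h>
void check_interlocked_add_semantics(void) {
  long v = 10;
  // _InterlockedExchangeAdd would return 10 here; the wrapper adds val back, so 15.
  assert(interlocked_add_32(&v, 5) == 15);
  assert(v == 15);
  // Subtraction is expressed as adding the negated value (see __sync_sub_and_fetch_32).
  assert(__sync_sub_and_fetch_32(&v, 3) == 12);
  assert(v == 12);
}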
This diff is collapsed.