Unverified commit 9e47180e, authored by wmmhello, committed by GitHub

Merge pull request #12769 from taosdata/3.0

3.0
......@@ -117,27 +117,29 @@ def pre_test(){
def pre_test_win(){
bat '''
hostname
ipconfig
set
date /t
time /t
taskkill /f /t /im python.exe
taskkill /f /t /im bash.exe
rd /s /Q C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug
exit 0
rd /s /Q C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug || exit 0
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git reset --hard
git fetch || git fetch
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git reset --hard
git fetch || git fetch
git checkout -f
'''
script {
if (env.CHANGE_TARGET == 'master') {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git checkout master
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git checkout master
'''
......@@ -145,6 +147,8 @@ def pre_test_win(){
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git checkout 2.0
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git checkout 2.0
'''
......@@ -152,6 +156,8 @@ def pre_test_win(){
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git checkout 3.0
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git checkout 3.0
'''
......@@ -159,6 +165,8 @@ def pre_test_win(){
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git checkout develop
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git checkout develop
'''
......@@ -169,30 +177,52 @@ def pre_test_win(){
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git pull
git log -5
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git pull
git fetch origin +refs/pull/${CHANGE_ID}/merge
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git fetch origin +refs/pull/%CHANGE_ID%/merge
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git checkout -qf FETCH_HEAD
git log -5
'''
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git pull
git fetch origin +refs/pull/${CHANGE_ID}/merge
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git fetch origin +refs/pull/%CHANGE_ID%/merge
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git checkout -qf FETCH_HEAD
git log -5
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git pull
git log -5
'''
} else {
sh '''
echo "unmatched reposiotry ${CHANGE_URL}"
bat '''
echo "unmatched reposiotry %CHANGE_URL%"
'''
}
}
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
git branch
git log -5
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git branch
git log -5
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
git submodule update --init --recursive
......@@ -205,10 +235,15 @@ def pre_test_build_win() {
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
mkdir debug
cd debug
time /t
call "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat" x64
set CL=/MP8
cmake .. -G "NMake Makefiles JOM"
jom -j 4 || exit 8
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> cmake"
time /t
cmake .. -G "NMake Makefiles JOM" || exit 7
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> jom -j 6"
time /t
jom -j 6 || exit 8
time /t
'''
return 1
......@@ -226,6 +261,13 @@ pipeline {
stages {
stage('run test') {
parallel {
stage('windows test') {
agent{label " windows10_01 || windows10_02 || windows10_03 || windows10_04 "}
steps {
pre_test_win()
pre_test_build_win()
}
}
stage('linux test') {
agent{label " slave3_0 || slave15 || slave16 || slave17 "}
options { skipDefaultCheckout() }
......
......@@ -46,11 +46,17 @@ ENDIF ()
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
SET(COMMON_FLAGS "/W3 /D_WIN32")
SET(COMMON_FLAGS "/w /D_WIN32")
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
# ENDIF ()
IF (CMAKE_DEPFILE_FLAGS_C)
SET(CMAKE_DEPFILE_FLAGS_C "")
ENDIF ()
IF (CMAKE_DEPFILE_FLAGS_CXX)
SET(CMAKE_DEPFILE_FLAGS_CXX "")
ENDIF ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")
......
......@@ -5,22 +5,27 @@ IF (TD_LINUX)
ELSEIF (TD_WINDOWS)
SET(CMAKE_INSTALL_PREFIX C:/TDengine)
INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/go DESTINATION connector)
INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/nodejs DESTINATION connector)
INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/python DESTINATION connector)
INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/C\# DESTINATION connector)
INSTALL(DIRECTORY ${TD_SOURCE_DIR}/examples DESTINATION .)
# INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/go DESTINATION connector)
# INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/nodejs DESTINATION connector)
# INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/python DESTINATION connector)
# INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/C\# DESTINATION connector)
# INSTALL(DIRECTORY ${TD_SOURCE_DIR}/examples DESTINATION .)
INSTALL(FILES ${TD_SOURCE_DIR}/packaging/cfg/taos.cfg DESTINATION cfg)
INSTALL(FILES ${TD_SOURCE_DIR}/src/inc/taos.h DESTINATION include)
INSTALL(FILES ${TD_SOURCE_DIR}/src/inc/taoserror.h DESTINATION include)
INSTALL(FILES ${TD_SOURCE_DIR}/include/client/taos.h DESTINATION include)
INSTALL(FILES ${TD_SOURCE_DIR}/include/util/taoserror.h DESTINATION include)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos_static.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosd.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/udfd.exe DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.38-dist.jar DESTINATION connector/jdbc)
ENDIF ()
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.bat")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} :needAdmin ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Windows ${TD_VER_NUMBER})")
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
......
......@@ -46,6 +46,18 @@ IF(${TD_WINDOWS})
ON
)
option(
BUILD_TEST
"If build unit tests using googletest"
OFF
)
ELSE ()
option(
BUILD_TEST
"If build unit tests using googletest"
ON
)
ENDIF ()
option(
......@@ -54,12 +66,6 @@ option(
OFF
)
option(
BUILD_TEST
"If build unit tests using googletest"
ON
)
option(
BUILD_WITH_LEVELDB
"If build with leveldb"
......
......@@ -226,10 +226,10 @@ endif(${BUILD_WITH_NURAFT})
if(${BUILD_PTHREAD})
set(CMAKE_BUILD_TYPE release)
add_definitions(-DPTW32_STATIC_LIB)
add_subdirectory(pthread)
add_subdirectory(pthread EXCLUDE_FROM_ALL)
set_target_properties(libpthreadVC3 PROPERTIES OUTPUT_NAME pthread)
add_library(pthread STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET pthread PROPERTY IMPORTED_LOCATION ${LIBRARY_OUTPUT_PATH}/pthread.lib)
add_library(pthread INTERFACE)
target_link_libraries(pthread INTERFACE libpthreadVC3)
endif()
# iconv
......
......@@ -239,7 +239,7 @@ void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
msg_process(tmqmessage);
taos_free_result(tmqmessage);
tmq_commit(tmq, NULL, 1);
tmq_commit_async(tmq, NULL, tmq_commit_cb_print, NULL);
/*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/
}
}
......
......@@ -232,11 +232,11 @@ DLL_EXPORT tmq_resp_err_t tmq_unsubscribe(tmq_t *tmq);
DLL_EXPORT tmq_resp_err_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t wait_time);
DLL_EXPORT tmq_resp_err_t tmq_consumer_close(tmq_t *tmq);
DLL_EXPORT tmq_resp_err_t tmq_commit(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets, int32_t async);
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets, tmq_commit_cb *cb, void *param);
DLL_EXPORT tmq_resp_err_t tmq_commit_sync(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets);
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets, tmq_commit_cb *cb, void *param);
#if 0
DLL_EXPORT tmq_resp_err_t tmq_commit_message(tmq_t* tmq, const tmq_message_t* tmqmessage, int32_t async);
DLL_EXPORT tmq_resp_err_t tmq_commit(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets, int32_t async);
DLL_EXPORT tmq_resp_err_t tmq_seek(tmq_t *tmq, const tmq_topic_vgroup_t *offset);
#endif
......
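The old tmq_commit(..., int32_t async) entry point is split into explicit tmq_commit_sync and tmq_commit_async variants here. A minimal consumer-loop sketch against the declarations above; msg_process and tmq_commit_cb_print are the helper names from the example file earlier in this diff, and treating 0 as success for tmq_resp_err_t is an assumption:

static void consume_loop(tmq_t* tmq) {
  while (1) {
    TAOS_RES* msg = tmq_consumer_poll(tmq, 1000);   /* wait at most 1000 ms */
    if (msg == NULL) continue;
    msg_process(msg);                               /* application-defined handler */
    taos_free_result(msg);
    /* block until the consumed offsets are committed; NULL means "all consumed offsets" */
    tmq_resp_err_t err = tmq_commit_sync(tmq, NULL);
    if (err != 0) break;                            /* assumed: 0 == success */
    /* non-blocking alternative:
     * tmq_commit_async(tmq, NULL, tmq_commit_cb_print, NULL); */
  }
}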
......@@ -301,6 +301,8 @@ typedef struct SSchema {
typedef struct {
int32_t nCols;
int32_t sver;
int32_t tagVer;
int32_t colVer;
SSchema* pSchema;
} SSchemaWrapper;
......@@ -309,6 +311,8 @@ static FORCE_INLINE SSchemaWrapper* tCloneSSchemaWrapper(const SSchemaWrapper* p
if (pSW == NULL) return pSW;
pSW->nCols = pSchemaWrapper->nCols;
pSW->sver = pSchemaWrapper->sver;
pSW->tagVer = pSchemaWrapper->tagVer;
pSW->colVer = pSchemaWrapper->colVer;
pSW->pSchema = (SSchema*)taosMemoryCalloc(pSW->nCols, sizeof(SSchema));
if (pSW->pSchema == NULL) {
taosMemoryFree(pSW);
......@@ -364,6 +368,8 @@ static FORCE_INLINE int32_t taosEncodeSSchemaWrapper(void** buf, const SSchemaWr
int32_t tlen = 0;
tlen += taosEncodeVariantI32(buf, pSW->nCols);
tlen += taosEncodeVariantI32(buf, pSW->sver);
tlen += taosEncodeVariantI32(buf, pSW->tagVer);
tlen += taosEncodeVariantI32(buf, pSW->colVer);
for (int32_t i = 0; i < pSW->nCols; i++) {
tlen += taosEncodeSSchema(buf, &pSW->pSchema[i]);
}
......@@ -373,6 +379,8 @@ static FORCE_INLINE int32_t taosEncodeSSchemaWrapper(void** buf, const SSchemaWr
static FORCE_INLINE void* taosDecodeSSchemaWrapper(const void* buf, SSchemaWrapper* pSW) {
buf = taosDecodeVariantI32(buf, &pSW->nCols);
buf = taosDecodeVariantI32(buf, &pSW->sver);
buf = taosDecodeVariantI32(buf, &pSW->tagVer);
buf = taosDecodeVariantI32(buf, &pSW->colVer);
pSW->pSchema = (SSchema*)taosMemoryCalloc(pSW->nCols, sizeof(SSchema));
if (pSW->pSchema == NULL) {
return NULL;
......@@ -387,6 +395,8 @@ static FORCE_INLINE void* taosDecodeSSchemaWrapper(const void* buf, SSchemaWrapp
static FORCE_INLINE int32_t tEncodeSSchemaWrapper(SEncoder* pEncoder, const SSchemaWrapper* pSW) {
if (tEncodeI32v(pEncoder, pSW->nCols) < 0) return -1;
if (tEncodeI32v(pEncoder, pSW->sver) < 0) return -1;
if (tEncodeI32v(pEncoder, pSW->tagVer) < 0) return -1;
if (tEncodeI32v(pEncoder, pSW->colVer) < 0) return -1;
for (int32_t i = 0; i < pSW->nCols; i++) {
if (tEncodeSSchema(pEncoder, &pSW->pSchema[i]) < 0) return -1;
}
......@@ -397,6 +407,8 @@ static FORCE_INLINE int32_t tEncodeSSchemaWrapper(SEncoder* pEncoder, const SSch
static FORCE_INLINE int32_t tDecodeSSchemaWrapper(SDecoder* pDecoder, SSchemaWrapper* pSW) {
if (tDecodeI32v(pDecoder, &pSW->nCols) < 0) return -1;
if (tDecodeI32v(pDecoder, &pSW->sver) < 0) return -1;
if (tDecodeI32v(pDecoder, &pSW->tagVer) < 0) return -1;
if (tDecodeI32v(pDecoder, &pSW->colVer) < 0) return -1;
pSW->pSchema = (SSchema*)taosMemoryCalloc(pSW->nCols, sizeof(SSchema));
if (pSW->pSchema == NULL) return -1;
......@@ -410,6 +422,8 @@ static FORCE_INLINE int32_t tDecodeSSchemaWrapper(SDecoder* pDecoder, SSchemaWra
static FORCE_INLINE int32_t tDecodeSSchemaWrapperEx(SDecoder* pDecoder, SSchemaWrapper* pSW) {
if (tDecodeI32v(pDecoder, &pSW->nCols) < 0) return -1;
if (tDecodeI32v(pDecoder, &pSW->sver) < 0) return -1;
if (tDecodeI32v(pDecoder, &pSW->tagVer) < 0) return -1;
if (tDecodeI32v(pDecoder, &pSW->colVer) < 0) return -1;
pSW->pSchema = (SSchema*)tDecoderMalloc(pDecoder, pSW->nCols * sizeof(SSchema));
if (pSW->pSchema == NULL) return -1;
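tagVer and colVer now travel with every serialized SSchemaWrapper, so the variant-coding path (taosEncode*/taosDecode*) and the SEncoder path (tEncode*/tDecode*) must stay in step. A round-trip sketch of the variant-coding helpers; the NULL-buffer first pass that only counts bytes is an assumption based on how these tcoding helpers are conventionally used, and twoCols stands for a caller-provided SSchema array:

static void schemaWrapperRoundTrip(SSchema* twoCols) {
  SSchemaWrapper sw = {.nCols = 2, .sver = 1, .tagVer = 1, .colVer = 1, .pSchema = twoCols};

  int32_t len = taosEncodeSSchemaWrapper(NULL, &sw);   /* first pass: compute length only */
  void*   buf = taosMemoryCalloc(1, len);
  void*   pos = buf;
  taosEncodeSSchemaWrapper(&pos, &sw);                 /* second pass: write the bytes */

  SSchemaWrapper out = {0};
  if (taosDecodeSSchemaWrapper(buf, &out) == NULL) {
    /* allocation failed inside the decoder */
  }
  taosMemoryFree(out.pSchema);                         /* decoder allocates pSchema for the caller */
  taosMemoryFree(buf);
}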
......@@ -455,6 +469,7 @@ int32_t tDeserializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
int8_t alterType;
int32_t verInBlock;
int32_t numOfFields;
SArray* pFields;
int32_t ttl;
......@@ -1480,6 +1495,7 @@ typedef struct {
typedef struct {
int64_t consumerId;
char cgroup[TSDB_CGROUP_LEN];
char clientId[256];
SArray* topicNames; // SArray<char**>
} SCMSubscribeReq;
......@@ -1487,6 +1503,7 @@ static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubsc
int32_t tlen = 0;
tlen += taosEncodeFixedI64(buf, pReq->consumerId);
tlen += taosEncodeString(buf, pReq->cgroup);
tlen += taosEncodeString(buf, pReq->clientId);
int32_t topicNum = taosArrayGetSize(pReq->topicNames);
tlen += taosEncodeFixedI32(buf, topicNum);
......@@ -1500,6 +1517,7 @@ static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubsc
static FORCE_INLINE void* tDeserializeSCMSubscribeReq(void* buf, SCMSubscribeReq* pReq) {
buf = taosDecodeFixedI64(buf, &pReq->consumerId);
buf = taosDecodeStringTo(buf, pReq->cgroup);
buf = taosDecodeStringTo(buf, pReq->clientId);
int32_t topicNum;
buf = taosDecodeFixedI32(buf, &topicNum);
......@@ -1630,6 +1648,15 @@ typedef struct {
int32_t tSerializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
int32_t tDeserializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
typedef struct {
char topic[TSDB_TOPIC_FNAME_LEN];
char cgroup[TSDB_CGROUP_LEN];
int8_t igNotExists;
} SMDropCgroupReq;
int32_t tSerializeSMDropCgroupReq(void* buf, int32_t bufLen, SMDropCgroupReq* pReq);
int32_t tDeserializeSMDropCgroupReq(void* buf, int32_t bufLen, SMDropCgroupReq* pReq);
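SMDropCgroupReq follows the same tSerialize/tDeserialize pattern as the other mnode requests. A sketch, assuming the usual convention that a NULL buffer makes the serializer return the required length; the topic and consumer-group names are placeholders:

static void dropCgroupReqRoundTrip(void) {
  SMDropCgroupReq req = {0};
  snprintf(req.topic, sizeof(req.topic), "%s", "1.test_topic");   /* hypothetical topic */
  snprintf(req.cgroup, sizeof(req.cgroup), "%s", "cg1");          /* hypothetical cgroup */
  req.igNotExists = 1;

  int32_t len = tSerializeSMDropCgroupReq(NULL, 0, &req);   /* assumed: NULL buf => size only */
  void*   buf = taosMemoryCalloc(1, len);
  tSerializeSMDropCgroupReq(buf, len, &req);

  SMDropCgroupReq out = {0};
  tDeserializeSMDropCgroupReq(buf, len, &out);
  taosMemoryFree(buf);
}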
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
int8_t alterType;
......@@ -2562,18 +2589,6 @@ static FORCE_INLINE void tDeleteSMqAskEpRsp(SMqAskEpRsp* pRsp) {
taosArrayDestroyEx(pRsp->topics, (void (*)(void*))tDeleteSMqSubTopicEp);
}
typedef struct {
int64_t streamId;
int32_t taskId;
int32_t sourceVg;
int64_t sourceVer;
SArray* data; // SArray<SSDataBlock>
} SStreamDispatchReq;
typedef struct {
int8_t inputStatus;
} SStreamDispatchRsp;
#define TD_AUTO_CREATE_TABLE 0x1
typedef struct {
int64_t suid;
......
......@@ -60,9 +60,9 @@ typedef struct {
ReportStartup reportStartupFp;
} SMsgCb;
void tmsgSetDefaultMsgCb(const SMsgCb* pMsgCb);
int32_t tmsgPutToQueue(const SMsgCb* pMsgCb, EQueueType qtype, SRpcMsg* pMsg);
int32_t tmsgGetQueueSize(const SMsgCb* pMsgCb, int32_t vgId, EQueueType qtype);
void tmsgSetDefault(const SMsgCb* msgcb);
int32_t tmsgPutToQueue(const SMsgCb* msgcb, EQueueType qtype, SRpcMsg* pMsg);
int32_t tmsgGetQueueSize(const SMsgCb* msgcb, int32_t vgId, EQueueType qtype);
int32_t tmsgSendReq(const SEpSet* epSet, SRpcMsg* pMsg);
void tmsgSendRsp(SRpcMsg* pMsg);
void tmsgSendRedirectRsp(SRpcMsg* pMsg, const SEpSet* pNewEpSet);
......
......@@ -200,6 +200,10 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_TASK_WRITE_EXEC, "vnode-task-write-exec", SStreamTaskExecReq, SStreamTaskExecRsp)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TASK_RUN, "vnode-stream-task-run", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TASK_DISPATCH, "vnode-stream-task-dispatch", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TASK_RECOVER, "vnode-stream-task-recover", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CREATE_SMA, "vnode-create-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL)
......
......@@ -179,6 +179,8 @@ typedef struct {
} \
} while (0)
//TODO: use varchar(0) to represent NULL type
#define IS_NULL_TYPE(_t) ((_t) == TSDB_DATA_TYPE_NULL)
#define IS_SIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_TINYINT && (_t) <= TSDB_DATA_TYPE_BIGINT)
#define IS_UNSIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_UTINYINT && (_t) <= TSDB_DATA_TYPE_UBIGINT)
#define IS_FLOAT_TYPE(_t) ((_t) == TSDB_DATA_TYPE_FLOAT || (_t) == TSDB_DATA_TYPE_DOUBLE)
......
......@@ -333,23 +333,23 @@ SSdbRow *sdbAllocRow(int32_t objSize);
void *sdbGetRowObj(SSdbRow *pRow);
typedef struct SSdb {
SMnode *pMnode;
char *currDir;
char *syncDir;
char *tmpDir;
int64_t lastCommitVer;
int64_t curVer;
int64_t tableVer[SDB_MAX];
int64_t maxId[SDB_MAX];
EKeyType keyTypes[SDB_MAX];
SHashObj *hashObjs[SDB_MAX];
SRWLatch locks[SDB_MAX];
SdbInsertFp insertFps[SDB_MAX];
SdbUpdateFp updateFps[SDB_MAX];
SdbDeleteFp deleteFps[SDB_MAX];
SdbDeployFp deployFps[SDB_MAX];
SdbEncodeFp encodeFps[SDB_MAX];
SdbDecodeFp decodeFps[SDB_MAX];
SMnode *pMnode;
char *currDir;
char *syncDir;
char *tmpDir;
int64_t lastCommitVer;
int64_t curVer;
int64_t tableVer[SDB_MAX];
int64_t maxId[SDB_MAX];
EKeyType keyTypes[SDB_MAX];
SHashObj *hashObjs[SDB_MAX];
TdThreadRwlock locks[SDB_MAX];
SdbInsertFp insertFps[SDB_MAX];
SdbUpdateFp updateFps[SDB_MAX];
SdbDeleteFp deleteFps[SDB_MAX];
SdbDeployFp deployFps[SDB_MAX];
SdbEncodeFp encodeFps[SDB_MAX];
SdbDecodeFp decodeFps[SDB_MAX];
} SSdb;
#ifdef __cplusplus
......
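Replacing SRWLatch with TdThreadRwlock gives each sdb table a real reader/writer lock instead of a spinning latch. Lock usage would follow the taosThreadRwlock* wrappers from the os layer; the wrapper names and SDB_USER as an example table id are assumptions, and init/destroy are omitted:

static void sdbTableAccessSketch(SSdb* pSdb) {
  taosThreadRwlockRdlock(&pSdb->locks[SDB_USER]);    /* shared lock for lookups */
  /* ... look up rows in pSdb->hashObjs[SDB_USER] ... */
  taosThreadRwlockUnlock(&pSdb->locks[SDB_USER]);

  taosThreadRwlockWrlock(&pSdb->locks[SDB_USER]);    /* exclusive lock for writes */
  /* ... insert / update / delete ... */
  taosThreadRwlockUnlock(&pSdb->locks[SDB_USER]);
}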
......@@ -59,6 +59,11 @@ typedef struct SMetaData {
SArray *pQnodeList; // qnode list, SArray<SQueryNodeAddr>
} SMetaData;
typedef struct STbSVersion {
char* tbFName;
int32_t sver;
} STbSVersion;
typedef struct SCatalogCfg {
uint32_t maxTblCacheNum;
uint32_t maxDBCacheNum;
......@@ -165,6 +170,8 @@ int32_t catalogUpdateSTableMeta(SCatalog* pCatalog, STableMetaRsp *rspMsg);
*/
int32_t catalogRefreshDBVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const char* dbFName);
int32_t catalogChkTbMetaVersion(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, SArray* pTables);
/**
* Force refresh a table's local cached meta data.
* @param pCatalog (input, got with catalogGetHandle)
......
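catalogChkTbMetaVersion takes an SArray of the new STbSVersion records, letting a client ask the catalog to validate cached table meta for several tables in one call. A sketch, assuming the standard taosArray helpers and an existing catalog/rpc context (pCtg, pTrans, pMgmtEps); the table name and version are hypothetical:

static int32_t checkTableVersionSketch(SCatalog* pCtg, void* pTrans, const SEpSet* pMgmtEps) {
  SArray* pTbs = taosArrayInit(1, sizeof(STbSVersion));
  STbSVersion v = {.tbFName = (char*)"db1.stb1", .sver = 3};   /* hypothetical table/version */
  taosArrayPush(pTbs, &v);

  int32_t code = catalogChkTbMetaVersion(pCtg, pTrans, pMgmtEps, pTbs);
  taosArrayDestroy(pTbs);
  return code;
}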
......@@ -42,7 +42,7 @@ typedef struct SReadHandle {
#define STREAM_DATA_TYPE_SSDATA_BLOCK 0x2
typedef enum {
OPTR_EXEC_MODEL_BATCH = 0x1,
OPTR_EXEC_MODEL_BATCH = 0x1,
OPTR_EXEC_MODEL_STREAM = 0x2,
} EOPTR_EXEC_MODEL;
......@@ -81,7 +81,7 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
* @param isAdd
* @return
*/
int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, SArray* tableIdList, bool isAdd);
int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd);
/**
* Create the exec task object according to task json
......@@ -95,6 +95,15 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, SArray* tableIdList, bool isA
int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, struct SSubplan* pPlan,
qTaskInfo_t* pTaskInfo, DataSinkHandle* handle, EOPTR_EXEC_MODEL model);
/**
*
* @param tinfo
* @param sversion
* @param tversion
* @return
*/
int32_t qGetQueriedTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion, int32_t* tversion);
/**
* The main task execution function, including query on both table and multiple tables,
* which are decided according to the tag or table name query conditions
......@@ -169,7 +178,7 @@ int32_t qUpdateQueriedTableIdList(qTaskInfo_t tinfo, int64_t uid, int32_t type);
void qProcessFetchRsp(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet);
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t *resNum, SExplainExecInfo **pRes);
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t* resNum, SExplainExecInfo** pRes);
#ifdef __cplusplus
}
......
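qGetQueriedTableSchemaVersion (declared a few lines above) lets the caller read back which table and schema/tag versions a running task is bound to, so a mid-query schema change can be detected. A sketch of the expected call shape; the buffer lengths come from tdef.h and the mismatch handling is illustrative:

static void checkTaskSchemaVersion(qTaskInfo_t tinfo) {
  char    dbName[TSDB_DB_NAME_LEN] = {0};
  char    tbName[TSDB_TABLE_NAME_LEN] = {0};
  int32_t sver = 0, tver = 0;
  if (qGetQueriedTableSchemaVersion(tinfo, dbName, tbName, &sver, &tver) == 0) {
    /* compare sver/tver with the cached table meta; a mismatch means the schema
     * changed after the plan was built and the query should be re-planned */
  }
}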
......@@ -142,6 +142,8 @@ void fmFuncMgtDestroy();
int32_t fmGetFuncInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc);
bool fmIsBuiltinFunc(const char* pFunc);
bool fmIsAggFunc(int32_t funcId);
bool fmIsScalarFunc(int32_t funcId);
bool fmIsNonstandardSQLFunc(int32_t funcId);
......
......@@ -78,7 +78,7 @@ typedef struct SAlterDatabaseStmt {
typedef struct STableOptions {
ENodeType type;
char comment[TSDB_STB_COMMENT_LEN];
char comment[TSDB_TB_COMMENT_LEN];
int32_t delay;
float filesFactor;
SNodeList* pRollupFuncs;
......@@ -90,7 +90,7 @@ typedef struct SColumnDefNode {
ENodeType type;
char colName[TSDB_COL_NAME_LEN];
SDataType dataType;
char comments[TSDB_STB_COMMENT_LEN];
char comments[TSDB_TB_COMMENT_LEN];
bool sma;
} SColumnDefNode;
......
......@@ -208,6 +208,7 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_SORT,
QUERY_NODE_PHYSICAL_PLAN_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_FILL,
QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW,
QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW,
......
......@@ -236,6 +236,7 @@ typedef struct SSelectStmt {
bool isTimeOrderQuery;
bool hasAggFuncs;
bool hasRepeatScanFuncs;
bool hasNonstdSQLFunc;
} SSelectStmt;
typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType;
......@@ -248,6 +249,7 @@ typedef struct SSetOperator {
SNode* pRight;
SNodeList* pOrderByList; // SOrderByExprNode
SNode* pLimit;
char stmtName[TSDB_TABLE_NAME_LEN];
} SSetOperator;
typedef enum ESqlClause {
......@@ -349,9 +351,6 @@ bool nodesIsComparisonOp(const SOperatorNode* pOp);
bool nodesIsJsonOp(const SOperatorNode* pOp);
bool nodesIsRegularOp(const SOperatorNode* pOp);
bool nodesIsTimeorderQuery(const SNode* pQuery);
bool nodesIsTimelineQuery(const SNode* pQuery);
void* nodesGetValueFromNode(SValueNode* pNode);
int32_t nodesSetValueNodeValue(SValueNode* pNode, void* value);
char* nodesGetStrValueFromNode(SValueNode* pNode);
......
......@@ -182,8 +182,10 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define SET_META_TYPE_TABLE(t) (t) = META_TYPE_TABLE
#define SET_META_TYPE_BOTH_TABLE(t) (t) = META_TYPE_BOTH_TABLE
#define NEED_CLIENT_RM_TBLMETA_ERROR(_code) \
((_code) == TSDB_CODE_PAR_TABLE_NOT_EXIST || (_code) == TSDB_CODE_VND_TB_NOT_EXIST)
#define NEED_CLIENT_RM_TBLMETA_ERROR(_code) \
((_code) == TSDB_CODE_PAR_TABLE_NOT_EXIST || (_code) == TSDB_CODE_VND_TB_NOT_EXIST || \
(_code) == TSDB_CODE_PAR_INVALID_COLUMNS_NUM || (_code) == TSDB_CODE_PAR_INVALID_COLUMN || \
(_code) == TSDB_CODE_PAR_TAGS_NOT_MATCHED)
#define NEED_CLIENT_REFRESH_VG_ERROR(_code) \
((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID)
#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_TABLE_RECREATED)
......@@ -194,7 +196,7 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define NEED_SCHEDULER_RETRY_ERROR(_code) \
((_code) == TSDB_CODE_RPC_REDIRECT || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL)
#define REQUEST_MAX_TRY_TIMES 5
#define REQUEST_MAX_TRY_TIMES 1
#define qFatal(...) \
do { \
......@@ -220,23 +222,23 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
taosPrintLog("QRY ", DEBUG_INFO, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qDebug(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
taosPrintLog("QRY ", DEBUG_DEBUG, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
} \
#define qDebug(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
taosPrintLog("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qTrace(...) \
do { \
if (qDebugFlag & DEBUG_TRACE) { \
taosPrintLog("QRY ", DEBUG_TRACE, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
} \
#define qTrace(...) \
do { \
if (qDebugFlag & DEBUG_TRACE) { \
taosPrintLog("QRY ", DEBUG_TRACE, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qDebugL(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
taosPrintLongString("QRY ", DEBUG_DEBUG, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
} \
#define qDebugL(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
taosPrintLongString("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define QRY_ERR_RET(c) \
......
......@@ -72,7 +72,7 @@ int32_t schedulerInit(SSchedulerCfg *cfg);
* @param nodeList Qnode/Vnode address list, element is SQueryNodeAddr
* @return
*/
int32_t schedulerExecJob(void *transport, SArray *nodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql, int64_t startTs, bool needRes, SQueryResult *pRes);
int32_t schedulerExecJob(void *transport, SArray *nodeList, SQueryPlan *pDag, int64_t *pJob, const char *sql, int64_t startTs, SQueryResult *pRes);
/**
* Process the query job, generated according to the query physical plan.
......
......@@ -107,6 +107,7 @@ static FORCE_INLINE void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit)
if (ref == 0) {
taosMemoryFree(pDataSubmit->data);
taosMemoryFree(pDataSubmit->dataRef);
// taosFreeQitem(pDataSubmit);
}
}
......@@ -279,6 +280,42 @@ typedef struct {
SArray* res; // SArray<SSDataBlock>
} SStreamSinkReq;
typedef struct {
SMsgHead head;
int64_t streamId;
int32_t taskId;
} SStreamTaskRunReq;
typedef struct {
int64_t streamId;
int32_t taskId;
int32_t sourceTaskId;
int32_t sourceVg;
#if 0
int64_t sourceVer;
#endif
SArray* data; // SArray<SSDataBlock>
} SStreamDispatchReq;
typedef struct {
int64_t streamId;
int32_t taskId;
int8_t inputStatus;
} SStreamDispatchRsp;
typedef struct {
int64_t streamId;
int32_t taskId;
int32_t sourceTaskId;
int32_t sourceVg;
} SStreamTaskRecoverReq;
typedef struct {
int64_t streamId;
int32_t taskId;
int8_t inputStatus;
} SStreamTaskRecoverRsp;
int32_t streamEnqueueDataSubmit(SStreamTask* pTask, SStreamDataSubmit* input);
int32_t streamEnqueueDataBlk(SStreamTask* pTask, SStreamDataBlock* input);
int32_t streamDequeueOutput(SStreamTask* pTask, void** output);
......@@ -289,6 +326,12 @@ int32_t streamTaskRun(SStreamTask* pTask);
int32_t streamTaskHandleInput(SStreamTask* pTask, void* data);
int32_t streamTaskProcessRunReq(SStreamTask* pTask, SMsgCb* pMsgCb);
int32_t streamTaskProcessDispatchReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchReq* pReq, SRpcMsg* pMsg);
int32_t streamTaskProcessDispatchRsp(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchRsp* pRsp);
int32_t streamTaskProcessRecoverReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg);
int32_t streamTaskProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp);
#ifdef __cplusplus
}
#endif
......
......@@ -20,20 +20,23 @@
extern "C" {
#endif
#include <stdbool.h>
#include <stdint.h>
//#include <tdatablock.h>
#include "cJSON.h"
#include "tdef.h"
//#include "taosdef.h"
//#include "trpc.h"
//#include "wal.h"
#include "tmsgcb.h"
#define SYNC_INDEX_BEGIN 0
#define SYNC_INDEX_INVALID -1
typedef uint64_t SyncNodeId;
typedef int32_t SyncGroupId;
typedef int64_t SyncIndex;
typedef uint64_t SyncTerm;
typedef struct SSyncNode SSyncNode;
typedef struct SSyncBuffer SSyncBuffer;
typedef struct SWal SWal;
typedef struct SSyncRaftEntry SSyncRaftEntry;
typedef enum {
TAOS_SYNC_STATE_FOLLOWER = 100,
TAOS_SYNC_STATE_CANDIDATE = 101,
......@@ -41,6 +44,17 @@ typedef enum {
TAOS_SYNC_STATE_ERROR = 103,
} ESyncState;
typedef enum {
TAOS_SYNC_PROPOSE_SUCCESS = 0,
TAOS_SYNC_PROPOSE_NOT_LEADER = 1,
TAOS_SYNC_PROPOSE_OTHER_ERROR = 2,
} ESyncProposeCode;
typedef enum {
TAOS_SYNC_FSM_CB_SUCCESS = 0,
TAOS_SYNC_FSM_CB_OTHER_ERROR = 1,
} ESyncFsmCbCode;
typedef struct SNodeInfo {
uint16_t nodePort;
char nodeFqdn[TSDB_FQDN_LEN];
......@@ -58,11 +72,6 @@ typedef struct SSnapshot {
SyncTerm lastApplyTerm;
} SSnapshot;
typedef enum {
TAOS_SYNC_FSM_CB_SUCCESS = 0,
TAOS_SYNC_FSM_CB_OTHER_ERROR,
} ESyncFsmCbCode;
typedef struct SFsmCbMeta {
SyncIndex index;
bool isWeak;
......@@ -71,27 +80,15 @@ typedef struct SFsmCbMeta {
uint64_t seqNum;
} SFsmCbMeta;
struct SRpcMsg;
typedef struct SRpcMsg SRpcMsg;
typedef struct SSyncFSM {
void* data;
void (*FpCommitCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
void (*FpPreCommitCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
void (*FpRollBackCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
int32_t (*FpRestoreSnapshot)(struct SSyncFSM* pFsm, const SSnapshot* snapshot);
} SSyncFSM;
struct SSyncRaftEntry;
typedef struct SSyncRaftEntry SSyncRaftEntry;
#define SYNC_INDEX_BEGIN 0
#define SYNC_INDEX_INVALID -1
// abstract definition of log store in raft
// SWal implements it
typedef struct SSyncLogStore {
......@@ -120,11 +117,6 @@ typedef struct SSyncLogStore {
} SSyncLogStore;
struct SWal;
typedef struct SWal SWal;
struct SEpSet;
typedef struct SEpSet SEpSet;
typedef struct SSyncInfo {
SyncGroupId vgId;
......@@ -132,12 +124,9 @@ typedef struct SSyncInfo {
char path[TSDB_FILENAME_LEN];
SWal* pWal;
SSyncFSM* pFsm;
void* rpcClient;
int32_t (*FpSendMsg)(void* rpcClient, const SEpSet* pEpSet, SRpcMsg* pMsg);
void* queue;
int32_t (*FpEqMsg)(void* queue, SRpcMsg* pMsg);
SMsgCb* msgcb;
int32_t (*FpSendMsg)(const SEpSet* pEpSet, SRpcMsg* pMsg);
int32_t (*FpEqMsg)(const SMsgCb* msgcb, SRpcMsg* pMsg);
} SSyncInfo;
int32_t syncInit();
......@@ -152,27 +141,8 @@ const char* syncGetMyRoleStr(int64_t rid);
SyncTerm syncGetMyTerm(int64_t rid);
void syncGetEpSet(int64_t rid, SEpSet* pEpSet);
int32_t syncGetVgId(int64_t rid);
typedef enum {
TAOS_SYNC_PROPOSE_SUCCESS = 0,
TAOS_SYNC_PROPOSE_NOT_LEADER,
TAOS_SYNC_PROPOSE_OTHER_ERROR,
} ESyncProposeCode;
int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak);
bool syncEnvIsStart();
extern int32_t sDebugFlag;
//-----------------------------------------
struct SSyncNode;
typedef struct SSyncNode SSyncNode;
struct SSyncBuffer;
typedef struct SSyncBuffer SSyncBuffer;
//-----------------------------------------
int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak);
bool syncEnvIsStart();
const char* syncStr(ESyncState state);
#ifdef __cplusplus
......
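ESyncProposeCode is now grouped with the other public sync enums, and callers of syncPropose branch on it to decide whether to redirect to the leader. A sketch, assuming rpcMallocCont/rpcFreeCont from trpc for the message body, an illustrative message type, and that the caller keeps ownership of pCont after a synchronous propose:

static void proposeSketch(int64_t rid, const void* payload, int32_t payloadLen) {
  SRpcMsg msg = {.msgType = TDMT_VND_SUBMIT};        /* message type is illustrative */
  msg.contLen = payloadLen;
  msg.pCont   = rpcMallocCont(payloadLen);           /* assumed trpc allocator */
  memcpy(msg.pCont, payload, payloadLen);

  int32_t ret = syncPropose(rid, &msg, false);       /* rid identifies an opened sync node */
  if (ret == TAOS_SYNC_PROPOSE_NOT_LEADER) {
    SEpSet epSet;
    syncGetEpSet(rid, &epSet);                       /* redirect the client to the leader */
  }
  rpcFreeCont(msg.pCont);                            /* ownership convention assumed */
}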
......@@ -20,13 +20,7 @@
extern "C" {
#endif
#include <stdbool.h>
#include <stdint.h>
//#include <tdatablock.h>
#include "cJSON.h"
//#include "taosdef.h"
#include "trpc.h"
//#include "wal.h"
// ------------------ ds -------------------
typedef struct SRaftId {
......@@ -35,16 +29,12 @@ typedef struct SRaftId {
} SRaftId;
// ------------------ control -------------------
struct SSyncNode;
typedef struct SSyncNode SSyncNode;
SSyncNode* syncNodeAcquire(int64_t rid);
void syncNodeRelease(SSyncNode* pNode);
int32_t syncGetRespRpc(int64_t rid, uint64_t index, SRpcMsg* msg);
int32_t syncGetAndDelRespRpc(int64_t rid, uint64_t index, SRpcMsg* msg);
void syncSetQ(int64_t rid, void* queueHandle);
void syncSetRpc(int64_t rid, void* rpcHandle);
void syncSetMsgCb(int64_t rid, const SMsgCb* msgcb);
char* sync2SimpleStr(int64_t rid);
// set timer ms
......
......@@ -38,7 +38,7 @@ typedef struct {
typedef struct SRpcHandleInfo {
// rpc info
void *handle; // rpc handle returned to app
void * handle; // rpc handle returned to app
int64_t refId; // refid, used by server
int32_t noResp; // has response or not(default 0, 0: resp, 1: no resp);
int32_t persistHandle; // persist handle or not
......@@ -49,13 +49,13 @@ typedef struct SRpcHandleInfo {
void *node; // node mgmt handle
// resp info
void *rsp;
void * rsp;
int32_t rspLen;
} SRpcHandleInfo;
typedef struct SRpcMsg {
tmsg_t msgType;
void *pCont;
void * pCont;
int32_t contLen;
int32_t code;
SRpcHandleInfo info;
......@@ -63,11 +63,6 @@ typedef struct SRpcMsg {
} SRpcMsg;
typedef void (*RpcCfp)(void *parent, SRpcMsg *, SEpSet *rf);
typedef int (*RpcAfp)(void *parent, char *tableId, char *spi, char *encrypt, char *secret, char *ckey);
///
// // SRpcMsg code
// REDIERE,
// NOT READY, EpSet
typedef bool (*RpcRfp)(int32_t code);
typedef struct SRpcInit {
......@@ -80,18 +75,11 @@ typedef struct SRpcInit {
int idleTime; // milliseconds, 0 means idle timer is disabled
// the following is for client app security only
char *user; // user name
char spi; // security parameter index
char encrypt; // encrypt algorithm
char *secret; // key for authentication
char *ckey; // ciphering key
char *user; // user name
// call back to process incoming msg, code shall be ignored by server app
RpcCfp cfp;
// call back to retrieve the client auth info, for server app only
RpcAfp afp;
// user defined retry func
RpcRfp rfp;
......
......@@ -38,6 +38,13 @@ typedef int32_t TdUcs4;
#define wcsncpy WCSNCPY_FUNC_TAOS_FORBID
#define wchar_t WCHAR_T_TYPE_TAOS_FORBID
#define strcasestr STR_CASE_STR_FORBID
#define strtoll STR_TO_LL_FUNC_TAOS_FORBID
#define strtoull STR_TO_ULL_FUNC_TAOS_FORBID
#define strtol STR_TO_L_FUNC_TAOS_FORBID
#define strtoul STR_TO_UL_FUNC_TAOS_FORBID
#define strtod STR_TO_LD_FUNC_TAOS_FORBID
#define strtold STR_TO_D_FUNC_TAOS_FORBID
#define strtof STR_TO_F_FUNC_TAOS_FORBID
#endif
#ifdef WINDOWS
......@@ -72,6 +79,17 @@ int32_t taosWcharsToMbs(char *pStrs, TdWchar *pWchars, int32_t size);
char *taosStrCaseStr(const char *str, const char *pattern);
int64_t taosStr2Int64(const char *str, char** pEnd, int32_t radix);
uint64_t taosStr2UInt64(const char *str, char** pEnd, int32_t radix);
int32_t taosStr2Int32(const char *str, char** pEnd, int32_t radix);
uint32_t taosStr2UInt32(const char *str, char** pEnd, int32_t radix);
int16_t taosStr2Int16(const char *str, char** pEnd, int32_t radix);
uint16_t taosStr2UInt16(const char *str, char** pEnd, int32_t radix);
int8_t taosStr2Int8(const char *str, char** pEnd, int32_t radix);
uint8_t taosStr2UInt8(const char *str, char** pEnd, int32_t radix);
double taosStr2Double(const char *str, char** pEnd);
float taosStr2Float(const char *str, char** pEnd);
#ifdef __cplusplus
}
#endif
......
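With strtoll and friends poisoned by the *_FORBID macros above, call sites move to the taosStr2* wrappers declared here. A small before/after sketch using only the declared signatures; the error handling is illustrative:

static void parseNumbersSketch(const char* s) {
  /* before (now rejected at compile time): int64_t v = strtoll(s, &end, 10); */
  char*   end = NULL;
  int64_t v   = taosStr2Int64(s, &end, 10);   /* radix 10; end points just past the digits */
  double  d   = taosStr2Double(s, &end);
  if (end == s) {
    /* nothing was consumed: treat as a parse error */
  }
  (void)v; (void)d;
}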
......@@ -646,6 +646,11 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_ALTER_TABLE TAOS_DEF_ERROR_CODE(0, 0x2649)
#define TSDB_CODE_PAR_CANNOT_DROP_PRIMARY_KEY TAOS_DEF_ERROR_CODE(0, 0x264A)
#define TSDB_CODE_PAR_INVALID_MODIFY_COL TAOS_DEF_ERROR_CODE(0, 0x264B)
#define TSDB_CODE_PAR_INVALID_TBNAME TAOS_DEF_ERROR_CODE(0, 0x264C)
#define TSDB_CODE_PAR_INVALID_FUNCTION_NAME TAOS_DEF_ERROR_CODE(0, 0x264D)
#define TSDB_CODE_PAR_COMMENT_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x264E)
#define TSDB_CODE_PAR_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x264F)
#define TSDB_CODE_PAR_NOT_ALLOWED_WIN_QUERY TAOS_DEF_ERROR_CODE(0, 0x2650)
//planner
#define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700)
......
......@@ -218,8 +218,8 @@ typedef enum ELogicConditionType {
#define TSDB_MAX_SQL_SHOW_LEN 1024
#define TSDB_MAX_ALLOWED_SQL_LEN (1 * 1024 * 1024u) // sql length should be less than 1mb
#define TSDB_APP_NAME_LEN TSDB_UNI_LEN
#define TSDB_STB_COMMENT_LEN 1024
#define TSDB_APP_NAME_LEN TSDB_UNI_LEN
#define TSDB_TB_COMMENT_LEN 1025
/**
* In some scenarios uint16_t (0~65535) is used to store the row len.
......
......@@ -56,10 +56,10 @@ typedef enum { SSkipListPutSuccess = 0, SSkipListPutEarlyStop = 1, SSkipListPutS
typedef struct SSkipList {
uint32_t seed;
uint16_t len;
__compar_fn_t comparFn;
__sl_key_fn_t keyFn;
TdThreadRwlock *lock;
uint16_t len;
uint8_t maxLevel;
uint8_t flags;
uint8_t type; // static info above
......
[Unit]
Description=Nginx For TDengine Service
After=network-online.target
Wants=network-online.target
[Service]
Type=forking
PIDFile=/usr/local/nginxd/logs/nginx.pid
ExecStart=/usr/local/nginxd/sbin/nginx
ExecStop=/usr/local/nginxd/sbin/nginx -s stop
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
########################################################
# #
# TDengine Configuration #
# Any questions, please email support@taosdata.com #
# #
########################################################
# first fully qualified domain name (FQDN) for TDengine system
# firstEp hostname:6030
# local fully qualified domain name (FQDN)
# fqdn hostname
# first port number for the connection (12 continuous UDP/TCP port number are used)
# serverPort 6030
# log file's directory
# logDir /var/log/taos
# data file's directory
# dataDir /var/lib/taos
# temporary file's directory
# tempDir /tmp/
# the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only
# arbitrator arbitrator_hostname:6042
# number of threads per CPU core
# numOfThreadsPerCore 1.0
# number of threads to commit cache data
# numOfCommitThreads 4
# the proportion of total CPU cores available for query processing
# 2.0: the query threads will be set to double of the CPU cores.
# 1.0: all CPU cores are available for query processing [default].
# 0.5: only half of the CPU cores are available for query.
# 0.0: only one core available.
# ratioOfQueryCores 1.0
# the last_row/first/last aggregator will not change the original column name in the result fields
keepColumnName 1
# number of management nodes in the system
# numOfMnodes 1
# enable/disable backing up the vnode directory when removing a vnode
# vnodeBak 1
# enable/disable installation / usage report
# telemetryReporting 1
# enable/disable load balancing
# balance 1
# role for dnode. 0 - any, 1 - mnode, 2 - dnode
# role 0
# max timer control blocks
# maxTmrCtrl 512
# time interval of system monitor, seconds
# monitorInterval 30
# number of seconds allowed for a dnode to be offline, for cluster only
# offlineThreshold 864000
# RPC re-try timer, millisecond
# rpcTimer 300
# RPC maximum time for ack, seconds.
# rpcMaxTime 600
# time interval of dnode status reporting to mnode, seconds, for cluster only
# statusInterval 1
# time interval of heart beat from shell to dnode, seconds
# shellActivityTimer 3
# minimum sliding window time, milli-second
# minSlidingTime 10
# minimum time window, milli-second
# minIntervalTime 10
# maximum delay before launching a stream computation, milli-second
# maxStreamCompDelay 20000
# maximum delay before launching a stream computation for the first time, milli-second
# maxFirstStreamCompDelay 10000
# retry delay when a stream computation fails, milli-second
# retryStreamCompDelay 10
# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
# streamCompDelayRatio 0.1
# max number of vgroups per db, 0 means configured automatically
# maxVgroupsPerDb 0
# max number of tables per vnode
# maxTablesPerVnode 1000000
# cache block size (Mbyte)
# cache 16
# number of cache blocks per vnode
# blocks 6
# number of days per DB file
# days 10
# number of days to keep DB file
# keep 3650
# minimum rows of records in file block
# minRows 100
# maximum rows of records in file block
# maxRows 4096
# the number of acknowledgments required for successful data writing
# quorum 1
# enable/disable compression
# comp 2
# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fsync; 2: write wal, and call fsync
# walLevel 1
# if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
# fsync 3000
# number of replications, for cluster only
# replica 1
# the compressed rpc message, option:
# -1 (no compression)
# 0 (all message compressed),
# > 0 (rpc message body which larger than this value will be compressed)
# compressMsgSize -1
# query retrieved column data compression option:
# -1 (no compression)
# 0 (all retrieved column data compressed),
# > 0 (any retrieved column size greater than this value all data will be compressed.)
# compressColData -1
# max length of an SQL
# maxSQLLength 65480
# max length of WildCards
# maxWildCardsLength 100
# the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000
# system time zone
# timezone Asia/Shanghai (CST, +0800)
# system time zone (for windows 10)
# timezone UTC-8
# system locale
# locale en_US.UTF-8
# default system charset
# charset UTF-8
# max number of connections allowed in dnode
# maxShellConns 5000
# max number of connections allowed in client
# maxConnections 5000
# stop writing logs when the disk size of the log folder is less than this value
# minimalLogDirGB 1.0
# stop writing temporary files when the disk size of the tmp folder is less than this value
# minimalTmpDirGB 1.0
# if disk free space is less than this value, the taosd service exits directly during startup
# minimalDataDirGB 2.0
# One mnode is equal to the number of vnode consumed
# mnodeEqualVnodeNum 4
# enable/disable http service
# http 1
# enable/disable system monitor
# monitor 1
# enable/disable recording the SQL statements via restful interface
# httpEnableRecordSql 0
# number of threads used to process http requests
# httpMaxThreads 2
# maximum number of rows returned by the restful interface
# restfulRowLimit 10240
# database name must be specified in restful interface if the following parameter is set, off by default
# httpDbNameMandatory 1
# http keep alive, default is 30 seconds
# httpKeepAlive 30000
# The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log filters
# numOfLogLines 10000000
# enable/disable async log
# asyncLog 1
# time of keeping log files, days
# logKeepDays 0
# The following parameters are used for debug purpose only.
# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
# 131: output warning and error
# 135: output debug, warning and error
# 143: output trace, debug, warning and error to log
# 199: output debug, warning and error to both screen and file
# 207: output trace, debug, warning and error to both screen and file
# debug flag for all log type, take effect when non-zero value
# debugFlag 0
# debug flag for meta management messages
# mDebugFlag 135
# debug flag for dnode messages
# dDebugFlag 135
# debug flag for sync module
# sDebugFlag 135
# debug flag for WAL
# wDebugFlag 135
# debug flag for SDB
# sdbDebugFlag 135
# debug flag for RPC
# rpcDebugFlag 131
# debug flag for TAOS TIMER
# tmrDebugFlag 131
# debug flag for TDengine client
# cDebugFlag 131
# debug flag for JNI
# jniDebugFlag 131
# debug flag for storage
# uDebugFlag 131
# debug flag for http server
# httpDebugFlag 131
# debug flag for monitor
# monDebugFlag 131
# debug flag for query
# qDebugFlag 131
# debug flag for vnode
# vDebugFlag 131
# debug flag for TSDB
# tsdbDebugFlag 131
# debug flag for continue query
# cqDebugFlag 131
# enable/disable recording the SQL in taos client
# enableRecordSql 0
# generate core file when service crash
# enableCoreFile 1
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
# enable/disable stream (continuous query)
# stream 1
# in the blocking retrieve model, only 50% of the query threads will be used for query processing in the dnode
# retrieveBlockingModel 0
# the maximum allowed query buffer size in MB during query processing for each data node
# -1 no limit (default)
# 0 no query allowed, queries are disabled
# queryBufferSize -1
# percent of redundant data in tsdb meta that triggers meta data compaction, 0 means do not compact
# tsdbMetaCompactRatio 0
# default string type used for storing JSON String, options can be binary/nchar, default is nchar
# defaultJSONStrType nchar
# force TCP transmission
# rpcForceTcp 0
# unit MB. Flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks
# walFlushSize 1024
# unit Hour. Latency of data migration
# keepTimeOffset 0
########################################################
# #
# TDengine Configuration #
# Any questions, please email support@taosdata.com #
# #
########################################################
# first fully qualified domain name (FQDN) for TDengine system
# firstEp hostname:6030
# local fully qualified domain name (FQDN)
# fqdn hostname
# first port number for the connection (12 continuous UDP/TCP port number are used)
# serverPort 6030
# log file's directory
# logDir /var/log/taos
# data file's directory
# dataDir /var/lib/taos
# temporary file's directory
# tempDir /tmp/
# the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only
# arbitrator arbitrator_hostname:6042
# number of threads per CPU core
# numOfThreadsPerCore 1.0
# number of threads to commit cache data
# numOfCommitThreads 4
# the proportion of total CPU cores available for query processing
# 2.0: the query threads will be set to double of the CPU cores.
# 1.0: all CPU cores are available for query processing [default].
# 0.5: only half of the CPU cores are available for query.
# 0.0: only one core available.
# ratioOfQueryCores 1.0
# the last_row/first/last aggregator will not change the original column name in the result fields
keepColumnName 1
# number of management nodes in the system
# numOfMnodes 1
# enable/disable backuping vnode directory when removing vnode
# vnodeBak 1
# enable/disable installation / usage report
# telemetryReporting 1
# enable/disable load balancing
# balance 1
# role for dnode. 0 - any, 1 - mnode, 2 - dnode
# role 0
# max timer control blocks
# maxTmrCtrl 512
# time interval of system monitor, seconds
# monitorInterval 30
# number of seconds allowed for a dnode to be offline, for cluster only
# offlineThreshold 864000
# RPC re-try timer, millisecond
# rpcTimer 300
# RPC maximum time for ack, seconds.
# rpcMaxTime 600
# time interval of dnode status reporting to mnode, seconds, for cluster only
# statusInterval 1
# time interval of heart beat from shell to dnode, seconds
# shellActivityTimer 3
# minimum sliding window time, milli-second
# minSlidingTime 10
# minimum time window, milli-second
# minIntervalTime 10
# maximum delay before launching a stream computation, milli-second
# maxStreamCompDelay 20000
# maximum delay before launching a stream computation for the first time, milli-second
# maxFirstStreamCompDelay 10000
# retry delay when a stream computation fails, milli-second
# retryStreamCompDelay 10
# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
# streamCompDelayRatio 0.1
# max number of vgroups per db, 0 means configured automatically
# maxVgroupsPerDb 0
# max number of tables per vnode
# maxTablesPerVnode 1000000
# cache block size (Mbyte)
# cache 16
# number of cache blocks per vnode
# blocks 6
# number of days per DB file
# days 10
# number of days to keep DB file
# keep 3650
# minimum rows of records in file block
# minRows 100
# maximum rows of records in file block
# maxRows 4096
# the number of acknowledgments required for successful data writing
# quorum 1
# enable/disable compression
# comp 2
# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync
# walLevel 1
# if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
# fsync 3000
# number of replications, for cluster only
# replica 1
# the compressed rpc message, option:
# -1 (no compression)
# 0 (all message compressed),
# > 0 (rpc message body which larger than this value will be compressed)
# compressMsgSize -1
# query retrieved column data compression option:
# -1 (no compression)
# 0 (all retrieved column data compressed),
# > 0 (any retrieved column size greater than this value all data will be compressed.)
# compressColData -1
# max length of an SQL
# maxSQLLength 65480
# max length of WildCards
# maxWildCardsLength 100
# the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000
# system time zone
# timezone Asia/Shanghai (CST, +0800)
# system time zone (for windows 10)
# timezone UTC-8
# system locale
# locale en_US.UTF-8
# default system charset
# charset UTF-8
# max number of connections allowed in dnode
# maxShellConns 5000
# max number of connections allowed in client
# maxConnections 5000
# stop writing logs when the disk size of the log folder is less than this value
# minimalLogDirGB 1.0
# stop writing temporary files when the disk size of the tmp folder is less than this value
# minimalTmpDirGB 1.0
# if disk free space is less than this value, taosd service exit directly within startup process
# minimalDataDirGB 2.0
# One mnode is equal to the number of vnode consumed
# mnodeEqualVnodeNum 4
# enbale/disable http service
# http 1
# enable/disable system monitor
# monitor 1
# enable/disable recording the SQL statements via restful interface
# httpEnableRecordSql 0
# number of threads used to process http requests
# httpMaxThreads 2
# maximum number of rows returned by the restful interface
# restfulRowLimit 10240
# database name must be specified in restful interface if the following parameter is set, off by default
# httpDbNameMandatory 1
# http keep alive, default is 30 seconds
# httpKeepAlive 30000
# The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log filters
# numOfLogLines 10000000
# enable/disable async log
# asyncLog 1
# time of keeping log files, days
# logKeepDays 0
# The following parameters are used for debug purpose only.
# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
# 131: output warning and error
# 135: output debug, warning and error
# 143: output trace, debug, warning and error to log
# 199: output debug, warning and error to both screen and file
# 207: output trace, debug, warning and error to both screen and file
# debug flag for all log type, take effect when non-zero value
# debugFlag 0
# debug flag for meta management messages
# mDebugFlag 135
# debug flag for dnode messages
# dDebugFlag 135
# debug flag for sync module
# sDebugFlag 135
# debug flag for WAL
# wDebugFlag 135
# debug flag for SDB
# sdbDebugFlag 135
# debug flag for RPC
# rpcDebugFlag 131
# debug flag for TAOS TIMER
# tmrDebugFlag 131
# debug flag for TDengine client
# cDebugFlag 131
# debug flag for JNI
# jniDebugFlag 131
# debug flag for storage
# uDebugFlag 131
# debug flag for http server
# httpDebugFlag 131
# debug flag for monitor
# monDebugFlag 131
# debug flag for query
# qDebugFlag 131
# debug flag for vnode
# vDebugFlag 131
# debug flag for TSDB
# tsdbDebugFlag 131
# debug flag for continue query
# cqDebugFlag 131
# enable/disable recording the SQL in taos client
# enableRecordSql 0
# generate core file when service crash
# enableCoreFile 1
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
# enable/disable stream (continuous query)
# stream 1
# in retrieve blocking model, only in 50% query threads will be used in query processing in dnode
# retrieveBlockingModel 0
# the maximum allowed query buffer size in MB during query processing for each data node
# -1 no limit (default)
# 0 no query allowed, queries are disabled
# queryBufferSize -1
# percent of redundant data in tsdb meta will compact meta data,0 means donot compact
# tsdbMetaCompactRatio 0
# default string type used for storing JSON String, options can be binary/nchar, default is nchar
# defaultJSONStrType nchar
# force TCP transmission
# rpcForceTcp 0
# unit MB. Flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks
# walFlushSize 1024
# unit Hour. Latency of data migration
# keepTimeOffset 0
[Unit]
Description=TDengine server service
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/usr/bin/taosd
ExecStartPre=/usr/local/taos/bin/startPre.sh
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
[Unit]
Description=TDengine arbitrator service
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/usr/bin/tarbitrator
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
#!/bin/bash
#
# This file is used to install database on linux systems. The operating system
# is required to use systemd to manage services at boot
set -e
#set -x
verMode=edge
pagMode=full
iplist=""
serverFqdn=""
# -----------------------Variables definition---------------------
script_dir="../release"
# Dynamic directory
data_dir="/var/lib/taos"
log_dir="/var/log/taos"
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_install_dir="/etc/taos"
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
#install main path
install_main_dir="/usr/local/taos"
# old bin dir
sbin_dir="/usr/local/taos/bin"
temp_version=""
fin_result=""
service_config_dir="/etc/systemd/system"
nginx_port=6060
nginx_dir="/usr/local/nginxd"
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
# ============================= get input parameters =================================================
# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
# set parameters by default value
interactiveFqdn=yes # [yes | no]
verType=server # [server | client]
initType=systemd # [systemd | service | ...]
while getopts "hv:d:" arg
do
case $arg in
d)
#echo "interactiveFqdn=$OPTARG"
script_dir=$( echo $OPTARG )
;;
h)
echo "Usage: `basename $0` -d scripy_path"
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
#echo "verType=${verType} interactiveFqdn=${interactiveFqdn}"
function kill_process() {
pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function check_file() {
#check file whether exists
if [ ! -e $1/$2 ];then
echo -e "$1/$2 \033[31mnot exists\033[0m!quit"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
}
function get_package_name() {
var=$1
if [[ $1 =~ 'aarch' ]];then
echo ${var::-21}
else
echo ${var::-17}
fi
}
function check_link() {
#check Link whether exists or broken
if [ -L $1 ] ; then
if [ ! -e $1 ] ; then
echo -e "$1 \033[31Broken link\033[0m"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
else
echo -e "$1 \033[31mnot exists\033[0m!quit"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
}
function check_main_path() {
#check install main dir and all sub dir
main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d")
for i in "${main_dir[@]}";do
check_file ${install_main_dir} $i
done
if [ "$verMode" == "cluster" ]; then
nginx_main_dir=("admin" "conf" "html" "sbin" "logs")
for i in "${nginx_main_dir[@]}";do
check_file ${nginx_dir} $i
done
fi
echo -e "Check main path:\033[32mOK\033[0m!"
}
function check_bin_path() {
# check install bin dir and all sub dir
bin_dir=("taos" "taosd" "taosadapter" "taosdemo" "remove.sh" "tarbitrator" "set_core.sh")
for i in "${bin_dir[@]}";do
check_file ${sbin_dir} $i
done
lbin_dir=("taos" "taosd" "taosadapter" "taosdemo" "rmtaos" "tarbitrator" "set_core")
for i in "${lbin_dir[@]}";do
check_link ${bin_link_dir}/$i
done
if [ "$verMode" == "cluster" ]; then
check_file ${nginx_dir}/sbin nginx
fi
echo -e "Check bin path:\033[32mOK\033[0m!"
}
function check_lib_path() {
# check all links
check_link ${lib_link_dir}/libtaos.so
check_link ${lib_link_dir}/libtaos.so.1
if [[ -d ${lib64_link_dir} ]]; then
check_link ${lib64_link_dir}/libtaos.so
check_link ${lib64_link_dir}/libtaos.so.1
fi
echo -e "Check lib path:\033[32mOK\033[0m!"
}
function check_header_path() {
# check all header
header_dir=("taos.h" "taosdef.h" "taoserror.h")
for i in "${header_dir[@]}";do
check_link ${inc_link_dir}/$i
done
echo -e "Check bin path:\033[32mOK\033[0m!"
}
function check_taosadapter_config_dir() {
# check all config
check_file ${cfg_install_dir} taosadapter.toml
check_file ${cfg_install_dir} taosadapter.service
check_file ${install_main_dir}/cfg taosadapter.toml.org
echo -e "Check conf path:\033[32mOK\033[0m!"
}
function check_config_dir() {
# check all config
check_file ${cfg_install_dir} taos.cfg
check_file ${install_main_dir}/cfg taos.cfg.org
echo -e "Check conf path:\033[32mOK\033[0m!"
}
function check_log_path() {
# check log path
check_file ${log_dir}
echo -e "Check log path:\033[32mOK\033[0m!"
}
function check_data_path() {
# check data path
check_file ${data_dir}
echo -e "Check data path:\033[32mOK\033[0m!"
}
function install_TDengine() {
cd ${script_dir}
tar zxf $1
temp_version=$(get_package_name $1)
cd $(get_package_name $1)
echo -e "\033[32muninstall TDengine && install TDengine...\033[0m"
rmtaos >/dev/null 2>&1 || echo 'taosd not installed' && echo -e '\n\n' |./install.sh >/dev/null 2>&1
echo -e "\033[32mTDengine has been installed!\033[0m"
echo -e "\033[32mTDengine is starting...\033[0m"
kill_process taos && systemctl start taosd && sleep 10
}
function test_TDengine() {
check_main_path
check_bin_path
check_lib_path
check_header_path
check_config_dir
check_taosadapter_config_dir
check_log_path
check_data_path
result=`taos -s 'create database test ;create table test.tt(ts timestamp ,i int);insert into test.tt values(now,11);select * from test.tt' 2>&1 ||:`
if [[ $result =~ "Unable to establish" ]];then
echo -e "\033[31mTDengine connect failed\033[0m"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
echo -e "Check TDengine connect:\033[32mOK\033[0m!"
fin_result=$fin_result"\033[32m$temp_version\033[0m test OK!\n"
}
# ## ==============================Main program starts from here============================
TD_package_name=`ls ${script_dir}/*server*gz |awk -F '/' '{print $NF}' `
temp=`pwd`
for i in $TD_package_name;do
if [[ $i =~ 'enterprise' ]];then
verMode="cluster"
else
verMode=""
fi
cd $temp
install_TDengine $i
test_TDengine
done
echo "============================================================"
echo -e $fin_result
#!/bin/bash
#
# This file is used to install the TDengine server packages and verify the installation on linux systems. The operating system
# is required to use systemd to manage services at boot
set -e
#set -x
verMode=edge
pagMode=full
iplist=""
serverFqdn=""
# -----------------------Variables definition---------------------
script_dir="../release"
# Dynamic directory
data_dir="/var/lib/taos"
log_dir="/var/log/taos"
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_install_dir="/etc/taos"
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
#install main path
install_main_dir="/usr/local/taos"
# old bin dir
sbin_dir="/usr/local/taos/bin"
temp_version=""
fin_result=""
service_config_dir="/etc/systemd/system"
nginx_port=6060
nginx_dir="/usr/local/nginxd"
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
# ============================= get input parameters =================================================
# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
# set parameters by default value
interactiveFqdn=yes # [yes | no]
verType=server # [server | client]
initType=systemd # [systemd | service | ...]
while getopts "hv:d:" arg
do
case $arg in
d)
#echo "interactiveFqdn=$OPTARG"
script_dir=$( echo $OPTARG )
;;
h)
echo "Usage: `basename $0` -d scripy_path"
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
#echo "verType=${verType} interactiveFqdn=${interactiveFqdn}"
function kill_process() {
pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
}
function check_file() {
#check file whether exists
if [ ! -e $1/$2 ];then
echo -e "$1/$2 \033[31mnot exists\033[0m!quit"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
}
function get_package_name() {
var=$1
if [[ $1 =~ 'aarch' ]];then
echo ${var::-21}
else
echo ${var::-17}
fi
}
function check_link() {
#check Link whether exists or broken
if [ -L $1 ] ; then
if [ ! -e $1 ] ; then
echo -e "$1 \033[31Broken link\033[0m"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
else
echo -e "$1 \033[31mnot exists\033[0m!quit"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
}
function check_main_path() {
#check install main dir and all sub dir
main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d")
for i in "${main_dir[@]}";do
check_file ${install_main_dir} $i
done
if [ "$verMode" == "cluster" ]; then
nginx_main_dir=("admin" "conf" "html" "sbin" "logs")
for i in "${nginx_main_dir[@]}";do
check_file ${nginx_dir} $i
done
fi
echo -e "Check main path:\033[32mOK\033[0m!"
}
function check_bin_path() {
# check install bin dir and all sub dir
bin_dir=("taos" "taosd" "taosadapter" "taosdemo" "remove.sh" "tarbitrator" "set_core.sh")
for i in "${bin_dir[@]}";do
check_file ${sbin_dir} $i
done
lbin_dir=("taos" "taosd" "taosadapter" "taosdemo" "rmtaos" "tarbitrator" "set_core")
for i in "${lbin_dir[@]}";do
check_link ${bin_link_dir}/$i
done
if [ "$verMode" == "cluster" ]; then
check_file ${nginx_dir}/sbin nginx
fi
echo -e "Check bin path:\033[32mOK\033[0m!"
}
function check_lib_path() {
# check all links
check_link ${lib_link_dir}/libtaos.so
check_link ${lib_link_dir}/libtaos.so.1
if [[ -d ${lib64_link_dir} ]]; then
check_link ${lib64_link_dir}/libtaos.so
check_link ${lib64_link_dir}/libtaos.so.1
fi
echo -e "Check lib path:\033[32mOK\033[0m!"
}
function check_header_path() {
# check all header
header_dir=("taos.h" "taosdef.h" "taoserror.h")
for i in "${header_dir[@]}";do
check_link ${inc_link_dir}/$i
done
echo -e "Check bin path:\033[32mOK\033[0m!"
}
function check_taosadapter_config_dir() {
# check all config
check_file ${cfg_install_dir} taosadapter.toml
check_file ${cfg_install_dir} taosadapter.service
check_file ${install_main_dir}/cfg taosadapter.toml.org
echo -e "Check conf path:\033[32mOK\033[0m!"
}
function check_config_dir() {
# check all config
check_file ${cfg_install_dir} taos.cfg
check_file ${install_main_dir}/cfg taos.cfg.org
echo -e "Check conf path:\033[32mOK\033[0m!"
}
function check_log_path() {
# check log path
check_file ${log_dir}
echo -e "Check log path:\033[32mOK\033[0m!"
}
function check_data_path() {
# check data path
check_file ${data_dir}
echo -e "Check data path:\033[32mOK\033[0m!"
}
function install_TDengine() {
cd ${script_dir}
tar zxf $1
temp_version=$(get_package_name $1)
cd $(get_package_name $1)
echo -e "\033[32muninstall TDengine && install TDengine...\033[0m"
rmtaos >/dev/null 2>&1 || echo 'taosd not installed' && echo -e '\n\n' |./install.sh >/dev/null 2>&1
echo -e "\033[32mTDengine has been installed!\033[0m"
echo -e "\033[32mTDengine is starting...\033[0m"
kill_process taos && systemctl start taosd && sleep 10
}
function test_TDengine() {
check_main_path
check_bin_path
check_lib_path
check_header_path
check_config_dir
check_taosadapter_config_dir
check_log_path
check_data_path
result=`taos -s 'create database test ;create table test.tt(ts timestamp ,i int);insert into test.tt values(now,11);select * from test.tt' 2>&1 ||:`
if [[ $result =~ "Unable to establish" ]];then
echo -e "\033[31mTDengine connect failed\033[0m"
fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
echo -e $fin_result
exit 8
fi
echo -e "Check TDengine connect:\033[32mOK\033[0m!"
fin_result=$fin_result"\033[32m$temp_version\033[0m test OK!\n"
}
# ## ==============================Main program starts from here============================
TD_package_name=`ls ${script_dir}/*server*gz |awk -F '/' '{print $NF}' `
temp=`pwd`
for i in $TD_package_name;do
if [[ $i =~ 'enterprise' ]];then
verMode="cluster"
else
verMode=""
fi
cd $temp
install_TDengine $i
test_TDengine
done
echo "============================================================"
echo -e $fin_result
Package: tdengine
Version: 1.0.0
Section: utils
Priority: optional
#Essential: no
#Depends: no
#Suggests: no
Architecture: amd64
Installed-Size: 66666
Maintainer: support@taosdata.com
Provides: taosdata
Homepage: http://taosdata.com
Description: Big Data Platform Designed and Optimized for IoT.
Package: tdengine
Version: 1.0.0
Section: utils
Priority: optional
#Essential: no
#Depends: no
#Suggests: no
Architecture: amd64
Installed-Size: 66666
Maintainer: support@taosdata.com
Provides: taosdata
Homepage: http://taosdata.com
Description: Big Data Platform Designed and Optimized for IoT.
#!/bin/bash
#set -x
#path=`pwd`
insmetaPath="/usr/local/taos/script"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
${csudo}chmod -R 744 ${insmetaPath}
cd ${insmetaPath}
${csudo}./post.sh
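# Note: post.sh is shipped into ${insmetaPath} by the package build (the deb build
# script below copies packaging/tools/post.sh there); this wrapper only fixes
# permissions and delegates the real post-install work to post.sh.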
#!/bin/bash
#set -x
#path=`pwd`
insmetaPath="/usr/local/taos/script"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
${csudo}chmod -R 744 ${insmetaPath}
cd ${insmetaPath}
${csudo}./post.sh
#!/bin/bash
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
# Stop the service if running
if pidof taosd &> /dev/null; then
if pidof systemd &> /dev/null; then
${csudo}systemctl stop taosd || :
elif $(which service &> /dev/null); then
${csudo}service taosd stop || :
else
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi
echo "Stop taosd service success!"
sleep 1
fi
# if taos.cfg already exists as a softlink, remove it
cfg_install_dir="/etc/taos"
install_main_dir="/usr/local/taos"
if [ -f "${install_main_dir}/taos.cfg" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taos.cfg || :
fi
if [ -f "${install_main_dir}/taosadapter.toml" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taosadapter.toml || :
fi
if [ -f "${install_main_dir}/taosadapter.service" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taosadapter.service || :
fi
# libtaos.so* must not remain here, otherwise the later ln -s fails
${csudo}rm -f ${install_main_dir}/driver/libtaos* || :
#!/bin/bash
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
# Stop the service if running
if pidof taosd &> /dev/null; then
if pidof systemd &> /dev/null; then
${csudo}systemctl stop taosd || :
elif $(which service &> /dev/null); then
${csudo}service taosd stop || :
else
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi
echo "Stop taosd service success!"
sleep 1
fi
# if taos.cfg already exists as a softlink, remove it
cfg_install_dir="/etc/taos"
install_main_dir="/usr/local/taos"
if [ -f "${install_main_dir}/taos.cfg" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taos.cfg || :
fi
if [ -f "${install_main_dir}/taosadapter.toml" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taosadapter.toml || :
fi
if [ -f "${install_main_dir}/taosadapter.service" ]; then
${csudo}rm -f ${install_main_dir}/cfg/taosadapter.service || :
fi
# libtaos.so* must not remain here, otherwise the later ln -s fails
${csudo}rm -f ${install_main_dir}/driver/libtaos* || :
#!/bin/bash
insmetaPath="/usr/local/taos/script"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
${csudo}chmod -R 744 ${insmetaPath} || :
#cd ${insmetaPath}
#${csudo}./preun.sh
if [ -f ${insmetaPath}/preun.sh ]; then
cd ${insmetaPath}
${csudo}./preun.sh
else
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
inc_link_dir="/usr/include"
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_link_dir="/usr/local/taos/cfg"
# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${cfg_link_dir}/* || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || :
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi
#!/bin/bash
insmetaPath="/usr/local/taos/script"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
fi
${csudo}chmod -R 744 ${insmetaPath} || :
#cd ${insmetaPath}
#${csudo}./preun.sh
if [ -f ${insmetaPath}/preun.sh ]; then
cd ${insmetaPath}
${csudo}./preun.sh
else
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
inc_link_dir="/usr/include"
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_link_dir="/usr/local/taos/cfg"
# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${cfg_link_dir}/* || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || :
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo}kill -9 $pid || :
fi
fi
#!/bin/bash
#
# Generate deb package for ubuntu
set -e
# set -x
#curr_dir=$(pwd)
compile_dir=$1
output_dir=$2
tdengine_ver=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
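# Expected positional arguments (the script name and values below are illustrative only):
#   makedeb.sh <compile_dir> <output_dir> <tdengine_ver> <cpuType> <osType> <verMode> <verType>
#   e.g. ./makedeb.sh /root/TDengine/debug /root/TDengine/release 3.0.0.0 amd64 Linux edge stable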
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
pkg_dir="${top_dir}/debworkroom"
#echo "curr_dir: ${curr_dir}"
#echo "top_dir: ${top_dir}"
#echo "script_dir: ${script_dir}"
echo "compile_dir: ${compile_dir}"
echo "pkg_dir: ${pkg_dir}"
if [ -d ${pkg_dir} ]; then
rm -rf ${pkg_dir}
fi
mkdir -p ${pkg_dir}
cd ${pkg_dir}
libfile="libtaos.so.${tdengine_ver}"
# create install dir
install_home_path="/usr/local/taos"
mkdir -p ${pkg_dir}${install_home_path}
mkdir -p ${pkg_dir}${install_home_path}/bin
mkdir -p ${pkg_dir}${install_home_path}/cfg
#mkdir -p ${pkg_dir}${install_home_path}/connector
mkdir -p ${pkg_dir}${install_home_path}/driver
mkdir -p ${pkg_dir}${install_home_path}/examples
mkdir -p ${pkg_dir}${install_home_path}/include
#mkdir -p ${pkg_dir}${install_home_path}/init.d
mkdir -p ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg || :
fi
if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
cp ${compile_dir}/test/cfg/taosadapter.service ${pkg_dir}${install_home_path}/cfg || :
fi
#cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/startPre.sh ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
#cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin
if [ -f "${compile_dir}/build/bin/taosadapter" ]; then
cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||:
fi
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taosdef.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/examples/* ${pkg_dir}${install_home_path}/examples
#cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
#cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:
install_user_local_path="/usr/local"
if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then
cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/bin/jeprof ]; then
cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then
cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then
cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
fi
if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/
fi
if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then
cp ${compile_dir}/build/share/man/man3/jemalloc.3 ${pkg_dir}${install_user_local_path}/share/man/man3/
fi
fi
cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
chmod 755 ${pkg_dir}/DEBIAN/*
# modify version of control
debver="Version: "$tdengine_ver
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
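# e.g. with tdengine_ver=3.0.0.0 the second line of DEBIAN/control becomes "Version: 3.0.0.0"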
#get taos version, then set deb name
if [ "$verMode" == "cluster" ]; then
debname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
debname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$verType" == "beta" ]; then
debname="TDengine-server-"${tdengine_ver}-${verType}-${osType}-${cpuType}".deb"
elif [ "$verType" == "stable" ]; then
debname=${debname}".deb"
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
# make deb package
dpkg -b ${pkg_dir} $debname
echo "make deb package success!"
cp ${pkg_dir}/*.deb ${output_dir}
# clean temp dir
rm -rf ${pkg_dir}
#!/bin/bash
#
# Generate deb package for ubuntu
set -e
# set -x
#curr_dir=$(pwd)
compile_dir=$1
output_dir=$2
tdengine_ver=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
pkg_dir="${top_dir}/debworkroom"
#echo "curr_dir: ${curr_dir}"
#echo "top_dir: ${top_dir}"
#echo "script_dir: ${script_dir}"
echo "compile_dir: ${compile_dir}"
echo "pkg_dir: ${pkg_dir}"
if [ -d ${pkg_dir} ]; then
rm -rf ${pkg_dir}
fi
mkdir -p ${pkg_dir}
cd ${pkg_dir}
libfile="libtaos.so.${tdengine_ver}"
# create install dir
install_home_path="/usr/local/taos"
mkdir -p ${pkg_dir}${install_home_path}
mkdir -p ${pkg_dir}${install_home_path}/bin
mkdir -p ${pkg_dir}${install_home_path}/cfg
#mkdir -p ${pkg_dir}${install_home_path}/connector
mkdir -p ${pkg_dir}${install_home_path}/driver
mkdir -p ${pkg_dir}${install_home_path}/examples
mkdir -p ${pkg_dir}${install_home_path}/include
#mkdir -p ${pkg_dir}${install_home_path}/init.d
mkdir -p ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg || :
fi
if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
cp ${compile_dir}/test/cfg/taosadapter.service ${pkg_dir}${install_home_path}/cfg || :
fi
#cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/startPre.sh ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
#cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin
if [ -f "${compile_dir}/build/bin/taosadapter" ]; then
cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||:
fi
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/util/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/examples/* ${pkg_dir}${install_home_path}/examples
#cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
#cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
#cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:
install_user_local_path="/usr/local"
if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then
cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/bin/jeprof ]; then
cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/
fi
if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then
cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then
cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
fi
if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
fi
if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then
cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/
fi
if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then
cp ${compile_dir}/build/share/man/man3/jemalloc.3 ${pkg_dir}${install_user_local_path}/share/man/man3/
fi
fi
cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
chmod 755 ${pkg_dir}/DEBIAN/*
# modify version of control
debver="Version: "$tdengine_ver
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
#get taos version, then set deb name
if [ "$verMode" == "cluster" ]; then
debname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
debname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$verType" == "beta" ]; then
debname="TDengine-server-"${tdengine_ver}-${verType}-${osType}-${cpuType}".deb"
elif [ "$verType" == "stable" ]; then
debname=${debname}".deb"
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
# make deb package
dpkg -b ${pkg_dir} $debname
echo "make deb package success!"
cp ${pkg_dir}/*.deb ${output_dir}
# clean temp dir
rm -rf ${pkg_dir}
#!/bin/bash
#
# Modified from original source: Elastic Search
# https://github.com/elasticsearch/elasticsearch
# Thank you to the Elastic Search authors
#
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
# Provides: TDengine
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts TDengine taosd
# Description: Starts TDengine taosd, a time-series database engine
### END INIT INFO
set -e
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
NAME="TDengine"
USER="root"
GROUP="root"
DAEMON="/usr/local/taos/bin/taosd"
DAEMON_OPTS=""
HTTPD_NAME="taosadapter"
DAEMON_HTTPD_NAME=$HTTPD_NAME
DAEMON_HTTPD="/usr/local/taos/bin/$HTTPD_NAME"
PID_FILE="/var/run/$NAME.pid"
APPARGS=""
# Maximum number of open files
MAX_OPEN_FILES=65535
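# Illustrative usage once this script is installed as /etc/init.d/taosd:
#   service taosd start | stop | restart | status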
. /lib/lsb/init-functions
case "$1" in
start)
log_action_begin_msg "Starting TDengine..."
$DAEMON_HTTPD &
if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then
touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"
if [ -n "$MAX_OPEN_FILES" ]; then
ulimit -n $MAX_OPEN_FILES
fi
start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS
log_end_msg $?
fi
;;
stop)
log_action_begin_msg "Stopping TDengine..."
pkill -9 $DAEMON_HTTPD_NAME
set +e
if [ -f "$PID_FILE" ]; then
start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null
if [ $? -eq 1 ]; then
log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
elif [ $? -eq 3 ]; then
PID="`cat $PID_FILE`"
log_failure_msg "Failed to stop TDengine (pid $PID)"
exit 1
fi
rm -f "$PID_FILE"
else
log_action_cont_msg "TDengine was not running"
fi
log_action_end_msg 0
set -e
;;
restart|force-reload)
if [ -f "$PID_FILE" ]; then
$0 stop
sleep 1
fi
$0 start
;;
status)
status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
;;
*)
exit 1
;;
esac
exit 0
#!/bin/bash
#
# Modified from original source: Elastic Search
# https://github.com/elasticsearch/elasticsearch
# Thank you to the Elastic Search authors
#
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
# Provides: TDengine
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts TDengine taosd
# Description: Starts TDengine taosd, a time-series database engine
### END INIT INFO
set -e
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
NAME="TDengine"
USER="root"
GROUP="root"
DAEMON="/usr/local/taos/bin/taosd"
DAEMON_OPTS=""
HTTPD_NAME="taosadapter"
DAEMON_HTTPD_NAME=$HTTPD_NAME
DAEMON_HTTPD="/usr/local/taos/bin/$HTTPD_NAME"
PID_FILE="/var/run/$NAME.pid"
APPARGS=""
# Maximum number of open files
MAX_OPEN_FILES=65535
. /lib/lsb/init-functions
case "$1" in
start)
log_action_begin_msg "Starting TDengine..."
$DAEMON_HTTPD &
if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then
touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"
if [ -n "$MAX_OPEN_FILES" ]; then
ulimit -n $MAX_OPEN_FILES
fi
start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS
log_end_msg $?
fi
;;
stop)
log_action_begin_msg "Stopping TDengine..."
pkill -9 $DAEMON_HTTPD_NAME
set +e
if [ -f "$PID_FILE" ]; then
start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null
if [ $? -eq 1 ]; then
log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
elif [ $? -eq 3 ]; then
PID="`cat $PID_FILE`"
log_failure_msg "Failed to stop TDengine (pid $PID)"
exit 1
fi
rm -f "$PID_FILE"
else
log_action_cont_msg "TDengine was not running"
fi
log_action_end_msg 0
set -e
;;
restart|force-reload)
if [ -f "$PID_FILE" ]; then
$0 stop
sleep 1
fi
$0 start
;;
status)
status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
;;
*)
exit 1
;;
esac
exit 0
#!/bin/bash
#
# Modified from original source: Elastic Search
# https://github.com/elasticsearch/elasticsearch
# Thank you to the Elastic Search authors
#
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
# Provides: taoscluster
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts taoscluster tarbitrator
# Description: Starts taoscluster tarbitrator, an arbitrator
### END INIT INFO
set -e
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
NAME="taoscluster"
USER="root"
GROUP="root"
DAEMON="/usr/local/taos/bin/tarbitrator"
DAEMON_OPTS=""
PID_FILE="/var/run/$NAME.pid"
APPARGS=""
# Maximum number of open files
MAX_OPEN_FILES=65535
. /lib/lsb/init-functions
case "$1" in
start)
log_action_begin_msg "Starting tarbitrator..."
if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then
touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"
if [ -n "$MAX_OPEN_FILES" ]; then
ulimit -n $MAX_OPEN_FILES
fi
start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS
log_end_msg $?
fi
;;
stop)
log_action_begin_msg "Stopping tarbitrator..."
set +e
if [ -f "$PID_FILE" ]; then
start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null
if [ $? -eq 1 ]; then
log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
elif [ $? -eq 3 ]; then
PID="`cat $PID_FILE`"
log_failure_msg "Failed to stop tarbitrator (pid $PID)"
exit 1
fi
rm -f "$PID_FILE"
else
log_action_cont_msg "tarbitrator was not running"
fi
log_action_end_msg 0
set -e
;;
restart|force-reload)
if [ -f "$PID_FILE" ]; then
$0 stop
sleep 1
fi
$0 start
;;
status)
status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
;;
*)
exit 1
;;
esac
exit 0
#!/bin/bash
#
# Modified from original source: Elastic Search
# https://github.com/elasticsearch/elasticsearch
# Thank you to the Elastic Search authors
#
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
# Provides: taoscluster
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts taoscluster tarbitrator
# Description: Starts taoscluster tarbitrator, an arbitrator
### END INIT INFO
set -e
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
NAME="taoscluster"
USER="root"
GROUP="root"
DAEMON="/usr/local/taos/bin/tarbitrator"
DAEMON_OPTS=""
PID_FILE="/var/run/$NAME.pid"
APPARGS=""
# Maximum number of open files
MAX_OPEN_FILES=65535
. /lib/lsb/init-functions
case "$1" in
start)
log_action_begin_msg "Starting tarbitrator..."
if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then
touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"
if [ -n "$MAX_OPEN_FILES" ]; then
ulimit -n $MAX_OPEN_FILES
fi
start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS
log_end_msg $?
fi
;;
stop)
log_action_begin_msg "Stopping tarbitrator..."
set +e
if [ -f "$PID_FILE" ]; then
start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null
if [ $? -eq 1 ]; then
log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
elif [ $? -eq 3 ]; then
PID="`cat $PID_FILE`"
log_failure_msg "Failed to stop tarbitrator (pid $PID)"
exit 1
fi
rm -f "$PID_FILE"
else
log_action_cont_msg "tarbitrator was not running"
fi
log_action_end_msg 0
set -e
;;
restart|force-reload)
if [ -f "$PID_FILE" ]; then
$0 stop
sleep 1
fi
$0 start
;;
status)
status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
;;
*)
exit 1
;;
esac
exit 0
FROM ubuntu:18.04
WORKDIR /root
ARG pkgFile
ARG dirName
ARG cpuType
RUN echo ${pkgFile} && echo ${dirName}
COPY ${pkgFile} /root/
RUN tar -zxf ${pkgFile}
WORKDIR /root/
RUN cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root
RUN rm /root/${pkgFile}
RUN rm -rf /root/${dirName}
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get clean && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
LC_CTYPE=en_US.UTF-8 \
LANG=en_US.UTF-8 \
LC_ALL=en_US.UTF-8
COPY ./bin/* /usr/bin/
ENV TINI_VERSION v0.19.0
RUN bash -c 'echo -e "Downloading tini-${cpuType} ..."'
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
RUN chmod +x /tini
ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"]
CMD ["taosd"]
VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ]
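# A hypothetical build invocation for this Dockerfile (file and tag names are examples only):
#   docker build --build-arg pkgFile=TDengine-server-3.0.0.0-Linux-amd64.tar.gz \
#                --build-arg dirName=TDengine-server-3.0.0.0 \
#                --build-arg cpuType=amd64 -t tdengine/tdengine:3.0.0.0 .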
FROM ubuntu:18.04
WORKDIR /root
ARG pkgFile
ARG dirName
ARG cpuType
RUN echo ${pkgFile} && echo ${dirName}
COPY ${pkgFile} /root/
RUN tar -zxf ${pkgFile}
WORKDIR /root/
RUN cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root
RUN rm /root/${pkgFile}
RUN rm -rf /root/${dirName}
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get clean && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
LC_CTYPE=en_US.UTF-8 \
LANG=en_US.UTF-8 \
LC_ALL=en_US.UTF-8
COPY ./bin/* /usr/bin/
ENV TINI_VERSION v0.19.0
RUN bash -c 'echo -e "Downloading tini-${cpuType} ..."'
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
RUN chmod +x /tini
ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"]
CMD ["taosd"]
VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ]
#!/bin/sh
set -e
# for TZ awareness
if [ "$TZ" != "" ]; then
ln -sf /usr/share/zoneinfo/$TZ /etc/localtime
echo $TZ >/etc/timezone
fi
# option to disable taosadapter, default is no
DISABLE_ADAPTER=${TAOS_DISABLE_ADAPTER:-0}
unset TAOS_DISABLE_ADAPTER
# to get mnodeEpSet from data dir
DATA_DIR=${TAOS_DATA_DIR:-/var/lib/taos}
# append env to custom taos.cfg
CFG_DIR=/tmp/taos
CFG_FILE=$CFG_DIR/taos.cfg
mkdir -p $CFG_DIR >/dev/null 2>&1
[ -f /etc/taos/taos.cfg ] && cat /etc/taos/taos.cfg | grep -E -v "^#|^\s*$" >$CFG_FILE
env-to-cfg >>$CFG_FILE
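# env-to-cfg (source shown further below) maps TAOS_* environment variables to taos.cfg
# entries, e.g. TAOS_FIRST_EP=td-1:6030 is appended as "firstEp td-1:6030".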
FQDN=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep fqdn | tail -n1 | sed -E 's/.*fqdn\s+//')
# ensure the fqdn is resolved as localhost
grep "$FQDN" /etc/hosts >/dev/null || echo "127.0.0.1 $FQDN" >>/etc/hosts
# parse first ep host and port
FIRST_EP_HOST=${TAOS_FIRST_EP%:*}
FIRST_EP_PORT=${TAOS_FIRST_EP#*:}
# in case of custom server port
SERVER_PORT=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep serverPort | tail -n1 | sed -E 's/.*serverPort\s+//')
SERVER_PORT=${SERVER_PORT:-6030}
# for other binaries like interpreters
if echo $1 | grep -E "taosd$" - >/dev/null; then
true # will run taosd
else
cp -f $CFG_FILE /etc/taos/taos.cfg || true
$@
exit $?
fi
set +e
ulimit -c unlimited
# set core file pattern; this may fail (e.g. in an unprivileged container), hence set +e above
sysctl -w kernel.core_pattern=/corefile/core-$FQDN-%e-%p >/dev/null 2>&1
set -e
if [ "$DISABLE_ADAPTER" = "0" ]; then
which taosadapter >/dev/null && taosadapter &
# wait for 6041 port ready
for _ in $(seq 1 20); do
nc -z localhost 6041 && break
sleep 0.5
done
fi
# if the mnode ep set already exists, or this host itself is the first ep, just start.
if [ -f "$DATA_DIR/dnode/mnodeEpSet.json" ] ||
[ "$TAOS_FQDN" = "$FIRST_EP_HOST" ]; then
$@ -c $CFG_DIR
# other nodes first wait until the first ep is ready.
else
if [ "$TAOS_FIRST_EP" = "" ]; then
echo "run TDengine with single node."
$@ -c $CFG_DIR
exit $?
fi
while true; do
es=0
taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -n startup >/dev/null || es=$?
if [ "$es" -eq 0 ]; then
taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -s "create dnode \"$FQDN:$SERVER_PORT\";"
break
fi
sleep 1s
done
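# e.g. with TAOS_FIRST_EP=td-1:6030 and TAOS_FQDN=td-2 (illustrative values) this node
# registers itself as dnode "td-2:6030" once td-1 answers, then starts taosd below.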
$@ -c $CFG_DIR
fi
#!/bin/sh
set -e
# for TZ awareness
if [ "$TZ" != "" ]; then
ln -sf /usr/share/zoneinfo/$TZ /etc/localtime
echo $TZ >/etc/timezone
fi
# option to disable taosadapter, default is no
DISABLE_ADAPTER=${TAOS_DISABLE_ADAPTER:-0}
unset TAOS_DISABLE_ADAPTER
# to get mnodeEpSet from data dir
DATA_DIR=${TAOS_DATA_DIR:-/var/lib/taos}
# append env to custom taos.cfg
CFG_DIR=/tmp/taos
CFG_FILE=$CFG_DIR/taos.cfg
mkdir -p $CFG_DIR >/dev/null 2>&1
[ -f /etc/taos/taos.cfg ] && cat /etc/taos/taos.cfg | grep -E -v "^#|^\s*$" >$CFG_FILE
env-to-cfg >>$CFG_FILE
FQDN=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep fqdn | tail -n1 | sed -E 's/.*fqdn\s+//')
# ensure the fqdn is resolved as localhost
grep "$FQDN" /etc/hosts >/dev/null || echo "127.0.0.1 $FQDN" >>/etc/hosts
# parse first ep host and port
FIRST_EP_HOST=${TAOS_FIRST_EP%:*}
FIRST_EP_PORT=${TAOS_FIRST_EP#*:}
# in case of custom server port
SERVER_PORT=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep serverPort | tail -n1 | sed -E 's/.*serverPort\s+//')
SERVER_PORT=${SERVER_PORT:-6030}
# for other binaries like interpreters
if echo $1 | grep -E "taosd$" - >/dev/null; then
true # will run taosd
else
cp -f $CFG_FILE /etc/taos/taos.cfg || true
$@
exit $?
fi
set +e
ulimit -c unlimited
# set core file pattern; this may fail (e.g. in an unprivileged container), hence set +e above
sysctl -w kernel.core_pattern=/corefile/core-$FQDN-%e-%p >/dev/null 2>&1
set -e
if [ "$DISABLE_ADAPTER" = "0" ]; then
which taosadapter >/dev/null && taosadapter &
# wait for 6041 port ready
for _ in $(seq 1 20); do
nc -z localhost 6041 && break
sleep 0.5
done
fi
# if the mnode ep set already exists, or this host itself is the first ep, just start.
if [ -f "$DATA_DIR/dnode/mnodeEpSet.json" ] ||
[ "$TAOS_FQDN" = "$FIRST_EP_HOST" ]; then
$@ -c $CFG_DIR
# other nodes first wait until the first ep is ready.
else
if [ "$TAOS_FIRST_EP" = "" ]; then
echo "run TDengine with single node."
$@ -c $CFG_DIR
exit $?
fi
while true; do
es=0
taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -n startup >/dev/null || es=$?
if [ "$es" -eq 0 ]; then
taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -s "create dnode \"$FQDN:$SERVER_PORT\";"
break
fi
sleep 1s
done
$@ -c $CFG_DIR
fi
#!/bin/sh
set -e
self=$0
snake_to_camel_case() {
echo $1 | awk -F _ '{printf "%s", $1; for(i=2; i<=NF; i++) printf "%s", toupper(substr($i,1,1)) substr($i,2); print"";}'
}
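# Examples: snake_to_camel_case first_ep      -> firstEp
#           snake_to_camel_case num_of_mnodes -> numOfMnodes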
if echo $1 | grep -E "^$" - >/dev/null; then
export |grep -E 'TAOS_.*' -o| sed 's/TAOS_//' |tr A-Z a-z | awk -F"=" '{print "name=$(""'$self' " $1"); echo $name "$2}' |sh
else
snake_to_camel_case $1
fi
#!/bin/sh
set -e
self=$0
snake_to_camel_case() {
echo $1 | awk -F _ '{printf "%s", $1; for(i=2; i<=NF; i++) printf "%s", toupper(substr($i,1,1)) substr($i,2); print"";}'
}
if echo $1 | grep -E "^$" - >/dev/null; then
export |grep -E 'TAOS_.*' -o| sed 's/TAOS_//' |tr A-Z a-z | awk -F"=" '{print "name=$(""'$self' " $1"); echo $name "$2}' |sh
else
snake_to_camel_case $1
fi
version: "3"
networks:
inter:
api:
services:
arbitrator:
image: tdengine/tdengine:$VERSION
command: tarbitrator
networks:
- inter
td-1:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-1"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td1:/var/lib/taos/
- taoslog-td1:/var/log/taos/
td-2:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-2"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td2:/var/lib/taos/
- taoslog-td2:/var/log/taos/
adapter:
image: tdengine/tdengine:$VERSION
command: taosadapter
networks:
- inter
environment:
TAOS_FIRST_EP: "td-1"
TAOS_SECOND_EP: "td-2"
deploy:
replicas: 4
update_config:
parallelism: 4
nginx:
image: nginx
depends_on:
- adapter
networks:
- inter
- api
ports:
- 6041:6041
- 6044:6044/udp
command: [
"sh",
"-c",
"while true;
do curl -s http://adapter:6041/-/ping >/dev/null && break;
done;
printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
> /etc/nginx/conf.d/rest.conf;
printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
nginx -g 'daemon off;'",
]
volumes:
taosdata-td1:
taoslog-td1:
taosdata-td2:
taoslog-td2:
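# Note: the adapter service relies on deploy/replicas, which is only honoured by swarm
# mode ("docker stack deploy") or by docker-compose run with --compatibility.
# Hypothetical launch: VERSION=3.0.0.0 docker stack deploy -c docker-compose.yml taos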
version: "3"
networks:
inter:
api:
services:
arbitrator:
image: tdengine/tdengine:$VERSION
command: tarbitrator
networks:
- inter
td-1:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-1"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td1:/var/lib/taos/
- taoslog-td1:/var/log/taos/
td-2:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-2"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td2:/var/lib/taos/
- taoslog-td2:/var/log/taos/
adapter:
image: tdengine/tdengine:$VERSION
command: taosadapter
networks:
- inter
environment:
TAOS_FIRST_EP: "td-1"
TAOS_SECOND_EP: "td-2"
deploy:
replicas: 4
update_config:
parallelism: 4
nginx:
image: nginx
depends_on:
- adapter
networks:
- inter
- api
ports:
- 6041:6041
- 6044:6044/udp
command: [
"sh",
"-c",
"while true;
do curl -s http://adapter:6041/-/ping >/dev/null && break;
done;
printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
> /etc/nginx/conf.d/rest.conf;
printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
nginx -g 'daemon off;'",
]
volumes:
taosdata-td1:
taoslog-td1:
taosdata-td2:
taoslog-td2:
#!/bin/bash
#
set -e
#set -x
# dockerbuild.sh
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
# -n [version number]
# -p [password for docker hub]
# set parameters by default value
cpuType=aarch64
verNumber=""
passWord=""
while getopts "hc:n:p:f:" arg
do
case $arg in
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
echo " -n [version number] "
echo " -p [password for docker hub] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
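# Hypothetical invocation of this wrapper (version number and password below are examples only):
#   bash <this-wrapper>.sh -c aarch64 -n 3.0.0.0 -p <docker-hub-password>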
pkgFile=TDengine-server-${verNumber}-Linux-${cpuType}.tar.gz
echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "
scriptDir=`pwd`
pkgDir=$scriptDir/../../release/
cp -f ${pkgDir}/${pkgFile} .
./dockerbuild.sh -c ${cpuType} -f ${pkgFile} -n ${verNumber} -p ${passWord}
rm -f ${pkgFile}
#!/bin/bash
#
set -e
#set -x
# dockerbuild.sh
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
# -n [version number]
# -p [password for docker hub]
# set parameters by default value
cpuType=aarch64
verNumber=""
passWord=""
while getopts "hc:n:p:f:" arg
do
case $arg in
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
echo " -n [version number] "
echo " -p [password for docker hub] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
pkgFile=TDengine-server-${verNumber}-Linux-${cpuType}.tar.gz
echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "
scriptDir=`pwd`
pkgDir=$scriptDir/../../release/
cp -f ${pkgDir}/${pkgFile} .
./dockerbuild.sh -c ${cpuType} -f ${pkgFile} -n ${verNumber} -p ${passWord}
rm -f ${pkgFile}
#!/bin/bash
#
# This file is used by the TDengine installer to detect the linux distribution
# name from /etc/*-release.
set -e
# set -x
# -----------------------Variables definition---------------------
OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2)
len=$(echo ${#OS})
len=$((len-2))
retval=$(echo -ne ${OS:1:${len}} | cut -d" " -f1)
echo -ne $retval
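# Example: on Ubuntu, /etc/os-release contains NAME="Ubuntu", so this prints: Ubuntu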
#!/bin/bash
#
# This file is used by the TDengine installer to detect the linux distribution
# name from /etc/*-release.
set -e
# set -x
# -----------------------Variables definition---------------------
OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2)
len=$(echo ${#OS})
len=$((len-2))
retval=$(echo -ne ${OS:1:${len}} | cut -d" " -f1)
echo -ne $retval
@echo off
goto %1
:needAdmin
mshta vbscript:createobject("shell.application").shellexecute("%~s0",":hasAdmin","","runas",1)(window.close)&goto :eof
:hasAdmin
cp -f C:\\TDengine\\driver\\taos.dll C:\\Windows\\System32
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755