Commit fde0db68 authored by: H Hongze Cheng

Merge branch '3.0' of https://github.com/taosdata/TDengine into fix/hzcheng_3.0

......@@ -4,9 +4,6 @@
[submodule "src/connector/hivemq-tdengine-extension"]
path = src/connector/hivemq-tdengine-extension
url = git@github.com:taosdata/hivemq-tdengine-extension.git
[submodule "deps/jemalloc"]
path = deps/jemalloc
url = https://github.com/jemalloc/jemalloc
[submodule "deps/TSZ"]
path = deps/TSZ
url = https://github.com/taosdata/TSZ.git
......
......@@ -84,6 +84,12 @@ ELSE ()
ENDIF ()
ENDIF ()
option(
JEMALLOC_ENABLED
"If build with jemalloc"
OFF
)
option(
BUILD_SANITIZER
"If build sanitizer"
......
# jemalloc
ExternalProject_Add(jemalloc
GIT_REPOSITORY https://github.com/jemalloc/jemalloc.git
GIT_TAG 5.3.0
SOURCE_DIR "${TD_CONTRIB_DIR}/jemalloc"
BINARY_DIR ""
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
GIT_SHALLOW true
GIT_PROGRESS true
)
......@@ -2,7 +2,7 @@
# zlib
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 817cb6a
GIT_TAG 2.1.1
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
......
......@@ -27,6 +27,10 @@ else ()
cat("${TD_SUPPORT_DIR}/taosadapter_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()
if(TD_LINUX_64 AND JEMALLOC_ENABLED)
cat("${TD_SUPPORT_DIR}/jemalloc_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()
# pthread
if(${BUILD_PTHREAD})
cat("${TD_SUPPORT_DIR}/pthread_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
......@@ -399,6 +403,18 @@ if(${BUILD_ADDR2LINE})
endif(NOT ${TD_WINDOWS})
endif(${BUILD_ADDR2LINE})
# jemalloc
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
include(ExternalProject)
ExternalProject_Add(jemalloc
PREFIX "jemalloc"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/
BUILD_COMMAND ${MAKE}
)
INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
ENDIF ()
# ================================================================================================
# Build test
......
This diff has been collapsed.
......@@ -594,6 +594,24 @@ INSERT INTO tb_name VALUES (TODAY(), ...);
TDengine supports aggregate queries over data and provides the following aggregate functions.
### APERCENTILE
```sql
SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns the approximate percentile of the values in the specified column of a table or supertable. Similar to the PERCENTILE function, but returns an approximate result.
**Return type**: DOUBLE.
**Applicable data types**: numeric types.
**Applicable to**: tables and supertables.
**Notes**:
- The value of P is in the range [0, 100]; P = 0 is equivalent to MIN and P = 100 is equivalent to MAX.
- algo_type can be "default" or "t-digest". With "default" the function uses a histogram-based algorithm; with "t-digest" it uses the t-digest algorithm to compute an approximate percentile. If algo_type is not specified, the "default" algorithm is used (see the sketch below).
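A minimal usage sketch (the supertable `meters` and column `current` are hypothetical, not part of this change):
```sql
-- approximate 90th percentile with the default histogram-based algorithm
SELECT APERCENTILE(current, 90) FROM meters;
-- the same percentile approximated with the t-digest algorithm
SELECT APERCENTILE(current, 90, "t-digest") FROM meters;
```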
### AVG
```sql
......@@ -656,6 +674,7 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE
- For nested queries, ELAPSED is valid only when the inner query outputs an implicit timestamp column. For example, in select elapsed(ts) from (select diff(value) from sub1), the diff function makes the inner query output an implicit timestamp column, which is the primary key column and can be passed as the first argument of elapsed. Conversely, in select elapsed(ts) from (select * from sub1), the ts column no longer carries the meaning of a primary key column by the time it reaches the outer query, so elapsed cannot be used; see the sketch after this list. Moreover, since elapsed depends strongly on the timeline, a statement such as select elapsed(ts) from (select diff(value) from st group by tbname) does return a result but has no practical meaning, and this usage will be restricted in the future.
- Cannot be used together with functions such as leastsquares, diff, derivative, top, bottom, last_row, or interp.
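A sketch of the nested-query rule above, mirroring the documentation's own example (the table `sub1` and column `value` are hypothetical):
```sql
-- valid: DIFF makes the inner query output an implicit timestamp primary key column
SELECT ELAPSED(ts) FROM (SELECT DIFF(value) FROM sub1);
-- invalid: ts loses its primary-key meaning when passed through SELECT *
-- SELECT ELAPSED(ts) FROM (SELECT * FROM sub1);
```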
### LEASTSQUARES
```sql
......@@ -671,21 +690,6 @@ SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]
**Applicable to**: tables.
### MODE
```sql
SELECT MODE(field_name) FROM tb_name [WHERE clause];
```
**Description**: Returns the value that appears most frequently; if multiple values share the highest frequency, NULL is returned.
**Return type**: same as the input data type.
**Applicable data types**: columns of any type.
**Applicable to**: tables and supertables.
### SPREAD
```sql
......@@ -778,28 +782,26 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam
3. normalized: whether to normalize the returned results to the range 0–1. Valid inputs are 0 and 1 (see the sketch below).
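A hedged sketch of how the three parameters fit together (the table and column names, and the bin layout, are illustrative assumptions):
```sql
-- count voltage values into the user-defined bins (1,3], (3,5], (5,7], normalized to 0-1
SELECT HISTOGRAM(voltage, "user_input", "[1,3,5,7]", 1) FROM meters;
```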
## Selection Functions
Selection functions select and return one or more rows from the query result set according to their semantics. The ts column or other columns (including tbname and tag columns) can be output at the same time, which makes it easy to tell which data row each selected value comes from.
### APERCENTILE
### PERCENTILE
```sql
SELECT APERCENTILE(field_name, P[, algo_type])
FROM { tb_name | stb_name } [WHERE clause]
SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
```
**Description**: Returns the approximate percentile of the values in the specified column of a table or supertable. Similar to the PERCENTILE function, but returns an approximate result.
**Description**: Returns the percentile of the values in a column of a table.
**Return type**: DOUBLE.
**Applicable data types**: numeric types.
**Applicable columns**: numeric types.
**Applicable to**: tables and supertables.
**Applicable to**: tables.
**Notes**:
- The value of P is in the range [0, 100]; P = 0 is equivalent to MIN and P = 100 is equivalent to MAX.
- algo_type can be "default" or "t-digest". With "default" the function uses a histogram-based algorithm; with "t-digest" it uses the t-digest algorithm to compute an approximate percentile. If algo_type is not specified, the "default" algorithm is used.
**Usage notes**: The value of *P* is in the range 0 ≤ *P* ≤ 100; P = 0 is equivalent to MIN and P = 100 is equivalent to MAX (see the sketch below).
## Selection Functions
Selection functions select and return one or more rows from the query result set according to their semantics. The ts column or other columns (including tbname and tag columns) can be output at the same time, which makes it easy to tell which data row each selected value comes from.
### BOTTOM
......@@ -935,21 +937,41 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
**Applicable to**: tables and supertables.
### PERCENTILE
### MODE
```sql
SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
SELECT MODE(field_name) FROM tb_name [WHERE clause];
```
**功能说明**统计表中某列的值百分比分位数
**功能说明**返回出现频率最高的值,若存在多个频率相同的最高值,输出NULL
**返回数据类型** DOUBLE
**返回数据类型**与输入数据类型一致
**应用字段**:数值类型
**适用数据类型**:全部类型字段
**适用于**:表。
**适用于**:表和超级表
**使用说明***P*值取值范围 0≤*P*≤100,为 0 的时候等同于 MIN,为 100 的时候等同于 MAX。
### SAMPLE
```sql
SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns k sampled values of the data. The valid range of k is 1 ≤ k ≤ 1000.
**Return type**: same as the original data type; the timestamp of each sampled row is also returned.
**Applicable data types**: when used in a supertable query, it cannot be applied to tag columns.
**Nested subquery support**: applicable to both inner and outer queries.
**Applicable to**: tables and supertables.
**Usage notes**:
- Cannot take part in expression calculation; the function can be applied to both regular tables and supertables;
- When used on a supertable, it must be combined with PARTITION BY tbname so that the result is forced onto a single timeline (see the sketch after this list).
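A usage sketch for the note above (hypothetical supertable `meters` and column `current`):
```sql
-- 10 sampled values per child-table timeline
SELECT SAMPLE(current, 10) FROM meters PARTITION BY tbname;
```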
### TAIL
......@@ -1016,7 +1038,7 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
**Description**: Cumulative sum; the number of output rows equals the number of input rows.
**Return type**: if the input column is an integer type, the return value is a long integer (int64_t); for floating-point input it is a double-precision float (Double); for unsigned integer input it is an unsigned long integer (uint64_t). The result also carries the timestamp of each row.
**Return type**: if the input column is an integer type, the return value is a long integer (int64_t); for floating-point input it is a double-precision float (Double); for unsigned integer input it is an unsigned long integer (uint64_t).
**Applicable data types**: numeric types.
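An illustrative sketch (hypothetical table `d1001` and column `current`):
```sql
-- running total of current, one output row per input row
SELECT CSUM(current) FROM d1001;
```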
......@@ -1045,8 +1067,10 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER
**Applicable to**: tables and supertables.
**Usage notes**: The DERIVATIVE function can be used on a supertable when PARTITION BY splits the data into separate timelines (i.e., PARTITION BY tbname).
**Usage notes**:
- The DERIVATIVE function can be used on a supertable when PARTITION BY splits the data into separate timelines (i.e., PARTITION BY tbname).
- Can be used together with the associated selected columns, for example: select \_rowts, DERIVATIVE() from (see the sketch below).
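A sketch combining both notes (hypothetical supertable `meters` and column `current`):
```sql
-- per-second rate of change, ignoring negative results, one timeline per child table
SELECT _rowts, DERIVATIVE(current, 1s, 1) FROM meters PARTITION BY tbname;
```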
### DIFF
......@@ -1062,7 +1086,10 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
**Applicable to**: tables and supertables.
**Usage notes**: The number of output rows is the total number of rows in the range minus one; the first row produces no output.
**Usage notes**:
- The number of output rows is the total number of rows in the range minus one; the first row produces no output.
- Can be used together with the associated selected columns, for example: select \_rowts, DIFF() from (see the sketch below).
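A sketch of the second note (hypothetical table `d1001` and column `current`):
```sql
-- difference between consecutive rows, with the row timestamp selected alongside
SELECT _rowts, DIFF(current) FROM d1001;
```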
### IRATE
......@@ -1102,26 +1129,6 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
- Can only be used together with regular columns and selection or projection functions; cannot be used together with aggregation functions;
- When used on a supertable, it must be combined with PARTITION BY tbname so that the result is forced onto a single timeline (see the sketch below).
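A usage sketch for the notes above (hypothetical supertable `meters` and column `current`):
```sql
-- moving average over a sliding window of 5 rows, per child-table timeline
SELECT MAVG(current, 5) FROM meters PARTITION BY tbname;
```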
### SAMPLE
```sql
SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns k sampled values of the data. The valid range of k is 1 ≤ k ≤ 1000.
**Return type**: same as the original data type; the timestamp of each sampled row is also returned.
**Applicable data types**: when used in a supertable query, it cannot be applied to tag columns.
**Nested subquery support**: applicable to both inner and outer queries.
**Applicable to**: tables and supertables.
**Usage notes**:
- Cannot take part in expression calculation; the function can be applied to both regular tables and supertables;
- When used on a supertable, it must be combined with PARTITION BY tbname so that the result is forced onto a single timeline.
### STATECOUNT
......@@ -1162,7 +1169,7 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W
- oper: "LT" (less than), "GT" (greater than), "LE" (less than or equal to), "GE" (greater than or equal to), "NE" (not equal to), "EQ" (equal to); case-insensitive.
- val: a numeric value
- unit: the unit of the duration, one of [1s, 1m, 1h]; any remainder smaller than one unit is discarded. Defaults to 1s.
- unit: the unit of the duration; valid units are 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), 1w (weeks). If omitted, it defaults to the precision of the current database.
**Return type**: INTEGER.
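A hedged sketch of the parameters described above (the table and column names are illustrative assumptions):
```sql
-- total duration, in minutes, of the continuous state current > 10
SELECT STATEDURATION(current, "GT", 10, 1m) FROM d1001;
```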
......
......@@ -55,7 +55,6 @@ enum {
TASK_INPUT_STATUS__NORMAL = 1,
TASK_INPUT_STATUS__BLOCKED,
TASK_INPUT_STATUS__RECOVER,
TASK_INPUT_STATUS__PROCESSING,
TASK_INPUT_STATUS__STOP,
TASK_INPUT_STATUS__FAILED,
};
......@@ -320,17 +319,6 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
void tFreeSStreamTask(SStreamTask* pTask);
static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem* pItem) {
#if 0
while (1) {
int8_t inputStatus =
atomic_val_compare_exchange_8(&pTask->inputStatus, TASK_INPUT_STATUS__NORMAL, TASK_INPUT_STATUS__PROCESSING);
if (inputStatus == TASK_INPUT_STATUS__NORMAL) {
break;
}
ASSERT(0);
}
#endif
if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
SStreamDataSubmit* pSubmitClone = streamSubmitRefClone((SStreamDataSubmit*)pItem);
if (pSubmitClone == NULL) {
......@@ -443,13 +431,14 @@ typedef struct {
typedef struct {
int64_t streamId;
int32_t taskId;
int32_t sourceTaskId;
int32_t sourceVg;
int32_t upstreamTaskId;
int32_t upstreamNodeId;
} SStreamTaskRecoverReq;
typedef struct {
int64_t streamId;
int32_t taskId;
int32_t rspTaskId;
int32_t reqTaskId;
int8_t inputStatus;
} SStreamTaskRecoverRsp;
......
......@@ -210,7 +210,7 @@ SyncGroupId syncGetVgId(int64_t rid);
void syncGetEpSet(int64_t rid, SEpSet* pEpSet);
void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet);
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak);
int32_t syncProposeBatch(int64_t rid, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize);
int32_t syncProposeBatch(int64_t rid, SRpcMsg** pMsgPArr, bool* pIsWeakArr, int32_t arrSize);
bool syncEnvIsStart();
const char* syncStr(ESyncState state);
bool syncIsRestoreFinish(int64_t rid);
......
......@@ -238,7 +238,7 @@ typedef struct SyncClientRequestBatch {
char data[]; // block2, block3
} SyncClientRequestBatch;
SyncClientRequestBatch* syncClientRequestBatchBuild(SRpcMsg* rpcMsgArr, SRaftMeta* raftArr, int32_t arrSize,
SyncClientRequestBatch* syncClientRequestBatchBuild(SRpcMsg** rpcMsgPArr, SRaftMeta* raftArr, int32_t arrSize,
int32_t vgId);
void syncClientRequestBatch2RpcMsg(const SyncClientRequestBatch* pSyncMsg, SRpcMsg* pRpcMsg);
void syncClientRequestBatchDestroy(SyncClientRequestBatch* pMsg);
......
......@@ -257,14 +257,13 @@ static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema)},
{TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema)},
{TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema)},
{TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)},
{TSDB_INS_TABLE_BNODES, bnodesSchema, tListLen(bnodesSchema)},
// {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)},
// {TSDB_INS_TABLE_BNODES, bnodesSchema, tListLen(bnodesSchema)},
{TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema)},
{TSDB_INS_TABLE_USER_DATABASES, userDBSchema, tListLen(userDBSchema)},
{TSDB_INS_TABLE_USER_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema)},
{TSDB_INS_TABLE_USER_INDEXES, userIdxSchema, tListLen(userIdxSchema)},
{TSDB_INS_TABLE_USER_STABLES, userStbsSchema, tListLen(userStbsSchema)},
{TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)},
{TSDB_INS_TABLE_USER_TABLES, userTblsSchema, tListLen(userTblsSchema)},
{TSDB_INS_TABLE_USER_TAGS, userTagsSchema, tListLen(userTagsSchema)},
// {TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED, userTblDistSchema, tListLen(userTblDistSchema)},
......
......@@ -1713,7 +1713,7 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag) {
size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
int32_t rows = pDataBlock->info.rows;
printf("%s |block type %d |child id %d|group id %zX\n", flag, (int32_t)pDataBlock->info.type,
printf("%s |block type %d |child id %d|group id %" PRIu64 "\n", flag, (int32_t)pDataBlock->info.type,
pDataBlock->info.childId, pDataBlock->info.groupId);
for (int32_t j = 0; j < rows; j++) {
printf("%s |", flag);
......
......@@ -14,4 +14,7 @@ target_include_directories(
taosd
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/node_mgmt/inc"
)
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
add_dependencies(taosd jemalloc)
ENDIF ()
target_link_libraries(taosd dnode)
......@@ -179,7 +179,7 @@ static int32_t sndProcessTaskRecoverRsp(SSnode *pNode, SRpcMsg *pMsg) {
SStreamMeta *pMeta = pNode->pMeta;
SStreamTaskRecoverRsp *pRsp = pMsg->pCont;
int32_t taskId = pRsp->taskId;
int32_t taskId = pRsp->rspTaskId;
SStreamTask *pTask = *(SStreamTask **)taosHashGet(pMeta->pHash, &taskId, sizeof(int32_t));
streamProcessRecoverRsp(pTask, pRsp);
return 0;
......
......@@ -599,14 +599,14 @@ static int32_t tdRSmaFetchAndSubmitResult(SRSmaInfoItem *pItem, STSchema *pTSche
SSubmitReq *pReq = NULL;
// TODO: the schema update should be handled
if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), suid) < 0) {
smaError("vgId:%d, build submit req for rsma table %" PRIi64 "l evel %" PRIi8 " failed since %s", SMA_VID(pSma),
smaError("vgId:%d, build submit req for rsma stable %" PRIi64 " level %" PRIi8 " failed since %s", SMA_VID(pSma),
suid, pItem->level, terrstr());
goto _err;
}
if (pReq && tdProcessSubmitReq(sinkTsdb, output->info.version, pReq) < 0) {
taosMemoryFreeClear(pReq);
smaError("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " failed since %s",
smaError("vgId:%d, process submit req for rsma stable %" PRIi64 " level %" PRIi8 " failed since %s",
SMA_VID(pSma), suid, pItem->level, terrstr());
goto _err;
}
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "sma.h"
static int32_t rsmaSnapReadQTaskInfo(SRsmaSnapReader* pReader, uint8_t** ppData);
static int32_t rsmaSnapWriteQTaskInfo(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
// SRsmaSnapReader ========================================
struct SRsmaSnapReader {
SSma* pSma;
int64_t sver;
int64_t ever;
// for data file
int8_t rsmaDataDone[TSDB_RETENTION_L2];
STsdbSnapReader* pDataReader[TSDB_RETENTION_L2];
// for qtaskinfo file
int8_t qTaskDone;
SQTaskFReader* pQTaskFReader;
};
int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapReader** ppReader) {
int32_t code = 0;
SRsmaSnapReader* pReader = NULL;
// alloc
pReader = (SRsmaSnapReader*)taosMemoryCalloc(1, sizeof(*pReader));
if (pReader == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
pReader->pSma = pSma;
pReader->sver = sver;
pReader->ever = ever;
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pSma->pRSmaTsdb[i]) {
code = tsdbSnapReaderOpen(pSma->pRSmaTsdb[i], sver, ever, &pReader->pDataReader[i]);
if (code < 0) {
goto _err;
}
}
}
*ppReader = pReader;
smaInfo("vgId:%d vnode snapshot rsma reader opened succeed", SMA_VID(pSma));
return TSDB_CODE_SUCCESS;
_err:
smaError("vgId:%d vnode snapshot rsma reader opened failed since %s", SMA_VID(pSma), tstrerror(code));
return TSDB_CODE_FAILED;
}
static int32_t rsmaSnapReadQTaskInfo(SRsmaSnapReader* pReader, uint8_t** ppData) {
int32_t code = 0;
SSma* pSma = pReader->pSma;
_exit:
smaInfo("vgId:%d vnode snapshot rsma read qtaskinfo succeed", SMA_VID(pSma));
return code;
_err:
smaError("vgId:%d vnode snapshot rsma read qtaskinfo failed since %s", SMA_VID(pSma), tstrerror(code));
return code;
}
int32_t rsmaSnapRead(SRsmaSnapReader* pReader, uint8_t** ppData) {
int32_t code = 0;
*ppData = NULL;
smaInfo("vgId:%d vnode snapshot rsma read entry", SMA_VID(pReader->pSma));
// read rsma1/rsma2 file
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
STsdbSnapReader* pTsdbSnapReader = pReader->pDataReader[i];
if (!pTsdbSnapReader) {
continue;
}
if (!pReader->rsmaDataDone[i]) {
smaInfo("vgId:%d vnode snapshot rsma read level %d not done", SMA_VID(pReader->pSma), i);
code = tsdbSnapRead(pTsdbSnapReader, ppData);
if (code) {
goto _err;
} else {
if (*ppData) {
goto _exit;
} else {
pReader->rsmaDataDone[i] = 1;
}
}
} else {
smaInfo("vgId:%d vnode snapshot rsma read level %d is done", SMA_VID(pReader->pSma), i);
}
}
// read qtaskinfo file
if (!pReader->qTaskDone) {
code = rsmaSnapReadQTaskInfo(pReader, ppData);
if (code) {
goto _err;
} else {
if (*ppData) {
goto _exit;
} else {
pReader->qTaskDone = 1;
}
}
}
_exit:
smaInfo("vgId:%d vnode snapshot rsma read succeed", SMA_VID(pReader->pSma));
return code;
_err:
smaError("vgId:%d vnode snapshot rsma read failed since %s", SMA_VID(pReader->pSma), tstrerror(code));
return code;
}
int32_t rsmaSnapReaderClose(SRsmaSnapReader** ppReader) {
int32_t code = 0;
SRsmaSnapReader* pReader = *ppReader;
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pReader->pDataReader[i]) {
tsdbSnapReaderClose(&pReader->pDataReader[i]);
}
}
if (pReader->pQTaskFReader) {
// TODO: close for qtaskinfo
smaInfo("vgId:%d vnode snapshot rsma reader closed for qTaskInfo", SMA_VID(pReader->pSma));
}
smaInfo("vgId:%d vnode snapshot rsma reader closed", SMA_VID(pReader->pSma));
taosMemoryFreeClear(*ppReader);
return code;
}
// SRsmaSnapWriter ========================================
struct SRsmaSnapWriter {
SSma* pSma;
int64_t sver;
int64_t ever;
// config
int64_t commitID;
// for data file
STsdbSnapWriter* pDataWriter[TSDB_RETENTION_L2];
// for qtaskinfo file
SQTaskFReader* pQTaskFReader;
SQTaskFWriter* pQTaskFWriter;
};
int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWriter** ppWriter) {
int32_t code = 0;
SRsmaSnapWriter* pWriter = NULL;
// alloc
pWriter = (SRsmaSnapWriter*)taosMemoryCalloc(1, sizeof(*pWriter));
if (pWriter == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
pWriter->pSma = pSma;
pWriter->sver = sver;
pWriter->ever = ever;
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pSma->pRSmaTsdb[i]) {
code = tsdbSnapWriterOpen(pSma->pRSmaTsdb[i], sver, ever, &pWriter->pDataWriter[i]);
if (code < 0) {
goto _err;
}
}
}
// qtaskinfo
// TODO
*ppWriter = pWriter;
smaInfo("vgId:%d rsma snapshot writer open succeed", TD_VID(pSma->pVnode));
return code;
_err:
smaError("vgId:%d rsma snapshot writer open failed since %s", TD_VID(pSma->pVnode), tstrerror(code));
*ppWriter = NULL;
return code;
}
int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback) {
int32_t code = 0;
SRsmaSnapWriter* pWriter = *ppWriter;
if (rollback) {
ASSERT(0);
// code = tsdbFSRollback(pWriter->pTsdb->pFS);
// if (code) goto _err;
} else {
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pWriter->pDataWriter[i]) {
code = tsdbSnapWriterClose(&pWriter->pDataWriter[i], rollback);
if (code) goto _err;
}
}
}
taosMemoryFree(pWriter);
*ppWriter = NULL;
smaInfo("vgId:%d vnode snapshot rsma writer close succeed", SMA_VID(pWriter->pSma));
return code;
_err:
smaError("vgId:%d vnode snapshot rsma writer close failed since %s", SMA_VID(pWriter->pSma), tstrerror(code));
return code;
}
int32_t rsmaSnapWrite(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
int32_t code = 0;
SSnapDataHdr* pHdr = (SSnapDataHdr*)pData;
// rsma1/rsma2
if (pHdr->type == SNAP_DATA_RSMA1) {
pHdr->type = SNAP_DATA_TSDB;
code = tsdbSnapWrite(pWriter->pDataWriter[0], pData, nData);
} else if (pHdr->type == SNAP_DATA_RSMA2) {
pHdr->type = SNAP_DATA_TSDB;
code = tsdbSnapWrite(pWriter->pDataWriter[1], pData, nData);
} else if (pHdr->type == SNAP_DATA_QTASK) {
code = rsmaSnapWriteQTaskInfo(pWriter, pData, nData);
}
if (code < 0) goto _err;
_exit:
smaInfo("vgId:%d rsma snapshot write for data %" PRIi8 " succeed", SMA_VID(pWriter->pSma), pHdr->type);
return code;
_err:
smaError("vgId:%d rsma snapshot write for data %" PRIi8 " failed since %s", SMA_VID(pWriter->pSma), pHdr->type,
tstrerror(code));
return code;
}
static int32_t rsmaSnapWriteQTaskInfo(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
int32_t code = 0;
if (pWriter->pQTaskFWriter == NULL) {
// SDelFile* pDelFile = pWriter->fs.pDelFile;
// // reader
// if (pDelFile) {
// code = tsdbDelFReaderOpen(&pWriter->pDelFReader, pDelFile, pTsdb, NULL);
// if (code) goto _err;
// code = tsdbReadDelIdx(pWriter->pDelFReader, pWriter->aDelIdxR, NULL);
// if (code) goto _err;
// }
// // writer
// SDelFile delFile = {.commitID = pWriter->commitID, .offset = 0, .size = 0};
// code = tsdbDelFWriterOpen(&pWriter->pDelFWriter, &delFile, pTsdb);
// if (code) goto _err;
}
smaInfo("vgId:%d vnode snapshot rsma write qtaskinfo succeed", SMA_VID(pWriter->pSma));
_exit:
return code;
_err:
smaError("vgId:%d vnode snapshot rsma write qtaskinfo failed since %s", SMA_VID(pWriter->pSma), tstrerror(code));
return code;
}
......@@ -796,7 +796,7 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg) {
SStreamTaskRecoverRsp* pRsp = pMsg->pCont;
int32_t taskId = pRsp->taskId;
int32_t taskId = pRsp->rspTaskId;
SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
if (ppTask) {
streamProcessRecoverRsp(*ppTask, pRsp);
......
......@@ -413,7 +413,7 @@ int32_t vnodeProcessSyncMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
SyncClientRequestBatch *pSyncMsg = syncClientRequestBatchFromRpcMsg(pMsg);
ASSERT(pSyncMsg != NULL);
code = syncNodeOnClientRequestBatchCb(pSyncNode, pSyncMsg);
syncClientRequestBatchDestroyDeep(pSyncMsg);
syncClientRequestBatchDestroy(pSyncMsg);
} else if (pMsg->msgType == TDMT_SYNC_REQUEST_VOTE) {
SyncRequestVote *pSyncMsg = syncRequestVoteFromRpcMsg2(pMsg);
ASSERT(pSyncMsg != NULL);
......
......@@ -318,6 +318,7 @@ typedef struct STableScanInfo {
int32_t currentTable;
int8_t scanMode;
int8_t noTable;
int8_t assignBlockUid;
} STableScanInfo;
typedef struct STableMergeScanInfo {
......
......@@ -264,6 +264,12 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
}
taosArrayPush(pTaskInfo->tableqinfoList.pTableList, &keyInfo);
if (pTaskInfo->tableqinfoList.map == NULL) {
pTaskInfo->tableqinfoList.map =
taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
}
taosHashPut(pTaskInfo->tableqinfoList.map, uid, sizeof(uid), &keyInfo.groupId, sizeof(keyInfo.groupId));
}
if (keyBuf != NULL) {
......
......@@ -408,6 +408,10 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
pBlock->info.groupId = *groupId;
}
if (pTableScanInfo->assignBlockUid) {
pBlock->info.groupId = pBlock->info.uid;
}
pOperator->resultInfo.totalRows = pTableScanInfo->readRecorder.totalRows;
pTableScanInfo->readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
......@@ -616,6 +620,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
pInfo->scanFlag = MAIN_SCAN;
pInfo->pColMatchInfo = pColList;
pInfo->currentGroupId = -1;
pInfo->assignBlockUid = pTableScanNode->assignBlockUid;
pOperator->name = "TableScanOperator"; // for debug purpose
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN;
......
......@@ -1091,6 +1091,8 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->order, scanFlag, true);
blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex);
hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag, NULL);
}
......
......@@ -1522,6 +1522,7 @@ static const char* jkTableScanPhysiPlanWatermark = "Watermark";
static const char* jkTableScanPhysiPlanIgnoreExpired = "IgnoreExpired";
static const char* jkTableScanPhysiPlanGroupTags = "GroupTags";
static const char* jkTableScanPhysiPlanGroupSort = "GroupSort";
static const char* jkTableScanPhysiPlanAssignBlockUid = "AssignBlockUid";
static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj;
......@@ -1578,6 +1579,9 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkTableScanPhysiPlanGroupSort, pNode->groupSort);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkTableScanPhysiPlanAssignBlockUid, pNode->assignBlockUid);
}
return code;
}
......@@ -1637,6 +1641,9 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkTableScanPhysiPlanGroupSort, &pNode->groupSort);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkTableScanPhysiPlanAssignBlockUid, &pNode->assignBlockUid);
}
return code;
}
......@@ -4525,7 +4532,6 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN:
return jsonToPhysiScanNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN:
return jsonToPhysiLastRowScanNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN:
......
......@@ -2271,7 +2271,7 @@ static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubp
FOREACH(pAggTarget, pAgg->pTargets) {
SNode* pScanTarget = NULL;
FOREACH(pScanTarget, pScanNode->node.pTargets) {
if (0 == strcmp(((SColumnNode*)pAggTarget)->colName, ((SColumnNode*)pAggTarget)->colName)) {
if (0 == strcmp(((SColumnNode*)pAggTarget)->colName, ((SColumnNode*)pScanTarget)->colName)) {
nodesListAppend(pScanTargets, nodesCloneNode(pScanTarget));
break;
}
......
......@@ -168,20 +168,20 @@ int32_t schUpdateTaskHandle(SSchJob *pJob, SSchTask *pTask, bool dropExecNode, v
return TSDB_CODE_SUCCESS;
}
// Note: no more task error processing, handled in function internal
int32_t schProcessOnTaskFailure(SSchJob *pJob, SSchTask *pTask, int32_t errCode) {
if (TSDB_CODE_SCH_IGNORE_ERROR == errCode) {
return TSDB_CODE_SCH_IGNORE_ERROR;
}
int8_t status = 0;
if (schJobNeedToStop(pJob, &status)) {
SCH_TASK_DLOG("no more task failure processing cause of job status %s", jobTaskStatusStr(status));
int8_t jobStatus = 0;
if (schJobNeedToStop(pJob, &jobStatus)) {
SCH_TASK_DLOG("no more task failure processing cause of job status %s", jobTaskStatusStr(jobStatus));
SCH_ERR_RET(TSDB_CODE_SCH_IGNORE_ERROR);
}
if (SCH_GET_TASK_STATUS(pTask) != JOB_TASK_STATUS_EXEC) {
SCH_TASK_ELOG("task already not in EXEC status, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
int8_t taskStatus = SCH_GET_TASK_STATUS(pTask);
if (taskStatus == JOB_TASK_STATUS_FAIL || taskStatus == JOB_TASK_STATUS_SUCC) {
SCH_TASK_ELOG("task already done, status:%s", jobTaskStatusStr(taskStatus));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
......
......@@ -32,10 +32,10 @@ typedef struct {
static SStreamGlobalEnv streamEnv;
int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb);
int32_t streamExec(SStreamTask* pTask);
int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum);
int32_t streamDispatch(SStreamTask* pTask, SMsgCb* pMsgCb);
int32_t streamDispatch(SStreamTask* pTask);
int32_t streamDispatchReqToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData);
int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock* pData);
int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* data);
......
......@@ -189,7 +189,7 @@ int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, S
#if 0
if (pTask->execType != TASK_EXEC__NONE) {
#endif
streamExec(pTask, pTask->pMsgCb);
streamExec(pTask);
#if 0
} else {
ASSERT(pTask->sinkType != TASK_SINK__NONE);
......@@ -208,7 +208,7 @@ int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, S
// 3.2 dispatch / sink
if (pTask->dispatchType != TASK_DISPATCH__NONE) {
ASSERT(pTask->sinkType == TASK_SINK__NONE);
streamDispatch(pTask, pTask->pMsgCb);
streamDispatch(pTask);
}
return 0;
......@@ -233,26 +233,55 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp) {
return 0;
}
// continue dispatch
streamDispatch(pTask, pTask->pMsgCb);
streamDispatch(pTask);
return 0;
}
int32_t streamProcessRunReq(SStreamTask* pTask) {
streamExec(pTask, pTask->pMsgCb);
streamExec(pTask);
if (pTask->dispatchType != TASK_DISPATCH__NONE) {
streamDispatch(pTask, pTask->pMsgCb);
streamDispatch(pTask);
}
return 0;
}
int32_t streamProcessRecoverReq(SStreamTask* pTask, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg) {
//
int32_t streamProcessRecoverReq(SStreamTask* pTask, SStreamTaskRecoverReq* pReq, SRpcMsg* pRsp) {
void* buf = rpcMallocCont(sizeof(SMsgHead) + sizeof(SStreamTaskRecoverRsp));
((SMsgHead*)buf)->vgId = htonl(pReq->upstreamNodeId);
SStreamTaskRecoverRsp* pCont = POINTER_SHIFT(buf, sizeof(SMsgHead));
pCont->inputStatus = pTask->inputStatus;
pCont->streamId = pTask->streamId;
pCont->reqTaskId = pTask->taskId;
pCont->rspTaskId = pReq->upstreamTaskId;
pRsp->pCont = buf;
pRsp->contLen = sizeof(SMsgHead) + sizeof(SStreamTaskRecoverRsp);
tmsgSendRsp(pRsp);
return 0;
}
int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp) {
//
if (pRsp->inputStatus == TASK_INPUT_STATUS__NORMAL) {
pTask->outputStatus = TASK_OUTPUT_STATUS__NORMAL;
streamProcessRunReq(pTask);
if (pTask->isDataScan) {
// scan data to recover
pTask->inputStatus = TASK_INPUT_STATUS__RECOVER;
pTask->taskStatus = TASK_STATUS__RECOVERING;
qStreamPrepareRecover(pTask->exec.executor, pTask->startVer, pTask->recoverSnapVer);
if (streamPipelineExec(pTask, 100) < 0) {
return -1;
}
} else {
pTask->inputStatus = TASK_INPUT_STATUS__NORMAL;
pTask->taskStatus = TASK_STATUS__NORMAL;
}
}
return 0;
}
......@@ -262,10 +291,10 @@ int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, S
streamTaskEnqueueRetrieve(pTask, pReq, pRsp);
ASSERT(pTask->execType != TASK_EXEC__NONE);
streamExec(pTask, pTask->pMsgCb);
streamExec(pTask);
ASSERT(pTask->dispatchType != TASK_DISPATCH__NONE);
streamDispatch(pTask, pTask->pMsgCb);
streamDispatch(pTask);
return 0;
}
......
......@@ -438,7 +438,7 @@ FAIL:
return code;
}
int32_t streamDispatch(SStreamTask* pTask, SMsgCb* pMsgCb) {
int32_t streamDispatch(SStreamTask* pTask) {
ASSERT(pTask->dispatchType != TASK_DISPATCH__NONE);
#if 1
int8_t old =
......
......@@ -141,7 +141,7 @@ int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum) {
if (pTask->dispatchType != TASK_DISPATCH__NONE) {
ASSERT(pTask->sinkType == TASK_SINK__NONE);
streamDispatch(pTask, pTask->pMsgCb);
streamDispatch(pTask);
}
}
......@@ -229,7 +229,7 @@ static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) {
}
// TODO: handle version
int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb) {
int32_t streamExec(SStreamTask* pTask) {
SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock));
if (pRes == NULL) return -1;
while (1) {
......
......@@ -19,8 +19,8 @@ int32_t tEncodeStreamTaskRecoverReq(SEncoder* pEncoder, const SStreamTaskRecover
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1;
if (tEncodeI32(pEncoder, pReq->sourceTaskId) < 0) return -1;
if (tEncodeI32(pEncoder, pReq->sourceVg) < 0) return -1;
if (tEncodeI32(pEncoder, pReq->upstreamTaskId) < 0) return -1;
if (tEncodeI32(pEncoder, pReq->upstreamNodeId) < 0) return -1;
tEndEncode(pEncoder);
return pEncoder->pos;
}
......@@ -29,8 +29,8 @@ int32_t tDecodeStreamTaskRecoverReq(SDecoder* pDecoder, SStreamTaskRecoverReq* p
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->sourceTaskId) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->sourceVg) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->upstreamTaskId) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->upstreamNodeId) < 0) return -1;
tEndDecode(pDecoder);
return 0;
}
......@@ -38,7 +38,8 @@ int32_t tDecodeStreamTaskRecoverReq(SDecoder* pDecoder, SStreamTaskRecoverReq* p
int32_t tEncodeStreamTaskRecoverRsp(SEncoder* pEncoder, const SStreamTaskRecoverRsp* pRsp) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI64(pEncoder, pRsp->streamId) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->taskId) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->reqTaskId) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->rspTaskId) < 0) return -1;
if (tEncodeI8(pEncoder, pRsp->inputStatus) < 0) return -1;
tEndEncode(pEncoder);
return pEncoder->pos;
......@@ -47,7 +48,8 @@ int32_t tEncodeStreamTaskRecoverRsp(SEncoder* pEncoder, const SStreamTaskRecover
int32_t tDecodeStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamTaskRecoverRsp* pReq) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->reqTaskId) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->rspTaskId) < 0) return -1;
if (tDecodeI8(pDecoder, &pReq->inputStatus) < 0) return -1;
tEndDecode(pDecoder);
return 0;
......@@ -125,7 +127,7 @@ int32_t streamProcessFailRecoverReq(SStreamTask* pTask, SMStreamTaskRecoverReq*
}
if (pTask->taskStatus == TASK_STATUS__RECOVERING) {
if (streamPipelineExec(pTask, 10) < 0) {
if (streamPipelineExec(pTask, 100) < 0) {
// set fail
return -1;
}
......
......@@ -170,7 +170,7 @@ void syncNodeStart(SSyncNode* pSyncNode);
void syncNodeStartStandBy(SSyncNode* pSyncNode);
void syncNodeClose(SSyncNode* pSyncNode);
int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak);
int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize);
int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg** pMsgPArr, bool* pIsWeakArr, int32_t arrSize);
// option
bool syncNodeSnapshotEnable(SSyncNode* pSyncNode);
......
......@@ -677,7 +677,7 @@ int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak) {
return ret;
}
int32_t syncProposeBatch(int64_t rid, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize) {
int32_t syncProposeBatch(int64_t rid, SRpcMsg** pMsgPArr, bool* pIsWeakArr, int32_t arrSize) {
if (arrSize < 0) {
terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
return -1;
......@@ -690,18 +690,18 @@ int32_t syncProposeBatch(int64_t rid, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_
}
ASSERT(rid == pSyncNode->rid);
int32_t ret = syncNodeProposeBatch(pSyncNode, pMsgArr, pIsWeakArr, arrSize);
int32_t ret = syncNodeProposeBatch(pSyncNode, pMsgPArr, pIsWeakArr, arrSize);
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
return ret;
}
static bool syncNodeBatchOK(SRpcMsg* pMsgArr, int32_t arrSize) {
static bool syncNodeBatchOK(SRpcMsg** pMsgPArr, int32_t arrSize) {
for (int32_t i = 0; i < arrSize; ++i) {
if (pMsgArr[i].msgType == TDMT_SYNC_CONFIG_CHANGE) {
if (pMsgPArr[i]->msgType == TDMT_SYNC_CONFIG_CHANGE) {
return false;
}
if (pMsgArr[i].msgType == TDMT_SYNC_CONFIG_CHANGE_FINISH) {
if (pMsgPArr[i]->msgType == TDMT_SYNC_CONFIG_CHANGE_FINISH) {
return false;
}
}
......@@ -709,8 +709,8 @@ static bool syncNodeBatchOK(SRpcMsg* pMsgArr, int32_t arrSize) {
return true;
}
int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize) {
if (!syncNodeBatchOK(pMsgArr, arrSize)) {
int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg** pMsgPArr, bool* pIsWeakArr, int32_t arrSize) {
if (!syncNodeBatchOK(pMsgPArr, arrSize)) {
syncNodeErrorLog(pSyncNode, "sync propose batch error");
terrno = TSDB_CODE_SYN_BATCH_ERROR;
return -1;
......@@ -736,16 +736,23 @@ int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWe
SRaftMeta raftArr[SYNC_MAX_BATCH_SIZE];
for (int i = 0; i < arrSize; ++i) {
do {
char eventLog[128];
snprintf(eventLog, sizeof(eventLog), "propose type:%s,%d, batch:%d", TMSG_INFO(pMsgPArr[i]->msgType),
pMsgPArr[i]->msgType, arrSize);
syncNodeEventLog(pSyncNode, eventLog);
} while (0);
SRespStub stub;
stub.createTime = taosGetTimestampMs();
stub.rpcMsg = pMsgArr[i];
stub.rpcMsg = *(pMsgPArr[i]);
uint64_t seqNum = syncRespMgrAdd(pSyncNode->pSyncRespMgr, &stub);
raftArr[i].isWeak = pIsWeakArr[i];
raftArr[i].seqNum = seqNum;
}
SyncClientRequestBatch* pSyncMsg = syncClientRequestBatchBuild(pMsgArr, raftArr, arrSize, pSyncNode->vgId);
SyncClientRequestBatch* pSyncMsg = syncClientRequestBatchBuild(pMsgPArr, raftArr, arrSize, pSyncNode->vgId);
ASSERT(pSyncMsg != NULL);
SRpcMsg rpcMsg;
......@@ -759,7 +766,7 @@ int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWe
SRpcMsg* msgArr = syncClientRequestBatchRpcMsgArr(pSyncMsg);
ASSERT(arrSize == pSyncMsg->dataCount);
for (int i = 0; i < arrSize; ++i) {
pMsgArr[i].info.conn.applyIndex = msgArr[i].info.conn.applyIndex;
pMsgPArr[i]->info.conn.applyIndex = msgArr[i].info.conn.applyIndex;
syncRespMgrDel(pSyncNode->pSyncRespMgr, raftArr[i].seqNum);
}
......@@ -790,9 +797,11 @@ int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWe
int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak) {
int32_t ret = 0;
char eventLog[128];
snprintf(eventLog, sizeof(eventLog), "propose type:%s,%d", TMSG_INFO(pMsg->msgType), pMsg->msgType);
syncNodeEventLog(pSyncNode, eventLog);
do {
char eventLog[128];
snprintf(eventLog, sizeof(eventLog), "propose type:%s,%d", TMSG_INFO(pMsg->msgType), pMsg->msgType);
syncNodeEventLog(pSyncNode, eventLog);
} while (0);
if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
if (pSyncNode->changing && pMsg->msgType != TDMT_SYNC_CONFIG_CHANGE_FINISH) {
......@@ -860,7 +869,8 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak) {
} else {
ret = -1;
terrno = TSDB_CODE_SYN_NOT_LEADER;
sError("vgId:%d, sync propose not leader, %s", pSyncNode->vgId, syncUtilState2String(pSyncNode->state));
sError("vgId:%d, sync propose not leader, %s, msgtype:%s,%d", pSyncNode->vgId,
syncUtilState2String(pSyncNode->state), TMSG_INFO(pMsg->msgType), pMsg->msgType);
goto _END;
}
......
......@@ -963,9 +963,9 @@ void syncClientRequestLog2(char* s, const SyncClientRequest* pMsg) {
// block2: SRaftMeta array
// block3: rpc msg array (with pCont)
SyncClientRequestBatch* syncClientRequestBatchBuild(SRpcMsg* rpcMsgArr, SRaftMeta* raftArr, int32_t arrSize,
SyncClientRequestBatch* syncClientRequestBatchBuild(SRpcMsg** rpcMsgPArr, SRaftMeta* raftArr, int32_t arrSize,
int32_t vgId) {
ASSERT(rpcMsgArr != NULL);
ASSERT(rpcMsgPArr != NULL);
ASSERT(arrSize > 0);
int32_t dataLen = 0;
......@@ -991,7 +991,7 @@ SyncClientRequestBatch* syncClientRequestBatchBuild(SRpcMsg* rpcMsgArr, SRaftMet
raftMetaArr[i].seqNum = raftArr[i].seqNum;
// init msgArr
msgArr[i] = rpcMsgArr[i];
msgArr[i] = *(rpcMsgPArr[i]);
}
return pMsg;
......
......@@ -22,25 +22,25 @@ done
echo ""
echo "generate vgId ..."
cat ${logpath}/log.dnode* | grep "vgId:" | grep -v ERROR | awk '{print $5}' | sort | uniq > ${logpath}/log.vgIds.tmp
cat ${logpath}/log.dnode* | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort -T. | uniq | awk -F: '{print $2, $0}' | sort -T. -k1 -n | awk '{print $2}' > ${logpath}/log.vgIds.tmp
echo "all vgIds:" > ${logpath}/log.vgIds
cat ${logpath}/log.dnode* | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort | uniq >> ${logpath}/log.vgIds
cat ${logpath}/log.dnode* | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort -T. | uniq | awk -F: '{print $2, $0}' | sort -T. -k1 -n | awk '{print $2}' >> ${logpath}/log.vgIds
for dnode in `ls ${logpath} | grep dnode | grep -v log`;do
echo "" >> ${logpath}/log.vgIds
echo "" >> ${logpath}/log.vgIds
echo "${dnode}:" >> ${logpath}/log.vgIds
cat ${logpath}/${dnode}/log/taosdlog.* | grep SYN | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort | uniq >> ${logpath}/log.vgIds
cat ${logpath}/${dnode}/log/taosdlog.* | grep SYN | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort -T. | uniq | awk -F: '{print $2, $0}' | sort -T. -k1 -n | awk '{print $2}' >> ${logpath}/log.vgIds
done
echo ""
echo "generate log.dnode.vgId ..."
for logdnode in `ls ${logpath}/log.dnode*`;do
for vgId in `cat ${logpath}/log.vgIds.tmp`;do
rowNum=`cat ${logdnode} | grep "${vgId}" | awk 'BEGIN{rowNum=0}{rowNum++}END{print rowNum}'`
rowNum=`cat ${logdnode} | grep "${vgId}," | awk 'BEGIN{rowNum=0}{rowNum++}END{print rowNum}'`
#echo "-----${rowNum}"
if [ $rowNum -gt 0 ] ; then
echo "generate ${logdnode}.${vgId}"
cat ${logdnode} | grep "${vgId}" > ${logdnode}.${vgId}
cat ${logdnode} | grep "${vgId}," > ${logdnode}.${vgId}
fi
done
done
......@@ -54,7 +54,7 @@ done
echo ""
echo "generate log.leader.term ..."
cat ${logpath}/*.main | grep "become leader" | grep -v "config change" | awk '{print $5,$0}' | awk -F, '{print $4"_"$0}' | sort -k1 > ${logpath}/log.leader.term
cat ${logpath}/*.main | grep "become leader" | grep -v "config change" | awk '{print $5,$0}' | awk -F, '{print $4"_"$0}' | sort -T. -k1 > ${logpath}/log.leader.term
echo ""
echo "generate log.index, log.snapshot, log.records, log.actions ..."
......
......@@ -28,12 +28,12 @@ SRpcMsg *createRpcMsg(int32_t i, int32_t dataLen) {
}
SyncClientRequestBatch *createMsg() {
SRpcMsg rpcMsgArr[5];
memset(rpcMsgArr, 0, sizeof(rpcMsgArr));
SRpcMsg *rpcMsgPArr[5];
memset(rpcMsgPArr, 0, sizeof(rpcMsgPArr));
for (int32_t i = 0; i < 5; ++i) {
SRpcMsg *pRpcMsg = createRpcMsg(i, 20);
rpcMsgArr[i] = *pRpcMsg;
taosMemoryFree(pRpcMsg);
rpcMsgPArr[i] = pRpcMsg;
//taosMemoryFree(pRpcMsg);
}
SRaftMeta raftArr[5];
......@@ -43,7 +43,7 @@ SyncClientRequestBatch *createMsg() {
raftArr[i].isWeak = i % 2;
}
SyncClientRequestBatch *pMsg = syncClientRequestBatchBuild(rpcMsgArr, raftArr, 5, 1234);
SyncClientRequestBatch *pMsg = syncClientRequestBatchBuild(rpcMsgPArr, raftArr, 5, 1234);
return pMsg;
}
......
add_executable(transportTest "")
add_executable(transUT "")
add_executable(pushServer "")
add_executable(svrBench "")
add_executable(cliBench "")
target_sources(transUT
PRIVATE
......@@ -12,9 +13,13 @@ target_sources(transportTest
"transportTests.cpp"
)
target_sources(pushServer
target_sources(svrBench
PRIVATE
"pushServer.c"
"svrBench.c"
)
target_sources(cliBench
PRIVATE
"cliBench.c"
)
target_include_directories(transportTest
......@@ -45,13 +50,37 @@ target_include_directories(transUT
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
target_include_directories(pushServer
target_include_directories(svrBench
PUBLIC
"${TD_SOURCE_DIR}/include/libs/transport"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
target_include_directories(svrBench
PUBLIC
"${TD_SOURCE_DIR}/include/libs/transport"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
target_link_libraries (svrBench
os
util
common
gtest_main
transport
)
target_include_directories(cliBench
PUBLIC
"${TD_SOURCE_DIR}/include/libs/transport"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
target_include_directories(cliBench
PUBLIC
"${TD_SOURCE_DIR}/include/libs/transport"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
target_link_libraries (pushServer
target_link_libraries (cliBench
os
util
common
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "os.h"
#include "taoserror.h"
#include "tglobal.h"
#include "transLog.h"
#include "trpc.h"
#include "tutil.h"
typedef struct {
int index;
SEpSet epSet;
int num;
int numOfReqs;
int msgSize;
tsem_t rspSem;
tsem_t *pOverSem;
TdThread thread;
void *pRpc;
} SInfo;
static void processResponse(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
SInfo *pInfo = (SInfo *)pMsg->info.ahandle;
tDebug("thread:%d, response is received, type:%d contLen:%d code:0x%x", pInfo->index, pMsg->msgType, pMsg->contLen,
pMsg->code);
if (pEpSet) pInfo->epSet = *pEpSet;
rpcFreeCont(pMsg->pCont);
tsem_post(&pInfo->rspSem);
}
static int tcount = 0;
static void *sendRequest(void *param) {
SInfo *pInfo = (SInfo *)param;
SRpcMsg rpcMsg = {0};
tDebug("thread:%d, start to send request", pInfo->index);
while (pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) {
pInfo->num++;
rpcMsg.pCont = rpcMallocCont(pInfo->msgSize);
rpcMsg.contLen = pInfo->msgSize;
rpcMsg.info.ahandle = pInfo;
rpcMsg.msgType = 1;
tDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num);
rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg, NULL);
if (pInfo->num % 20000 == 0) tInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num);
tsem_wait(&pInfo->rspSem);
}
tDebug("thread:%d, it is over", pInfo->index);
tcount++;
return NULL;
}
int main(int argc, char *argv[]) {
SRpcInit rpcInit;
SEpSet epSet;
int msgSize = 128;
int numOfReqs = 0;
int appThreads = 1;
char serverIp[40] = "127.0.0.1";
struct timeval systemTime;
int64_t startTime, endTime;
// server info
epSet.numOfEps = 1;
epSet.inUse = 0;
epSet.eps[0].port = 7000;
epSet.eps[1].port = 7000;
strcpy(epSet.eps[0].fqdn, serverIp);
strcpy(epSet.eps[1].fqdn, "192.168.0.1");
// client info
memset(&rpcInit, 0, sizeof(rpcInit));
rpcInit.localPort = 0;
rpcInit.label = "APP";
rpcInit.numOfThreads = 1;
rpcInit.cfp = processResponse;
rpcInit.sessions = 100;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.user = "michael";
rpcInit.connType = TAOS_CONN_CLIENT;
rpcDebugFlag = 131;
for (int i = 1; i < argc; ++i) {
if (strcmp(argv[i], "-p") == 0 && i < argc - 1) {
} else if (strcmp(argv[i], "-i") == 0 && i < argc - 1) {
} else if (strcmp(argv[i], "-t") == 0 && i < argc - 1) {
rpcInit.numOfThreads = atoi(argv[++i]);
} else if (strcmp(argv[i], "-m") == 0 && i < argc - 1) {
msgSize = atoi(argv[++i]);
} else if (strcmp(argv[i], "-s") == 0 && i < argc - 1) {
rpcInit.sessions = atoi(argv[++i]);
} else if (strcmp(argv[i], "-n") == 0 && i < argc - 1) {
numOfReqs = atoi(argv[++i]);
} else if (strcmp(argv[i], "-a") == 0 && i < argc - 1) {
appThreads = atoi(argv[++i]);
} else if (strcmp(argv[i], "-o") == 0 && i < argc - 1) {
tsCompressMsgSize = atoi(argv[++i]);
} else if (strcmp(argv[i], "-u") == 0 && i < argc - 1) {
} else if (strcmp(argv[i], "-k") == 0 && i < argc - 1) {
} else if (strcmp(argv[i], "-spi") == 0 && i < argc - 1) {
} else if (strcmp(argv[i], "-d") == 0 && i < argc - 1) {
rpcDebugFlag = atoi(argv[++i]);
} else {
printf("\nusage: %s [options] \n", argv[0]);
printf(" [-i ip]: first server IP address, default is:%s\n", serverIp);
printf(" [-t threads]: number of rpc threads, default is:%d\n", rpcInit.numOfThreads);
printf(" [-m msgSize]: message body size, default is:%d\n", msgSize);
printf(" [-a threads]: number of app threads, default is:%d\n", appThreads);
printf(" [-n requests]: number of requests per thread, default is:%d\n", numOfReqs);
printf(" [-u user]: user name for the connection, default is:%s\n", rpcInit.user);
printf(" [-d debugFlag]: debug flag, default:%d\n", rpcDebugFlag);
printf(" [-h help]: print out this help\n\n");
exit(0);
}
}
taosInitLog("client.log", 100000);
void *pRpc = rpcOpen(&rpcInit);
if (pRpc == NULL) {
tError("failed to initialize RPC");
return -1;
}
tInfo("client is initialized");
tInfo("threads:%d msgSize:%d requests:%d", appThreads, msgSize, numOfReqs);
int64_t now = taosGetTimestampUs();
SInfo *pInfo = (SInfo *)taosMemoryCalloc(1, sizeof(SInfo) * appThreads);
SInfo *p = pInfo;
for (int i = 0; i < appThreads; ++i) {
pInfo->index = i;
pInfo->epSet = epSet;
pInfo->numOfReqs = numOfReqs;
pInfo->msgSize = msgSize;
tsem_init(&pInfo->rspSem, 0, 0);
pInfo->pRpc = pRpc;
taosThreadCreate(&pInfo->thread, NULL, sendRequest, pInfo);
pInfo++;
}
do {
taosUsleep(1);
} while (tcount < appThreads);
float usedTime = (taosGetTimestampUs() - now) / 1000.0f;
tInfo("it takes %.3f mseconds to send %d requests to server", usedTime, numOfReqs * appThreads);
tInfo("Performance: %.3f requests per second, msgSize:%d bytes", 1000.0 * numOfReqs * appThreads / usedTime, msgSize);
for (int i = 0; i < appThreads; i++) {
SInfo *pInfo = p;
taosThreadJoin(pInfo->thread, NULL);
p++;
}
int ch = getchar();
UNUSED(ch);
taosCloseLog();
return 0;
}
......@@ -24,12 +24,12 @@ int msgSize = 128;
int commit = 0;
TdFilePtr pDataFile = NULL;
STaosQueue *qhandle = NULL;
STaosQset * qset = NULL;
STaosQset *qset = NULL;
void processShellMsg() {
static int num = 0;
STaosQall *qall;
SRpcMsg * pRpcMsg, rpcMsg;
SRpcMsg *pRpcMsg, rpcMsg;
int type;
SQueueInfo qinfo = {0};
......@@ -77,7 +77,6 @@ void processShellMsg() {
taosFreeQitem(pRpcMsg);
{
// taosSsleep(1);
SRpcMsg nRpcMsg = {0};
nRpcMsg.pCont = rpcMallocCont(msgSize);
nRpcMsg.contLen = msgSize;
......@@ -93,26 +92,6 @@ void processShellMsg() {
taosFreeQall(qall);
}
int retrieveAuthInfo(void *parent, char *meterId, char *spi, char *encrypt, char *secret, char *ckey) {
// app shall retrieve the auth info based on meterID from DB or a data file
// demo code here only for simple demo
int ret = 0;
if (strcmp(meterId, "michael") == 0) {
*spi = 1;
*encrypt = 0;
strcpy(secret, "mypassword");
strcpy(ckey, "key");
} else if (strcmp(meterId, "jeff") == 0) {
*spi = 0;
*encrypt = 0;
} else {
ret = -1; // user not there
}
return ret;
}
void processRequestMsg(void *pParent, SRpcMsg *pMsg, SEpSet *pEpSet) {
SRpcMsg *pTemp;
......@@ -131,11 +110,12 @@ int main(int argc, char *argv[]) {
memset(&rpcInit, 0, sizeof(rpcInit));
rpcInit.localPort = 7000;
memcpy(rpcInit.localFqdn, "localhost", strlen("localhost"));
rpcInit.label = "SER";
rpcInit.numOfThreads = 1;
rpcInit.cfp = processRequestMsg;
rpcInit.sessions = 1000;
rpcInit.idleTime = 2 * 1500;
rpcDebugFlag = 131;
for (int i = 1; i < argc; ++i) {
if (strcmp(argv[i], "-p") == 0 && i < argc - 1) {
......@@ -170,7 +150,7 @@ int main(int argc, char *argv[]) {
tsAsyncLog = 0;
rpcInit.connType = TAOS_CONN_SERVER;
taosInitLog("server.log", 10);
taosInitLog("server.log", 100000);
void *pRpc = rpcOpen(&rpcInit);
if (pRpc == NULL) {
......
......@@ -294,7 +294,7 @@ void taosArraySet(SArray* pArray, size_t index, void* pData) {
void taosArrayPopFrontBatch(SArray* pArray, size_t cnt) {
assert(cnt <= pArray->size);
pArray->size = pArray->size - cnt;
if (pArray->size == 0) {
if (pArray->size == 0 || cnt == 0) {
return;
}
memmove(pArray->pData, (char*)pArray->pData + cnt * pArray->elemSize, pArray->size * pArray->elemSize);
......
......@@ -16,8 +16,8 @@
#define _DEFAULT_SOURCE
#include "tlog.h"
#include "os.h"
#include "tutil.h"
#include "tconfig.h"
#include "tutil.h"
#define LOG_MAX_LINE_SIZE (1024)
#define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3)
......@@ -40,7 +40,7 @@
#define LOG_BUF_MUTEX(x) ((x)->buffMutex)
typedef struct {
char * buffer;
char *buffer;
int32_t buffStart;
int32_t buffEnd;
int32_t buffSize;
......@@ -59,15 +59,15 @@ typedef struct {
int32_t openInProgress;
pid_t pid;
char logName[LOG_FILE_NAME_LEN];
SLogBuff * logHandle;
SLogBuff *logHandle;
TdThreadMutex logMutex;
} SLogObj;
extern SConfig *tsCfg;
static int8_t tsLogInited = 0;
static SLogObj tsLogObj = {.fileNum = 1};
static int64_t tsAsyncLogLostLines = 0;
static int32_t tsWriteInterval = LOG_DEFAULT_INTERVAL;
static int8_t tsLogInited = 0;
static SLogObj tsLogObj = {.fileNum = 1};
static int64_t tsAsyncLogLostLines = 0;
static int32_t tsWriteInterval = LOG_DEFAULT_INTERVAL;
bool tsLogEmbedded = 0;
bool tsAsyncLog = true;
......@@ -106,7 +106,7 @@ int64_t dbgSmallWN = 0;
int64_t dbgBigWN = 0;
int64_t dbgWSize = 0;
static void * taosAsyncOutputLog(void *param);
static void *taosAsyncOutputLog(void *param);
static int32_t taosPushLogBuffer(SLogBuff *pLogBuf, const char *msg, int32_t msgLen);
static SLogBuff *taosLogBuffNew(int32_t bufSize);
static void taosCloseLogByFd(TdFilePtr pFile);
......@@ -128,7 +128,11 @@ int32_t taosInitLog(const char *logName, int32_t maxFiles) {
osUpdate();
char fullName[PATH_MAX] = {0};
snprintf(fullName, PATH_MAX, "%s" TD_DIRSEP "%s", tsLogDir, logName);
if (strlen(tsLogDir) != 0) {
snprintf(fullName, PATH_MAX, "%s" TD_DIRSEP "%s", tsLogDir, logName);
} else {
snprintf(fullName, PATH_MAX, "%s", logName);
}
tsLogObj.logHandle = taosLogBuffNew(LOG_DEFAULT_BUF_SIZE);
if (tsLogObj.logHandle == NULL) return -1;
......@@ -704,7 +708,7 @@ int32_t taosCompressFile(char *srcFileName, char *destFileName) {
int32_t compressSize = 163840;
int32_t ret = 0;
int32_t len = 0;
char * data = taosMemoryMalloc(compressSize);
char *data = taosMemoryMalloc(compressSize);
// gzFile dstFp = NULL;
// srcFp = fopen(srcFileName, "r");
......
......@@ -489,7 +489,7 @@ class TDDnode:
onlyKillOnceWindows = 0
while(processID):
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
killCmd = "kill -4 %s > /dev/null 2>&1" % processID
os.system(killCmd)
onlyKillOnceWindows = 1
time.sleep(1)
......@@ -503,7 +503,7 @@ class TDDnode:
time.sleep(2)
self.running = 0
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
tdLog.debug("dnode:%d is stopped by kill -4" % (self.index))
def stoptaosd(self):
......@@ -527,7 +527,7 @@ class TDDnode:
onlyKillOnceWindows = 0
while(processID):
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
killCmd = "kill -4 %s > /dev/null 2>&1" % processID
os.system(killCmd)
onlyKillOnceWindows = 1
time.sleep(1)
......@@ -537,7 +537,7 @@ class TDDnode:
time.sleep(2)
self.running = 0
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
tdLog.debug("dnode:%d is stopped by kill -4" % (self.index))
def forcestop(self):
if (not self.remoteIP == ""):
......
......@@ -298,8 +298,8 @@
./test.sh -f tsim/sma/drop_sma.sim
./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
# temp disable
#./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
#./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
# --- valgrind
./test.sh -f tsim/valgrind/checkError1.sim
......@@ -325,7 +325,7 @@
# --- sync
./test.sh -f tsim/sync/3Replica1VgElect.sim
./test.sh -f tsim/sync/3Replica5VgElect.sim
#./test.sh -f tsim/sync/3Replica5VgElect.sim
./test.sh -f tsim/sync/oneReplica1VgElect.sim
./test.sh -f tsim/sync/oneReplica5VgElect.sim
......
......@@ -75,61 +75,61 @@ if $data02 != leader then
return -1
endi
print =============== create drop bnode 1
sql create bnode on dnode 1
sql show bnodes
if $rows != 1 then
return -1
endi
if $data00 != 1 then
return -1
endi
sql_error create bnode on dnode 1
sql drop bnode on dnode 1
sql show bnodes
if $rows != 0 then
return -1
endi
sql_error drop bnode on dnode 1
print =============== create drop bnode 2
sql create bnode on dnode 2
sql show bnodes
if $rows != 1 then
return -1
endi
if $data00 != 2 then
return -1
endi
sql_error create bnode on dnode 2
sql drop bnode on dnode 2
sql show bnodes
if $rows != 0 then
return -1
endi
sql_error drop bnode on dnode 2
print =============== create drop bnodes
sql create bnode on dnode 1
sql create bnode on dnode 2
sql show bnodes
if $rows != 2 then
return -1
endi
print =============== restart
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 2000
sql show bnodes
if $rows != 2 then
return -1
endi
#print =============== create drop bnode 1
#sql create bnode on dnode 1
#sql show bnodes
#if $rows != 1 then
# return -1
#endi
#if $data00 != 1 then
# return -1
#endi
#sql_error create bnode on dnode 1
#
#sql drop bnode on dnode 1
#sql show bnodes
#if $rows != 0 then
# return -1
#endi
#sql_error drop bnode on dnode 1
#
#print =============== create drop bnode 2
#sql create bnode on dnode 2
#sql show bnodes
#if $rows != 1 then
# return -1
#endi
#if $data00 != 2 then
# return -1
#endi
#sql_error create bnode on dnode 2
#
#sql drop bnode on dnode 2
#sql show bnodes
#if $rows != 0 then
# return -1
#endi
#sql_error drop bnode on dnode 2
#
#print =============== create drop bnodes
#sql create bnode on dnode 1
#sql create bnode on dnode 2
#sql show bnodes
#if $rows != 2 then
# return -1
#endi
#print =============== restart
#system sh/exec.sh -n dnode1 -s stop -x SIGINT
#system sh/exec.sh -n dnode2 -s stop -x SIGINT
#system sh/exec.sh -n dnode1 -s start
#system sh/exec.sh -n dnode2 -s start
#
#sleep 2000
#sql show bnodes
#if $rows != 2 then
# return -1
#endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
......@@ -360,8 +360,9 @@ endi
if $data04 != @abc0@ then
return -1
endi
sql select distinct tbname,t1,t2 from select_tags_mt0;
print "really this line"
sql select distinct tbname,t1,t2 from select_tags_mt0 order by tbname;
print $data00 $data01 $data02 $data10 $data111 $data12
if $row != 16 then
return -1
endi
......@@ -390,7 +391,7 @@ if $data12 != @abc1@ then
return -1
endi
sql select tbname,ts from select_tags_mt0;
sql select tbname,ts from select_tags_mt0 order by ts;
if $row != 12800 then
return -1
endi
......
......@@ -99,7 +99,7 @@ if $rows != 1 then
endi
#sql select * from information_schema.`streams`
sql select * from information_schema.user_tables
if $rows != 31 then
if $rows <= 0 then
return -1
endi
#sql select * from information_schema.user_table_distributed
......@@ -197,7 +197,7 @@ if $rows != 1 then
endi
#sql select * from performance_schema.`streams`
sql select * from information_schema.user_tables
if $rows != 31 then
if $rows <= 0 then
return -1
endi
#sql select * from information_schema.user_table_distributed
......
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start
sql connect
sql create dnode $hostname port 7200
sql create dnode $hostname port 7300
sql create dnode $hostname port 7400
$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 10 then
print ====> dnode not ready!
return -1
endi
sql show dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
print ===> $data30 $data31 $data32 $data33 $data34 $data35
if $rows != 4 then
return -1
endi
if $data(1)[4] != ready then
goto step1
endi
if $data(2)[4] != ready then
goto step1
endi
if $data(3)[4] != ready then
goto step1
endi
if $data(4)[4] != ready then
goto step1
endi
$replica = 3
$vgroups = 30
print ============= create database
sql create database db replica $replica vgroups $vgroups
$loop_cnt = 0
check_db_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 100 then
print ====> db not ready!
return -1
endi
sql show databases
print ===> rows: $rows
print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][10] $data[2][11] $data[2][12] $data[2][13] $data[2][14] $data[2][15] $data[2][16] $data[2][17] $data[2][18] $data[2][19]
if $rows != 3 then
return -1
endi
if $data[2][15] != ready then
goto check_db_ready
endi
sql use db
$loop_cnt = 0
check_vg_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 300 then
print ====> vgroups not ready!
return -1
endi
sql show vgroups
print ===> rows: $rows
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11]
if $rows != $vgroups then
return -1
endi
if $data[0][4] == leader then
if $data[0][6] == follower then
if $data[0][8] == follower then
print ---- vgroup $data[0][0] leader located on dnode $data[0][3]
endi
endi
elif $data[0][6] == leader then
if $data[0][4] == follower then
if $data[0][8] == follower then
print ---- vgroup $data[0][0] leader located on dnode $data[0][5]
endi
endi
elif $data[0][8] == leader then
if $data[0][4] == follower then
if $data[0][6] == follower then
print ---- vgroup $data[0][0] leader located on dnode $data[0][7]
endi
endi
else
goto check_vg_ready
endi
vg_ready:
print ====> create stable/child table
sql create table stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int)
sql show stables
if $rows != 1 then
return -1
endi
sql create table ct1 using stb tags(1000)
print ===> write 10000 records
$N = 10000
$count = 0
while $count < $N
$ms = 1591200000000 + $count
sql insert into ct1 values( $ms , $count , 2.1, 3.1)
$count = $count + 1
endw
......@@ -132,7 +132,7 @@ print ===> write 100 records
$N = 100
$count = 0
while $count < $N
$ms = 1591200000000 + $count
$ms = 1658924000000 + $count
sql insert into ct1 values( $ms , $count , 2.1, 3.1)
$count = $count + 1
endw
......@@ -149,7 +149,7 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
sleep 3000
########################################################
print ===> start dnode1 dnode2 dnode3 dnode4
......
......@@ -105,7 +105,7 @@ if $rows != 1 then
endi
sql select * from information_schema.user_tables
if $rows != 31 then
if $rows <= 0 then
return -1
endi
......
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
from numpy import row_stack
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import time
import inspect
import ctypes
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to execute {__file__}")
self.TDDnodes = None
tdSql.init(conn.cursor())
self.host = socket.gethostname()
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def _async_raise(self, tid, exctype):
"""raises the exception, performs cleanup if needed"""
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def stopThread(self,thread):
self._async_raise(thread.ident, SystemExit)
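# Usage sketch (illustrative; the worker name below is hypothetical):
# _async_raise injects an exception into a running thread through
# ctypes.pythonapi.PyThreadState_SetAsyncExc, so stopThread() can abort a
# worker that no longer responds, e.g.
#   t = threading.Thread(target=some_worker)
#   t.start()
#   self.stopThread(t)   # asynchronously raises SystemExit inside the thread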
def insertData(self,countstart,countstop):
# first add data: db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
print("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("use db%d" %couti)
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db0_0',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'replica': 1,
'stbName': 'stb',
'stbNumbers': 2,
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'ctbPrefix': 'ctb',
'ctbNum': 200,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
"rowsPerTbl": 100,
"batchNum": 5000
}
username="user1"
passwd="123"
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allctbNumbers=(paraDict['stbNumbers']*paraDict["ctbNum"])
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkMnodeStatus(1)
# first add three mnodes;
tdLog.info("first add three mnodes and check mnode status")
tdSql.execute("create mnode on dnode 2")
clusterComCheck.checkMnodeStatus(2)
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and confirm the dnode status again
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
print(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)
# recreate mnode
tdSql.execute("drop dnode 2;")
tdSql.execute('create dnode "%s:6130";'%self.host)
tdDnodes=cluster.dnodes
tdDnodes[1].stoptaosd()
tdDnodes[1].deploy()
tdDnodes[1].starttaosd()
clusterComCheck.checkDnodes(dnodeNumbers)
tdSql.execute("create mnode on dnode 6")
tdSql.error("drop dnode 1;")
# check status of clusters
clusterComCheck.checkMnodeStatus(3)
tdSql.execute("create user %s pass '%s' ;"%(username,passwd))
tdSql.query("show users")
for i in range(tdSql.queryRows):
if tdSql.queryResult[i][0] == "%s"%username :
tdLog.info("create user:%s successfully"%username)
# # create database and stable
# clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
# tdLog.info("Take turns stopping Mnodes ")
# tdDnodes=cluster.dnodes
# stopcount =0
# threads=[]
# # create stable:stb_0
# stableName= paraDict['stbName']
# newTdSql=tdCom.newTdSql()
# clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
# #create child table:ctb_0
# for i in range(paraDict['stbNumbers']):
# stableName= '%s_%d'%(paraDict['stbName'],i)
# newTdSql=tdCom.newTdSql()
# clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
# #insert date
# for i in range(paraDict['stbNumbers']):
# stableName= '%s_%d'%(paraDict['stbName'],i)
# newTdSql=tdCom.newTdSql()
# threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
# for tr in threads:
# tr.start()
# for tr in threads:
# tr.join()
# while stopcount < restartNumbers:
# tdLog.info(" restart loop: %d"%stopcount )
# if stopRole == "mnode":
# for i in range(mnodeNums):
# tdDnodes[i].stoptaosd()
# # sleep(10)
# tdDnodes[i].starttaosd()
# # sleep(10)
# elif stopRole == "vnode":
# for i in range(vnodeNumbers):
# tdDnodes[i+mnodeNums].stoptaosd()
# # sleep(10)
# tdDnodes[i+mnodeNums].starttaosd()
# # sleep(10)
# elif stopRole == "dnode":
# for i in range(dnodeNumbers):
# tdDnodes[i].stoptaosd()
# # sleep(10)
# tdDnodes[i].starttaosd()
# # sleep(10)
# # dnodeNumbers don't include database of schema
# if clusterComCheck.checkDnodes(dnodeNumbers):
# tdLog.info("dnode is ready")
# else:
# print("dnodes is not ready")
# self.stopThread(threads)
# tdLog.exit("one or more of dnodes failed to start ")
# # self.check3mnode()
# stopcount+=1
# clusterComCheck.checkDnodes(dnodeNumbers)
# clusterComCheck.checkDbRows(dbNumbers)
# # clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
# tdSql.execute("use %s" %(paraDict["dbName"]))
# tdSql.query("show stables")
# tdSql.checkRows(paraDict["stbNumbers"])
# # for i in range(paraDict['stbNumbers']):
# # stableName= '%s_%d'%(paraDict['stbName'],i)
# # tdSql.query("select * from %s"%stableName)
# # tdSql.checkRows(rowsPerStb)
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
......@@ -190,10 +190,9 @@ class TDTestCase:
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
tdLog.info("dnode is ready")
else:
print("456")
print("dnodes is not ready")
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
......@@ -207,10 +206,11 @@ class TDTestCase:
tdSql.execute("use %s" %(paraDict["dbName"]))
tdSql.query("show stables")
tdSql.checkRows(paraDict["stbNumbers"])
for i in range(paraDict['stbNumbers']):
stableName= '%s_%d'%(paraDict['stbName'],i)
tdSql.query("select * from %s"%stableName)
tdSql.checkRows(rowsPerStb)
# for i in range(paraDict['stbNumbers']):
# stableName= '%s_%d'%(paraDict['stbName'],i)
# tdSql.query("select * from %s"%stableName)
# tdSql.checkRows(rowsPerStb)
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
......
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
from numpy import row_stack
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import time
import inspect
import ctypes
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to execute {__file__}")
self.TDDnodes = None
tdSql.init(conn.cursor())
self.host = socket.gethostname()
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def _async_raise(self, tid, exctype):
"""raises the exception, performs cleanup if needed"""
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def stopThread(self,thread):
self._async_raise(thread.ident, SystemExit)
def insertData(self,countstart,countstop):
# first add data: db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
print("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
tdSql.execute("use db%d" %couti)
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db0_0',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'replica': 1,
'stbName': 'stb',
'stbNumbers': 2,
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'ctbPrefix': 'ctb',
'ctbNum': 200,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
"rowsPerTbl": 100,
"batchNum": 5000
}
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allctbNumbers=(paraDict['stbNumbers']*paraDict["ctbNum"])
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkMnodeStatus(1)
# first add three mnodes;
tdLog.info("first add three mnodes and check mnode status")
tdSql.execute("create mnode on dnode 2")
clusterComCheck.checkMnodeStatus(2)
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and confirm the dnode status again
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
print(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]
# create stable:stb_0
stableName= paraDict['stbName']
newTdSql=tdCom.newTdSql()
clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
#create child table:ctb_0
for i in range(paraDict['stbNumbers']):
stableName= '%s_%d'%(paraDict['stbName'],i)
newTdSql=tdCom.newTdSql()
clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
# insert data
for i in range(paraDict['stbNumbers']):
stableName= '%s_%d'%(paraDict['stbName'],i)
newTdSql=tdCom.newTdSql()
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
for tr in threads:
tr.start()
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
for i in range(mnodeNums):
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
# sleep(10)
tdDnodes[i+mnodeNums].starttaosd()
# sleep(10)
elif stopRole == "dnode":
for i in range(dnodeNumbers):
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("dnode is ready")
else:
print("dnodes is not ready")
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkDbRows(dbNumbers)
# clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
tdSql.execute("use %s" %(paraDict["dbName"]))
tdSql.query("show stables")
tdSql.checkRows(paraDict["stbNumbers"])
# for i in range(paraDict['stbNumbers']):
# stableName= '%s_%d'%(paraDict['stbName'],i)
# tdSql.query("select * from %s"%stableName)
# tdSql.checkRows(rowsPerStb)
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
......@@ -98,8 +98,10 @@ class TDTestCase:
# first add three mnodes;
tdLog.info("first add three mnodes and check mnode status")
tdSql.info("create mnode on dnode 2")
tdSql.execute("create mnode on dnode 2")
clusterComCheck.checkMnodeStatus(2)
tdSql.info("create mnode on dnode 3")
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
......
......@@ -68,7 +68,7 @@ class TDTestCase:
'showRow': 1}
dnodenumbers=int(dnodenumbers)
mnodeNums=int(mnodeNums)
dbNumbers = int(dnodenumbers * restartNumber)
dbNumbers = 1
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
......@@ -104,7 +104,7 @@ class TDTestCase:
tdDnodes[1].starttaosd()
tdDnodes[2].starttaosd()
clusterComCheck.checkMnodeStatus(3)
clusterComCheck.checkMnodeStatus(mnodeNums)
def run(self):
......
......@@ -111,14 +111,14 @@ class TDTestCase:
# separate vnode and mnode in different dnodes.
# create database and stable
stopcount =0
while stopcount <= 2:
while stopcount < restartNumber:
tdLog.info("first restart loop")
for i in range(dnodenumbers):
tdDnodes[i].stoptaosd()
tdDnodes[i].starttaosd()
stopcount+=1
clusterComCheck.checkDnodes(dnodenumbers)
clusterComCheck.checkMnodeStatus(3)
clusterComCheck.checkMnodeStatus(mnodeNums)
def run(self):
# print(self.master_dnode.cfgDict)
......
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
from test import tdDnodes
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import *
import time
import socket
import subprocess
from multiprocessing import Process
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def fiveDnodeThreeMnode(self,dnodenumbers,mnodeNums,restartNumber):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db0_0',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'replica': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'ctbPrefix': 'ctb',
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1}
dnodenumbers=int(dnodenumbers)
mnodeNums=int(mnodeNums)
dbNumbers = 1
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodenumbers)
clusterComCheck.checkMnodeStatus(1)
# first add three mnodes;
tdLog.info("first add three mnodes and check mnode status")
tdSql.execute("create mnode on dnode 2")
clusterComCheck.checkMnodeStatus(2)
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and confirm the dnode status again
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
# print(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodenumbers)
# restart all taosd
tdDnodes=cluster.dnodes
tdLog.info("stop two mnode ")
tdDnodes[0].stoptaosd()
tdDnodes[1].stoptaosd()
# tdLog.info("check whether 2 mnode status is offline")
# clusterComCheck.check3mnode2off()
# tdSql.error("create user user1 pass '123';")
tdLog.info("start one mnode" )
tdDnodes[0].starttaosd()
clusterComCheck.check3mnodeoff(2)
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
clusterComCheck.checkDb(dbNumbers,1,'db0')
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodenumbers=5,mnodeNums=3,restartNumber=1)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
......@@ -55,6 +55,7 @@ class ClusterComCheck:
count+=1
time.sleep(1)
else:
tdSql.query("show dnodes")
tdLog.debug(tdSql.queryResult)
tdLog.exit("it find cluster with %d dnodes but check that there dnodes are not ready within 30s ! "%dnodeNumbers)
......@@ -111,7 +112,7 @@ class ClusterComCheck:
def checkMnodeStatus(self,mnodeNums):
self.mnodeNums=int(mnodeNums)
# self.leaderDnode=int(leaderDnode)
tdLog.debug("start to check status of mnodes")
count=0
while count < 10:
......
......@@ -43,9 +43,9 @@ class TDTestCase:
tdLog.exit("compare error: %s != %s"%src, dst)
else:
break
tdSql.execute('use db_taosx')
tdSql.query("select * from ct3 order by c1 desc")
tdSql.query("select * from ct3 order by c1 desc")
tdSql.checkRows(2)
tdSql.checkData(0, 1, 51)
tdSql.checkData(0, 4, 940)
......@@ -58,17 +58,17 @@ class TDTestCase:
tdSql.query("select * from ct2")
tdSql.checkRows(0)
tdSql.query("select * from ct0 order by c1")
tdSql.query("select * from ct0 order by c1 ")
tdSql.checkRows(2)
tdSql.checkData(0, 3, "a")
tdSql.checkData(1, 4, None)
tdSql.query("select * from n1 order by cc3 desc")
tdSql.query("select * from n1 order by ts")
tdSql.checkRows(2)
tdSql.checkData(0, 1, "eeee")
tdSql.checkData(1, 2, 940)
tdSql.query("select * from jt order by i desc")
tdSql.query("select * from jt order by i desc;")
tdSql.checkRows(2)
tdSql.checkData(0, 1, 11)
tdSql.checkData(0, 2, None)
......
......@@ -164,6 +164,7 @@ python3 ./test.py -f 2-query/function_null.py
python3 ./test.py -f 2-query/queryQnode.py
python3 ./test.py -f 2-query/max_partition.py
python3 ./test.py -f 2-query/last_row.py
python3 ./test.py -f 2-query/tsbsQuery.py
python3 ./test.py -f 6-cluster/5dnode1mnode.py
python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3
......@@ -171,7 +172,7 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeStopLoop.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3
# python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 5 -M 3
# python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 5 -M 3
......@@ -179,6 +180,7 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 5 -M 3
# python3 ./test.py -f 6-cluster/5dnode3mnodeRestartMnodeInsertData.py -N 5 -M 3
# python3 ./test.py -f 6-cluster/5dnode3mnodeRestartVnodeInsertData.py -N 5 -M 3
......@@ -187,6 +189,11 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 6 -M 3 -C 5
# python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5
# python3 test.py -f 6-cluster/5dnode3mnodeStopConnect.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeStopFollowerLeader.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3
python3 ./test.py -f 7-tmq/dropDbR3ConflictTransaction.py -N 3
python3 ./test.py -f 7-tmq/basic5.py
python3 ./test.py -f 7-tmq/subscribeDb.py
......@@ -216,7 +223,7 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb1.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb.py
# python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
......@@ -332,7 +339,7 @@ python3 ./test.py -f 2-query/function_null.py -Q 2
python3 ./test.py -f 2-query/count_partition.py -Q 2
python3 ./test.py -f 2-query/max_partition.py -Q 2
python3 ./test.py -f 2-query/last_row.py -Q 2
python3 ./test.py -f 2-query/tsbsQuery.py -Q 2
#------------querPolicy 3-----------
python3 ./test.py -f 2-query/between.py -Q 3
......@@ -419,3 +426,4 @@ python3 ./test.py -f 2-query/function_null.py -Q 3
python3 ./test.py -f 2-query/count_partition.py -Q 3
python3 ./test.py -f 2-query/max_partition.py -Q 3
python3 ./test.py -f 2-query/last_row.py -Q 3
python3 ./test.py -f 2-query/tsbsQuery.py -Q 3
\ No newline at end of file
......@@ -24,4 +24,7 @@ target_include_directories(
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
add_dependencies(taosd jemalloc)
ENDIF ()
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)