Commit 3698e02b authored by Benguang Zhao

Merge branch '2.4' into fix/TS-1601-V24

@@ -387,14 +387,41 @@ pipeline {
         }
       }
       catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
-        timeout(time: 25, unit: 'MINUTES') {
-          sh '''
-            date
-            cd ${WKC}/tests/parallel_test
-            time ./run.sh -m /home/m.json -t cases.task -l ${LOGDIR} -b ${BRANCH_NAME}
-            date
-            hostname
-          '''
+        timeout(time: 60, unit: 'MINUTES') {
+          script {
+            def extra_param = ""
+            def log_server_file = "/home/log_server.json"
+            def timeout_cmd = ""
+            if (fileExists(log_server_file)) {
+              def log_server_enabled = sh (
+                script: 'jq .enabled ' + log_server_file,
+                returnStdout: true
+              ).trim()
+              def timeout_param = sh (
+                script: 'jq .timeout ' + log_server_file,
+                returnStdout: true
+              ).trim()
+              if (timeout_param != "null" && timeout_param != "0") {
+                timeout_cmd = "timeout " + timeout_param
+              }
+              if (log_server_enabled == "1") {
+                def log_server = sh (
+                  script: 'jq .server ' + log_server_file + ' | sed "s/\\\"//g"',
+                  returnStdout: true
+                ).trim()
+                if (log_server != "null" && log_server != "") {
+                  extra_param = "-w " + log_server
+                }
+              }
+            }
+            sh '''
+              date
+              cd ${WKC}/tests/parallel_test
+              ''' + timeout_cmd + ''' time ./run.sh -m /home/m.json -t cases.task -l ${LOGDIR} -b ${BRANCH_NAME} ''' + extra_param + '''
+              date
+              hostname
+            '''
+          }
         }
       }
     }
......
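The new pipeline step only changes behaviour when an optional `/home/log_server.json` exists on the build host: a nonzero `.timeout` wraps `run.sh` in `timeout`, and `.enabled` equal to 1 with a non-empty `.server` appends `-w <server>` to the `run.sh` call. Below is a minimal sketch of such a config file and the `jq` lookups the Jenkinsfile performs; the field values are illustrative only, not taken from this commit.

```sh
# Hypothetical /home/log_server.json; only the keys .enabled, .timeout and .server
# are read by the pipeline above, and the values here are made up.
cat > /home/log_server.json <<'EOF'
{
  "enabled": 1,
  "timeout": 3600,
  "server": "http://192.168.0.100/test-logs"
}
EOF

jq .enabled /home/log_server.json                 # 1    -> pass "-w <server>" to run.sh
jq .timeout /home/log_server.json                 # 3600 -> prefix run.sh with "timeout 3600"
jq .server  /home/log_server.json | sed 's/"//g'  # strip the JSON quotes to get the bare URL
```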
@@ -22,7 +22,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
 **Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the capacity of the hardware of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node.
-**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The leader/follower mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the leader. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction.
+**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 3) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The leader/follower mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the leader. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction.
 **Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a leader/follower mechanism. Write operations can only be performed on the leader vnode, and then replicated to follower vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
......
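To make the replica parameter described above concrete, here is a small sketch using the `taos` CLI. The database name is hypothetical, and a database created with `REPLICA 3` requires at least three dnodes in the cluster.

```sh
# Create a database whose vgroups each keep 3 replicas on 3 different dnodes.
taos -s "CREATE DATABASE IF NOT EXISTS demo REPLICA 3;"

# The mnode group (up to 3 members) is created and maintained automatically;
# list its members and their roles.
taos -s "SHOW MNODES;"
```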
@@ -2,6 +2,8 @@
 title: 2.4
 ---
+[2.4.0.30](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.30)
 [2.4.0.26](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.26)
 [2.4.0.25](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.25)
......
@@ -26,7 +26,7 @@ public class LineProtocolExample {
     private static void createDatabase(Connection conn) throws SQLException {
         try (Statement stmt = conn.createStatement()) {
-            // the default precision is ms (microsecond), but we use us(microsecond) here.
+            // the default precision is ms (millisecond), but we use us(microsecond) here.
             stmt.execute("CREATE DATABASE IF NOT EXISTS test PRECISION 'us'");
             stmt.execute("USE test");
         }
......
@@ -23,7 +23,7 @@ TDengine 分布式架构的逻辑结构图如下:
 **虚拟节点(vnode):** 为更好的支持数据分片、负载均衡,防止数据过热或倾斜,数据节点被虚拟化成多个虚拟节点(vnode,图中 V2,V3,V4 等)。每个 vnode 都是一个相对独立的工作单元,是时序数据存储的基本单元,具有独立的运行线程、内存空间与持久化存储的路径。一个 vnode 包含一定数量的表(数据采集点)。当创建一张新表时,系统会检查是否需要创建新的 vnode。一个数据节点上能创建的 vnode 的数量取决于该数据节点所在物理节点的硬件资源。一个 vnode 只属于一个 DB,但一个 DB 可以有多个 vnode。一个 vnode 除存储的时序数据外,也保存有所包含的表的 schema、标签值等。一个虚拟节点由所属的数据节点的 EP,以及所属的 VGroup ID 在系统内唯一标识,由管理节点创建并管理。
-**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中 M)。同时,管理节点也负责元数据(包括用户、数据库、表、静态标签等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(开源版最多不超过 3 个)mnode,它们自动构建成为一个虚拟管理节点组(图中 M0,M1,M2)。mnode 间采用 master/slave 的机制进行管理,而且采取强一致方式进行数据同步,任何数据更新操作只能在 Master 上进行。mnode 集群的创建由系统自动完成,无需人工干预。每个 dnode 上至多有一个 mnode,由所属的数据节点的 EP 来唯一标识。每个 dnode 通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的 EP。
+**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中 M)。同时,管理节点也负责元数据(包括用户、数据库、表、静态标签等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(最多不超过 3 个)mnode,它们自动构建成为一个虚拟管理节点组(图中 M0,M1,M2)。mnode 间采用 master/slave 的机制进行管理,而且采取强一致方式进行数据同步,任何数据更新操作只能在 Master 上进行。mnode 集群的创建由系统自动完成,无需人工干预。每个 dnode 上至多有一个 mnode,由所属的数据节点的 EP 来唯一标识。每个 dnode 通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的 EP。
 **虚拟节点组(VGroup):** 不同数据节点上的 vnode 可以组成一个虚拟节点组(vgroup)来保证系统的高可靠。虚拟节点组内采取 master/slave 的方式进行管理。写操作只能在 master vnode 上进行,系统采用异步复制的方式将数据同步到 slave vnode,这样确保了一份数据在多个物理节点上有拷贝。一个 vgroup 里虚拟节点个数就是数据的副本数。如果一个 DB 的副本数为 N,系统必须有至少 N 数据节点。副本数在创建 DB 时通过参数 replica 可以指定,缺省为 1。使用 TDengine 的多副本特性,可以不再需要昂贵的磁盘阵列等存储设备,就可以获得同样的数据高可靠性。虚拟节点组由管理节点创建、管理,并且由管理节点分配一个系统唯一的 ID,VGroup ID。如果两个虚拟节点的 VGroup ID 相同,说明他们属于同一个组,数据互为备份。虚拟节点组里虚拟节点的个数是可以动态改变的,容许只有一个,也就是没有数据复制。VGroup ID 是永远不变的,即使一个虚拟节点组被删除,它的 ID 也不会被收回重复利用。
......
@@ -2,6 +2,8 @@
 title: 2.4
 ---
+[2.4.0.30](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.30)
 [2.4.0.26](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.26)
 [2.4.0.25](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.25)
......
@@ -1028,7 +1028,7 @@ static int32_t applyDataPointsWithSqlInsert(TAOS* taos, TAOS_SML_DATA_POINT* poi
   if (info->numBatches >= MAX_SML_SQL_INSERT_BATCHES) {
     tscError("SML:0x%"PRIx64" Apply points failed. exceeds max sql insert batches", info->id);
-    code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+    code = TSDB_CODE_TSC_TOO_MANY_SML_LINES;
     goto cleanup;
   }
@@ -1047,7 +1047,7 @@ static int32_t applyDataPointsWithSqlInsert(TAOS* taos, TAOS_SML_DATA_POINT* poi
   tscDebug("SML:0x%"PRIx64" sql: %s" , info->id, batch->sql);
   if (info->numBatches >= MAX_SML_SQL_INSERT_BATCHES) {
     tscError("SML:0x%"PRIx64" Apply points failed. exceeds max sql insert batches", info->id);
-    code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+    code = TSDB_CODE_TSC_TOO_MANY_SML_LINES;
     goto cleanup;
   }
   bool batchesExecuted[MAX_SML_SQL_INSERT_BATCHES] = {false};
......
@@ -116,6 +116,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_TSC_INVALID_PRECISION_TYPE  TAOS_DEF_ERROR_CODE(0, 0x0226)  //"Invalid timestamp precision type")
 #define TSDB_CODE_TSC_RES_TOO_MANY            TAOS_DEF_ERROR_CODE(0, 0x0227)  //"Result set too large to be output")
 #define TSDB_CODE_TSC_INVALID_SCHEMA_VERSION  TAOS_DEF_ERROR_CODE(0, 0x0228)  //"invalid table schema version")
+#define TSDB_CODE_TSC_TOO_MANY_SML_LINES      TAOS_DEF_ERROR_CODE(0, 0x0229)  //"too many lines in batch")
 // mnode
 #define TSDB_CODE_MND_MSG_NOT_PROCESSED       TAOS_DEF_ERROR_CODE(0, 0x0300)  //"Message not processed"
......
-Subproject commit a875a057d1225d85c6323b9edaccc2b1a9641987
+Subproject commit 7105027650b51e701cfa1dac11b8fb42d447dd01
@@ -2560,7 +2560,8 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) {
   tMemBucket * pMemBucket = ppInfo->pMemBucket;
   if (pMemBucket == NULL || pMemBucket->total == 0) {  // check for null
-    assert(ppInfo->numOfElems == 0);
+    if (ppInfo->stage > 0)
+      assert(ppInfo->numOfElems == 0);
     setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
   } else {
     SET_DOUBLE_VAL((double *)pCtx->pOutput, getPercentile(pMemBucket, v));
......
@@ -986,7 +986,7 @@ static SMemRow getSMemRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order,
       return rmem;
     } else {
       pCheckInfo->chosen = CHECKINFO_CHOSEN_BOTH;
-      extraRow = rimem;
+      *extraRow = rimem;
       return rmem;
     }
   } else {
......
@@ -123,6 +123,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_VALUE_OUT_OF_RANGE, "Value out of range")
 TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_PROTOCOL_TYPE,   "Invalid line protocol type")
 TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_PRECISION_TYPE,  "Invalid timestamp precision type")
 TAOS_DEFINE_ERROR(TSDB_CODE_TSC_RES_TOO_MANY,            "Result set too large to be output")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_TOO_MANY_SML_LINES,      "Too many lines in batch")
 // mnode
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED,       "Message not processed")
......
@@ -7,10 +7,11 @@ function usage() {
     echo -e "\t -b branch"
     echo -e "\t -l log dir"
    echo -e "\t -o default timeout value"
+    echo -e "\t -w log web server"
     echo -e "\t -h help"
 }
-while getopts "m:t:b:l:o:h" opt; do
+while getopts "m:t:b:l:o:w:h" opt; do
     case $opt in
         m)
             config_file=$OPTARG
@@ -27,6 +28,9 @@ while getopts "m:t:b:l:o:h" opt; do
         o)
             timeout_param="-o $OPTARG"
             ;;
+        w)
+            web_server=$OPTARG
+            ;;
         h)
             usage
             exit 0
@@ -59,10 +63,11 @@ if [ ! -f $t_file ]; then
     exit 1
 fi
 date_tag=`date +%Y%m%d-%H%M%S`
+test_log_dir=${branch}_${date_tag}
 if [ -z $log_dir ]; then
-    log_dir="log/${branch}_${date_tag}"
+    log_dir="log/${test_log_dir}"
 else
-    log_dir="$log_dir/${branch}_${date_tag}"
+    log_dir="$log_dir/${test_log_dir}"
 fi
 hosts=()
@@ -134,14 +139,14 @@ function build_src() {
     echo "$cmd"
     ${cmd}
     if [ $? -ne 0 ]; then
-        flock -x $lock_file -c "echo \"${hosts[index]} TDengine build failed\" >>$log_dir/failed.log"
+        flock -x $lock_file -c "echo \"${hosts[index]} TDengine build failed\" >>${failed_case_file}"
         return
     fi
     script=". ~/.bashrc;cd ${workdirs[index]}/taos-tools;git submodule update --init --recursive;mkdir -p build;cd build;cmake ..;make -j4"
     cmd="${ssh_script} sh -c \"$script\""
     ${cmd}
     if [ $? -ne 0 ]; then
-        flock -x $lock_file -c "echo \"${hosts[index]} taos-tools build failed\" >>$log_dir/failed.log"
+        flock -x $lock_file -c "echo \"${hosts[index]} taos-tools build failed\" >>${failed_case_file}"
         return
     fi
     script="cp -rf ${workdirs[index]}/taos-tools/build/build/bin/* ${workdirs[index]}/TDinternal/debug/build/bin/;cp -rf ${workdirs[index]}/taos-tools/build/build/lib/* ${workdirs[index]}/TDinternal/debug/build/lib/;cp -rf ${workdirs[index]}/taos-tools/build/build/lib64/* ${workdirs[index]}/TDinternal/debug/build/lib/;cp -rf ${workdirs[index]}/TDinternal/debug/build/bin/taosBenchmark ${workdirs[index]}/TDinternal/debug/build/bin/taosdemo"
@@ -191,6 +196,10 @@ function run_thread() {
         local exec_dir=`echo "$line"|cut -d, -f3`
         local case_cmd=`echo "$line"|cut -d, -f4`
         local case_file=""
+        echo "$case_cmd"|grep -q "\.sh"
+        if [ $? -eq 0 ]; then
+            case_file=`echo "$case_cmd"|grep -o ".*\.sh"|awk '{print $NF}'`
+        fi
         echo "$case_cmd"|grep -q "^python3"
         if [ $? -eq 0 ]; then
             case_file=`echo "$case_cmd"|grep -o ".*\.py"|awk '{print $NF}'`
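With the added branch, cases whose command is a shell script (not only `python3` cases) also get a per-case log name derived from the `.sh` path. For context, the task file is parsed with `cut -d, -f3` and `cut -d, -f4`, so field 3 carries the execution directory and field 4 the case command; the entries below are purely hypothetical examples of that layout, not lines from this repository.

```sh
# Hypothetical cases.task entries; only fields 3 (exec dir) and 4 (case command)
# are consumed by the snippet above.
cat > cases.task <<'EOF'
,,system-test,python3 ./test.py -f 2-query/some_case.py
,,script,./test.sh -f general/parser/some_case.sim
EOF
```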
@@ -215,44 +224,54 @@ function run_thread() {
         # echo "$thread_no $count $cmd"
         local ret=0
         local redo_count=1
+        local case_log_file=$log_dir/${case_file}.txt
         start_time=`date +%s`
+        local case_index=`flock -x $lock_file -c "sh -c \"echo \\\$(( \\\$( cat $index_file ) + 1 )) | tee $index_file\""`
+        case_index=`printf "%5d" $case_index`
+        local case_info=`echo "$line"|cut -d, -f 3,4`
         while [ ${redo_count} -lt 6 ]; do
-            if [ -f $log_dir/$case_file.log ]; then
-                cp $log_dir/$case_file.log $log_dir/$case_file.${redo_count}.redolog
+            if [ -f $case_log_file ]; then
+                cp $case_log_file $log_dir/$case_file.${redo_count}.redotxt
             fi
-            echo "${hosts[index]}-${thread_no} order:${count}, redo:${redo_count} task:${line}" >$log_dir/$case_file.log
-            echo -e "\e[33m >>>>> \e[0m ${case_cmd}"
-            date >>$log_dir/$case_file.log
-            # $cmd 2>&1 | tee -a $log_dir/$case_file.log
+            echo "${hosts[index]}-${thread_no} order:${count}, redo:${redo_count} task:${line}" >$case_log_file
+            local current_time=`date "+%Y-%m-%d %H:%M:%S"`
+            echo -e "$case_index \e[33m START >>>>> \e[0m ${case_info} \e[33m[$current_time]\e[0m"
+            echo "$current_time" >>$case_log_file
+            local real_start_time=`date +%s`
+            # $cmd 2>&1 | tee -a $case_log_file
             # ret=${PIPESTATUS[0]}
-            $cmd >>$log_dir/$case_file.log 2>&1
+            $cmd >>$case_log_file 2>&1
             ret=$?
-            echo "${hosts[index]} `date` ret:${ret}" >>$log_dir/$case_file.log
+            local real_end_time=`date +%s`
+            local time_elapsed=$(( real_end_time - real_start_time ))
+            echo "execute time: ${time_elapsed}s" >>$case_log_file
+            current_time=`date "+%Y-%m-%d %H:%M:%S"`
+            echo "${hosts[index]} $current_time exit code:${ret}" >>$case_log_file
             if [ $ret -eq 0 ]; then
                 break
             fi
             redo=0
-            grep -q "wait too long for taosd start" $log_dir/$case_file.log
+            grep -q "wait too long for taosd start" $case_log_file
             if [ $? -eq 0 ]; then
                 redo=1
             fi
-            grep -q "kex_exchange_identification: Connection closed by remote host" $log_dir/$case_file.log
+            grep -q "kex_exchange_identification: Connection closed by remote host" $case_log_file
             if [ $? -eq 0 ]; then
                 redo=1
             fi
-            grep -q "ssh_exchange_identification: Connection closed by remote host" $log_dir/$case_file.log
+            grep -q "ssh_exchange_identification: Connection closed by remote host" $case_log_file
             if [ $? -eq 0 ]; then
                 redo=1
             fi
-            grep -q "kex_exchange_identification: read: Connection reset by peer" $log_dir/$case_file.log
+            grep -q "kex_exchange_identification: read: Connection reset by peer" $case_log_file
             if [ $? -eq 0 ]; then
                 redo=1
             fi
-            grep -q "Database not ready" $log_dir/$case_file.log
+            grep -q "Database not ready" $case_log_file
             if [ $? -eq 0 ]; then
                 redo=1
             fi
-            grep -q "Unable to establish connection" $log_dir/$case_file.log
+            grep -q "Unable to establish connection" $case_log_file
             if [ $? -eq 0 ]; then
                 redo=1
             fi
@@ -265,11 +284,18 @@ function run_thread() {
             redo_count=$(( redo_count + 1 ))
         done
         end_time=`date +%s`
-        echo >>$log_dir/$case_file.log
-        echo "${hosts[index]} execute time: $(( end_time - start_time ))s" >>$log_dir/$case_file.log
+        echo >>$case_log_file
+        total_time=$(( end_time - start_time ))
+        echo "${hosts[index]} total time: ${total_time}s" >>$case_log_file
         # echo "$thread_no ${line} DONE"
-        if [ $ret -ne 0 ]; then
-            flock -x $lock_file -c "echo \"${hosts[index]} ret:${ret} ${line}\" >>$log_dir/failed.log"
+        if [ $ret -eq 0 ]; then
+            echo -e "$case_index \e[34m DONE <<<<< \e[0m ${case_info} \e[34m[${total_time}s]\e[0m \e[32m success\e[0m"
+        else
+            if [ ! -z ${web_server} ]; then
+                flock -x $lock_file -c "echo -e \"${hosts[index]} ret:${ret} ${line}\n ${web_server}/$test_log_dir/${case_file}.txt\" >>${failed_case_file}"
+            else
+                flock -x $lock_file -c "echo -e \"${hosts[index]} ret:${ret} ${line}\n log file: ${case_log_file}\" >>${failed_case_file}"
+            fi
             mkdir -p $log_dir/${case_file}.coredump
             local remote_coredump_dir="${workdirs[index]}/tmp/thread_volume/$thread_no/coredump"
             local scpcmd="sshpass -p ${passwords[index]} scp -o StrictHostKeyChecking=no -r ${usernames[index]}@${hosts[index]}"
@@ -278,14 +304,16 @@ function run_thread() {
             fi
             cmd="$scpcmd:${remote_coredump_dir}/* $log_dir/${case_file}.coredump/"
             $cmd # 2>/dev/null
-            local case_info=`echo "$line"|cut -d, -f 3,4`
             local corefile=`ls $log_dir/${case_file}.coredump/`
-            corefile=`find $log_dir/${case_file}.coredump/ -name "core.*"`
-            echo -e "$case_info \e[31m failed\e[0m"
+            corefile=`find $log_dir/${case_file}.coredump/ -name "core*"`
+            echo -e "$case_index \e[34m DONE <<<<< \e[0m ${case_info} \e[34m[${total_time}s]\e[0m \e[31m failed\e[0m"
             echo "=========================log============================"
-            cat $log_dir/$case_file.log
+            cat $case_log_file
             echo "====================================================="
-            echo -e "\e[34m log file: $log_dir/$case_file.log \e[0m"
+            echo -e "\e[34m log file: $case_log_file \e[0m"
+            if [ ! -z "${web_server}" ]; then
+                echo "${web_server}/$test_log_dir/${case_file}.txt"
+            fi
             if [ ! -z "$corefile" ]; then
                 echo -e "\e[34m corefiles: $corefile \e[0m"
                 local build_dir=$log_dir/build_${hosts[index]}
@@ -320,6 +348,10 @@ mkdir -p $log_dir
 rm -rf $log_dir/*
 task_file=$log_dir/$$.task
 lock_file=$log_dir/$$.lock
+index_file=$log_dir/case_index.txt
+stat_file=$log_dir/stat.txt
+failed_case_file=$log_dir/failed.txt
+echo "0" >$index_file
 i=0
 while [ $i -lt ${#hosts[*]} ]; do
@@ -328,10 +360,6 @@ while [ $i -lt ${#hosts[*]} ]; do
     i=$(( i + 1 ))
 done
 wait
-# if [ -f "$log_dir/failed.log" ]; then
-#     cat $log_dir/failed.log
-#     exit 1
-# fi
 i=0
 j=0
@@ -357,15 +385,45 @@ rm -f $lock_file
 rm -f $task_file
 # docker ps -a|grep -v CONTAINER|awk '{print $1}'|xargs docker rm -f
+echo "====================================================================="
+echo "log dir: $log_dir"
+total_cases=`cat $index_file`
+failed_cases=0
+if [ -f $failed_case_file ]; then
+    if [ ! -z "$web_server" ]; then
+        failed_cases=`grep -v "$web_server" $failed_case_file|wc -l`
+    else
+        failed_cases=`grep -v "log file:" $failed_case_file|wc -l`
+    fi
+fi
+success_cases=$(( total_cases - failed_cases ))
+echo "Total Cases: $total_cases" >$stat_file
+echo "Successful: $success_cases" >>$stat_file
+echo "Failed: $failed_cases" >>$stat_file
+cat $stat_file
 RET=0
 i=1
-if [ -f "$log_dir/failed.log" ]; then
+if [ -f "${failed_case_file}" ]; then
     echo "====================================================="
     while read line; do
+        if [ ! -z "${web_server}" ]; then
+            echo "$line"|grep -q "${web_server}"
+            if [ $? -eq 0 ]; then
+                echo "    $line"
+                continue
+            fi
+        else
+            echo "$line"|grep -q "log file:"
+            if [ $? -eq 0 ]; then
+                echo "    $line"
+                continue
+            fi
+        fi
         line=`echo "$line"|cut -d, -f 3,4`
         echo -e "$i. $line \e[31m failed\e[0m" >&2
         i=$(( i + 1 ))
-    done <$log_dir/failed.log
+    done <${failed_case_file}
     RET=1
 fi
......
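Putting the run.sh changes together, here is a hedged usage sketch; the log directory and web server URL are made up, but the options match the ones printed by `usage()` above, and the listed files are the ones the script now writes into the per-run log directory.

```sh
# Hypothetical invocation of the parallel test runner with the new -w option.
./run.sh -m /home/m.json -t cases.task -l /var/log/tests -b 2.4 \
         -w http://192.168.0.100/test-logs

# Afterwards the per-run directory (e.g. /var/log/tests/2.4_<date_tag>) contains,
# besides one <case>.txt log per case:
#   case_index.txt  - running counter of executed cases
#   stat.txt        - "Total Cases" / "Successful" / "Failed" summary
#   failed.txt      - one entry per failed case, followed by its log file path or web link
```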