提交 f3b42525 编写于 作者: T tickduan

Merge branch 'master' into cq with continue query funciton

[submodule "src/connector/go"] [submodule "src/connector/go"]
path = src/connector/go path = src/connector/go
url = https://github.com/taosdata/driver-go url = git@github.com:taosdata/driver-go.git
[submodule "src/connector/grafanaplugin"] [submodule "src/connector/grafanaplugin"]
path = src/connector/grafanaplugin path = src/connector/grafanaplugin
url = https://github.com/taosdata/grafanaplugin url = git@github.com:taosdata/grafanaplugin.git
[submodule "src/connector/hivemq-tdengine-extension"] [submodule "src/connector/hivemq-tdengine-extension"]
path = src/connector/hivemq-tdengine-extension path = src/connector/hivemq-tdengine-extension
url = https://github.com/huskar-t/hivemq-tdengine-extension.git url = git@github.com:taosdata/hivemq-tdengine-extension.git
[submodule "tests/examples/rust"] [submodule "tests/examples/rust"]
path = tests/examples/rust path = tests/examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git url = https://github.com/songtianyi/tdengine-rust-bindings.git
...@@ -102,6 +102,12 @@ IF ("${CPUTYPE}" STREQUAL "") ...@@ -102,6 +102,12 @@ IF ("${CPUTYPE}" STREQUAL "")
SET(TD_LINUX TRUE) SET(TD_LINUX TRUE)
SET(TD_LINUX_64 FALSE) SET(TD_LINUX_64 FALSE)
SET(TD_ARM_64 TRUE) SET(TD_ARM_64 TRUE)
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
SET(CPUTYPE "mips64")
MESSAGE(STATUS "Set CPUTYPE to mips64")
SET(TD_LINUX TRUE)
SET(TD_LINUX_64 FALSE)
SET(TD_MIPS_64 TRUE)
ENDIF () ENDIF ()
ELSE () ELSE ()
......
...@@ -4,7 +4,7 @@ PROJECT(TDengine) ...@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER) IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER}) SET(TD_VER_NUMBER ${VERNUMBER})
ELSE () ELSE ()
SET(TD_VER_NUMBER "2.0.20.2") SET(TD_VER_NUMBER "2.0.20.5")
ENDIF () ENDIF ()
IF (DEFINED VERCOMPATIBLE) IF (DEFINED VERCOMPATIBLE)
......
...@@ -58,7 +58,12 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat ...@@ -58,7 +58,12 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples
cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_path}/connector if [ -d "${top_dir}/src/connector/grafanaplugin/dist" ]; then
cp -r ${top_dir}/src/connector/grafanaplugin/dist ${pkg_dir}${install_home_path}/connector/grafanaplugin
else
echo "grafanaplugin bundled directory not found!"
exit 1
fi
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
......
...@@ -66,7 +66,12 @@ cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin ...@@ -66,7 +66,12 @@ cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/connector if [ -d %{_compiledir}/../src/connector/grafanaplugin/dist ]; then
cp -r %{_compiledir}/../src/connector/grafanaplugin/dist %{buildroot}%{homepath}/connector/grafanaplugin
else
echo grafanaplugin bundled directory not found!
exit 1
fi
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
......
...@@ -243,9 +243,17 @@ function install_data() { ...@@ -243,9 +243,17 @@ function install_data() {
} }
function install_connector() { function install_connector() {
${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin ${install_main_dir}/connector if [ -d "${source_dir}/src/connector/grafanaplugin/dist" ]; then
${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin/dist ${install_main_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then
${csudo} cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector ${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
${csudo} cp -rf ${source_dir}/src/connector/go ${install_main_dir}/connector
${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null ${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
} }
......
...@@ -117,10 +117,18 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then ...@@ -117,10 +117,18 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/ if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/python ${install_dir}/connector/ cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
cp -r ${connector_dir}/go ${install_dir}/connector else
cp -r ${connector_dir}/nodejs ${install_dir}/connector echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi fi
# Copy release note # Copy release note
# cp ${script_dir}/release_note ${install_dir} # cp ${script_dir}/release_note ${install_dir}
......
...@@ -144,9 +144,17 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then ...@@ -144,9 +144,17 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/ if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/python ${install_dir}/connector/ cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
cp -r ${connector_dir}/go ${install_dir}/connector else
echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
......
...@@ -114,6 +114,25 @@ mkdir -p ${install_dir}/examples ...@@ -114,6 +114,25 @@ mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/tests/examples" examples_dir="${top_dir}/tests/examples"
cp -r ${examples_dir}/c ${install_dir}/examples cp -r ${examples_dir}/c ${install_dir}/examples
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ -d ${examples_dir}/JDBC/connectionPools/target ]; then
rm -rf ${examples_dir}/JDBC/connectionPools/target
fi
if [ -d ${examples_dir}/JDBC/JDBCDemo/target ]; then
rm -rf ${examples_dir}/JDBC/JDBCDemo/target
fi
if [ -d ${examples_dir}/JDBC/mybatisplus-demo/target ]; then
rm -rf ${examples_dir}/JDBC/mybatisplus-demo/target
fi
if [ -d ${examples_dir}/JDBC/springbootdemo/target ]; then
rm -rf ${examples_dir}/JDBC/springbootdemo/target
fi
if [ -d ${examples_dir}/JDBC/SpringJdbcTemplate/target ]; then
rm -rf ${examples_dir}/JDBC/SpringJdbcTemplate/target
fi
if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then
rm -rf ${examples_dir}/JDBC/taosdemo/target
fi
cp -r ${examples_dir}/JDBC ${install_dir}/examples cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples cp -r ${examples_dir}/matlab ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples cp -r ${examples_dir}/python ${install_dir}/examples
...@@ -131,9 +150,17 @@ connector_dir="${code_dir}/connector" ...@@ -131,9 +150,17 @@ connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/ if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/python ${install_dir}/connector/ cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
cp -r ${connector_dir}/go ${install_dir}/connector else
echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi fi
# Copy release note # Copy release note
......
...@@ -166,9 +166,18 @@ connector_dir="${code_dir}/connector" ...@@ -166,9 +166,18 @@ connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
cp -r ${connector_dir}/python ${install_dir}/connector/ cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
......
name: tdengine name: tdengine
base: core18 base: core18
version: '2.0.20.2' version: '2.0.20.5'
icon: snap/gui/t-dengine.svg icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT. summary: an open-source big data platform designed and optimized for IoT.
description: | description: |
...@@ -72,7 +72,7 @@ parts: ...@@ -72,7 +72,7 @@ parts:
- usr/bin/taosd - usr/bin/taosd
- usr/bin/taos - usr/bin/taos
- usr/bin/taosdemo - usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.20.2 - usr/lib/libtaos.so.2.0.20.5
- usr/lib/libtaos.so.1 - usr/lib/libtaos.so.1
- usr/lib/libtaos.so - usr/lib/libtaos.so
......
...@@ -6494,7 +6494,6 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { ...@@ -6494,7 +6494,6 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
size_t valSize = taosArrayGetSize(pValList); size_t valSize = taosArrayGetSize(pValList);
// too long tag values will return invalid sql, not be truncated automatically // too long tag values will return invalid sql, not be truncated automatically
SSchema *pTagSchema = tscGetTableTagSchema(pStableMetaInfo->pTableMeta); SSchema *pTagSchema = tscGetTableTagSchema(pStableMetaInfo->pTableMeta);
STagData *pTag = &pCreateTableInfo->tagdata; STagData *pTag = &pCreateTableInfo->tagdata;
...@@ -6504,7 +6503,6 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { ...@@ -6504,7 +6503,6 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
return TSDB_CODE_TSC_OUT_OF_MEMORY; return TSDB_CODE_TSC_OUT_OF_MEMORY;
} }
SArray* pNameList = NULL; SArray* pNameList = NULL;
size_t nameSize = 0; size_t nameSize = 0;
int32_t schemaSize = tscGetNumOfTags(pStableMetaInfo->pTableMeta); int32_t schemaSize = tscGetNumOfTags(pStableMetaInfo->pTableMeta);
...@@ -7119,6 +7117,7 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i ...@@ -7119,6 +7117,7 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i
const char* msg6 = "too many tables in from clause"; const char* msg6 = "too many tables in from clause";
const char* msg7 = "invalid table alias name"; const char* msg7 = "invalid table alias name";
const char* msg8 = "alias name too long"; const char* msg8 = "alias name too long";
const char* msg9 = "only tag query not compatible with normal column filter";
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
...@@ -7288,6 +7287,20 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i ...@@ -7288,6 +7287,20 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i
if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) { if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
return TSDB_CODE_TSC_INVALID_SQL; return TSDB_CODE_TSC_INVALID_SQL;
} }
if(tscQueryTags(pQueryInfo)) {
SSqlExpr* pExpr1 = tscSqlExprGet(pQueryInfo, 0);
if (pExpr1->functionId != TSDB_FUNC_TID_TAG) {
int32_t numOfCols = (int32_t)taosArrayGetSize(pQueryInfo->colList);
for (int32_t i = 0; i < numOfCols; ++i) {
SColumn* pCols = taosArrayGetP(pQueryInfo->colList, i);
if (pCols->numOfFilters > 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
}
}
}
} }
if (parseSessionClause(pCmd, pQueryInfo, pQuerySqlNode) != TSDB_CODE_SUCCESS) { if (parseSessionClause(pCmd, pQueryInfo, pQuerySqlNode) != TSDB_CODE_SUCCESS) {
......
...@@ -2020,8 +2020,9 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { ...@@ -2020,8 +2020,9 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
} }
} }
tscDebug("0x%"PRIx64" recv table meta, uid:%" PRIu64 ", tid:%d, name:%s", pSql->self, pTableMeta->id.uid, pTableMeta->id.tid, tscDebug("0x%"PRIx64" recv table meta, uid:%" PRIu64 ", tid:%d, name:%s, numOfCols:%d, numOfTags:%d", pSql->self,
tNameGetTableName(&pTableMetaInfo->name)); pTableMeta->id.uid, pTableMeta->id.tid, tNameGetTableName(&pTableMetaInfo->name), pTableMeta->tableInfo.numOfColumns,
pTableMeta->tableInfo.numOfTags);
free(pTableMeta); free(pTableMeta);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
...@@ -2164,8 +2165,7 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) { ...@@ -2164,8 +2165,7 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups; pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups;
if (pInfo->vgroupList->numOfVgroups <= 0) { if (pInfo->vgroupList->numOfVgroups <= 0) {
//tfree(pInfo->vgroupList); tscDebug("0x%"PRIx64" empty vgroup info", pSql->self);
tscError("0x%"PRIx64" empty vgroup info", pSql->self);
} else { } else {
for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) { for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
// just init, no need to lock // just init, no need to lock
......
...@@ -215,7 +215,7 @@ static void tscProcessSubscriptionTimer(void *handle, void *tmrId) { ...@@ -215,7 +215,7 @@ static void tscProcessSubscriptionTimer(void *handle, void *tmrId) {
taosTmrReset(tscProcessSubscriptionTimer, pSub->interval, pSub, tscTmr, &pSub->pTimer); taosTmrReset(tscProcessSubscriptionTimer, pSub->interval, pSub, tscTmr, &pSub->pTimer);
} }
//TODO refactor: extract table list name not simply from the sql
static SArray* getTableList( SSqlObj* pSql ) { static SArray* getTableList( SSqlObj* pSql ) {
const char* p = strstr( pSql->sqlstr, " from " ); const char* p = strstr( pSql->sqlstr, " from " );
assert(p != NULL); // we are sure this is a 'select' statement assert(p != NULL); // we are sure this is a 'select' statement
...@@ -224,11 +224,11 @@ static SArray* getTableList( SSqlObj* pSql ) { ...@@ -224,11 +224,11 @@ static SArray* getTableList( SSqlObj* pSql ) {
SSqlObj* pNew = taos_query(pSql->pTscObj, sql); SSqlObj* pNew = taos_query(pSql->pTscObj, sql);
if (pNew == NULL) { if (pNew == NULL) {
tscError("0x%"PRIx64"failed to retrieve table id: cannot create new sql object.", pSql->self); tscError("0x%"PRIx64" failed to retrieve table id: cannot create new sql object.", pSql->self);
return NULL; return NULL;
} else if (taos_errno(pNew) != TSDB_CODE_SUCCESS) { } else if (taos_errno(pNew) != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64"failed to retrieve table id,error: %s", pSql->self, tstrerror(taos_errno(pNew))); tscError("0x%"PRIx64" failed to retrieve table id,error: %s", pSql->self, tstrerror(taos_errno(pNew)));
return NULL; return NULL;
} }
......
Subproject commit 7a26c432f8b4203e42344ff3290b9b9b01b983d5 Subproject commit 8ce6d86558afc8c0b50c10f990fd2b4270cf06fc
...@@ -9,7 +9,7 @@ const ffi = require('ffi-napi'); ...@@ -9,7 +9,7 @@ const ffi = require('ffi-napi');
const ArrayType = require('ref-array-napi'); const ArrayType = require('ref-array-napi');
const Struct = require('ref-struct-napi'); const Struct = require('ref-struct-napi');
const FieldTypes = require('./constants'); const FieldTypes = require('./constants');
const errors = require ('./error'); const errors = require('./error');
const TaosObjects = require('./taosobjects'); const TaosObjects = require('./taosobjects');
const { NULL_POINTER } = require('ref-napi'); const { NULL_POINTER } = require('ref-napi');
...@@ -22,7 +22,7 @@ function convertMicrosecondsToDatetime(time) { ...@@ -22,7 +22,7 @@ function convertMicrosecondsToDatetime(time) {
return new TaosObjects.TaosTimestamp(time * 0.001, true); return new TaosObjects.TaosTimestamp(time * 0.001, true);
} }
function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
timestampConverter = convertMillisecondsToDatetime; timestampConverter = convertMillisecondsToDatetime;
if (micro == true) { if (micro == true) {
timestampConverter = convertMicrosecondsToDatetime; timestampConverter = convertMicrosecondsToDatetime;
...@@ -44,14 +44,14 @@ function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro=false ...@@ -44,14 +44,14 @@ function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro=false
} }
return res; return res;
} }
function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = new Array(data.length); let res = new Array(data.length);
for (let i = 0; i < data.length; i++) { for (let i = 0; i < data.length; i++) {
if (data[i] == 0) { if (data[i] == 0) {
res[i] = false; res[i] = false;
} }
else if (data[i] == 1){ else if (data[i] == 1) {
res[i] = true; res[i] = true;
} }
else if (data[i] == FieldTypes.C_BOOL_NULL) { else if (data[i] == FieldTypes.C_BOOL_NULL) {
...@@ -60,29 +60,29 @@ function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { ...@@ -60,29 +60,29 @@ function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
} }
return res; return res;
} }
function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = []; let res = [];
let currOffset = 0; let currOffset = 0;
while (currOffset < data.length) { while (currOffset < data.length) {
let d = data.readIntLE(currOffset,1); let d = data.readIntLE(currOffset, 1);
res.push(d == FieldTypes.C_TINYINT_NULL ? null : d); res.push(d == FieldTypes.C_TINYINT_NULL ? null : d);
currOffset += nbytes; currOffset += nbytes;
} }
return res; return res;
} }
function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = []; let res = [];
let currOffset = 0; let currOffset = 0;
while (currOffset < data.length) { while (currOffset < data.length) {
let d = data.readIntLE(currOffset,2); let d = data.readIntLE(currOffset, 2);
res.push(d == FieldTypes.C_SMALLINT_NULL ? null : d); res.push(d == FieldTypes.C_SMALLINT_NULL ? null : d);
currOffset += nbytes; currOffset += nbytes;
} }
return res; return res;
} }
function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = []; let res = [];
let currOffset = 0; let currOffset = 0;
...@@ -93,7 +93,7 @@ function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { ...@@ -93,7 +93,7 @@ function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
} }
return res; return res;
} }
function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = []; let res = [];
let currOffset = 0; let currOffset = 0;
...@@ -104,7 +104,7 @@ function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { ...@@ -104,7 +104,7 @@ function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
} }
return res; return res;
} }
function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = []; let res = [];
let currOffset = 0; let currOffset = 0;
...@@ -115,7 +115,7 @@ function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { ...@@ -115,7 +115,7 @@ function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
} }
return res; return res;
} }
function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = []; let res = [];
let currOffset = 0; let currOffset = 0;
...@@ -126,7 +126,7 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { ...@@ -126,7 +126,7 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
} }
return res; return res;
} }
function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = []; let res = [];
let currOffset = 0; let currOffset = 0;
...@@ -142,7 +142,7 @@ function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { ...@@ -142,7 +142,7 @@ function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
} }
return res; return res;
} }
function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = []; let res = [];
let dataEntry = data.slice(0, nbytes); //one entry in a row under a column; let dataEntry = data.slice(0, nbytes); //one entry in a row under a column;
...@@ -153,23 +153,23 @@ function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro=false) { ...@@ -153,23 +153,23 @@ function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
// Object with all the relevant converters from pblock data to javascript readable data // Object with all the relevant converters from pblock data to javascript readable data
let convertFunctions = { let convertFunctions = {
[FieldTypes.C_BOOL] : convertBool, [FieldTypes.C_BOOL]: convertBool,
[FieldTypes.C_TINYINT] : convertTinyint, [FieldTypes.C_TINYINT]: convertTinyint,
[FieldTypes.C_SMALLINT] : convertSmallint, [FieldTypes.C_SMALLINT]: convertSmallint,
[FieldTypes.C_INT] : convertInt, [FieldTypes.C_INT]: convertInt,
[FieldTypes.C_BIGINT] : convertBigint, [FieldTypes.C_BIGINT]: convertBigint,
[FieldTypes.C_FLOAT] : convertFloat, [FieldTypes.C_FLOAT]: convertFloat,
[FieldTypes.C_DOUBLE] : convertDouble, [FieldTypes.C_DOUBLE]: convertDouble,
[FieldTypes.C_BINARY] : convertBinary, [FieldTypes.C_BINARY]: convertBinary,
[FieldTypes.C_TIMESTAMP] : convertTimestamp, [FieldTypes.C_TIMESTAMP]: convertTimestamp,
[FieldTypes.C_NCHAR] : convertNchar [FieldTypes.C_NCHAR]: convertNchar
} }
// Define TaosField structure // Define TaosField structure
var char_arr = ArrayType(ref.types.char); var char_arr = ArrayType(ref.types.char);
var TaosField = Struct({ var TaosField = Struct({
'name': char_arr, 'name': char_arr,
}); });
TaosField.fields.name.type.size = 65; TaosField.fields.name.type.size = 65;
TaosField.defineProperty('type', ref.types.char); TaosField.defineProperty('type', ref.types.char);
TaosField.defineProperty('bytes', ref.types.short); TaosField.defineProperty('bytes', ref.types.short);
...@@ -183,7 +183,7 @@ TaosField.defineProperty('bytes', ref.types.short); ...@@ -183,7 +183,7 @@ TaosField.defineProperty('bytes', ref.types.short);
* @classdesc The CTaosInterface is the interface through which Node.JS communicates data back and forth with TDengine. It is not advised to * @classdesc The CTaosInterface is the interface through which Node.JS communicates data back and forth with TDengine. It is not advised to
* access this class directly and use it unless you understand what these functions do. * access this class directly and use it unless you understand what these functions do.
*/ */
function CTaosInterface (config = null, pass = false) { function CTaosInterface(config = null, pass = false) {
ref.types.char_ptr = ref.refType(ref.types.char); ref.types.char_ptr = ref.refType(ref.types.char);
ref.types.void_ptr = ref.refType(ref.types.void); ref.types.void_ptr = ref.refType(ref.types.void);
ref.types.void_ptr2 = ref.refType(ref.types.void_ptr); ref.types.void_ptr2 = ref.refType(ref.types.void_ptr);
...@@ -196,64 +196,65 @@ function CTaosInterface (config = null, pass = false) { ...@@ -196,64 +196,65 @@ function CTaosInterface (config = null, pass = false) {
taoslibname = 'libtaos'; taoslibname = 'libtaos';
} }
this.libtaos = ffi.Library(taoslibname, { this.libtaos = ffi.Library(taoslibname, {
'taos_options': [ ref.types.int, [ ref.types.int , ref.types.void_ptr ] ], 'taos_options': [ref.types.int, [ref.types.int, ref.types.void_ptr]],
'taos_init': [ ref.types.void, [ ] ], 'taos_init': [ref.types.void, []],
//TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port) //TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port)
'taos_connect': [ ref.types.void_ptr, [ ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.int ] ], 'taos_connect': [ref.types.void_ptr, [ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.int]],
//void taos_close(TAOS *taos) //void taos_close(TAOS *taos)
'taos_close': [ ref.types.void, [ ref.types.void_ptr ] ], 'taos_close': [ref.types.void, [ref.types.void_ptr]],
//int *taos_fetch_lengths(TAOS_RES *taos); //int *taos_fetch_lengths(TAOS_RES *res);
'taos_fetch_lengths': [ ref.types.void_ptr, [ ref.types.void_ptr ] ], 'taos_fetch_lengths': [ref.types.void_ptr, [ref.types.void_ptr]],
//int taos_query(TAOS *taos, char *sqlstr) //int taos_query(TAOS *taos, char *sqlstr)
'taos_query': [ ref.types.void_ptr, [ ref.types.void_ptr, ref.types.char_ptr ] ], 'taos_query': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr]],
//int taos_affected_rows(TAOS *taos) //int taos_affected_rows(TAOS_RES *res)
'taos_affected_rows': [ ref.types.int, [ ref.types.void_ptr] ], 'taos_affected_rows': [ref.types.int, [ref.types.void_ptr]],
//int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) //int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows)
'taos_fetch_block': [ ref.types.int, [ ref.types.void_ptr, ref.types.void_ptr] ], 'taos_fetch_block': [ref.types.int, [ref.types.void_ptr, ref.types.void_ptr]],
//int taos_num_fields(TAOS_RES *res); //int taos_num_fields(TAOS_RES *res);
'taos_num_fields': [ ref.types.int, [ ref.types.void_ptr] ], 'taos_num_fields': [ref.types.int, [ref.types.void_ptr]],
//TAOS_ROW taos_fetch_row(TAOS_RES *res) //TAOS_ROW taos_fetch_row(TAOS_RES *res)
//TAOS_ROW is void **, but we set the return type as a reference instead to get the row //TAOS_ROW is void **, but we set the return type as a reference instead to get the row
'taos_fetch_row': [ ref.refType(ref.types.void_ptr2), [ ref.types.void_ptr ] ], 'taos_fetch_row': [ref.refType(ref.types.void_ptr2), [ref.types.void_ptr]],
'taos_print_row': [ref.types.int, [ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]],
//int taos_result_precision(TAOS_RES *res) //int taos_result_precision(TAOS_RES *res)
'taos_result_precision': [ ref.types.int, [ ref.types.void_ptr ] ], 'taos_result_precision': [ref.types.int, [ref.types.void_ptr]],
//void taos_free_result(TAOS_RES *res) //void taos_free_result(TAOS_RES *res)
'taos_free_result': [ ref.types.void, [ ref.types.void_ptr] ], 'taos_free_result': [ref.types.void, [ref.types.void_ptr]],
//int taos_field_count(TAOS *taos) //int taos_field_count(TAOS *taos)
'taos_field_count': [ ref.types.int, [ ref.types.void_ptr ] ], 'taos_field_count': [ref.types.int, [ref.types.void_ptr]],
//TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) //TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)
'taos_fetch_fields': [ ref.refType(TaosField), [ ref.types.void_ptr ] ], 'taos_fetch_fields': [ref.refType(TaosField), [ref.types.void_ptr]],
//int taos_errno(TAOS *taos) //int taos_errno(TAOS *taos)
'taos_errno': [ ref.types.int, [ ref.types.void_ptr] ], 'taos_errno': [ref.types.int, [ref.types.void_ptr]],
//char *taos_errstr(TAOS *taos) //char *taos_errstr(TAOS *taos)
'taos_errstr': [ ref.types.char_ptr, [ ref.types.void_ptr] ], 'taos_errstr': [ref.types.char_ptr, [ref.types.void_ptr]],
//void taos_stop_query(TAOS_RES *res); //void taos_stop_query(TAOS_RES *res);
'taos_stop_query': [ ref.types.void, [ ref.types.void_ptr] ], 'taos_stop_query': [ref.types.void, [ref.types.void_ptr]],
//char *taos_get_server_info(TAOS *taos); //char *taos_get_server_info(TAOS *taos);
'taos_get_server_info': [ ref.types.char_ptr, [ ref.types.void_ptr ] ], 'taos_get_server_info': [ref.types.char_ptr, [ref.types.void_ptr]],
//char *taos_get_client_info(); //char *taos_get_client_info();
'taos_get_client_info': [ ref.types.char_ptr, [ ] ], 'taos_get_client_info': [ref.types.char_ptr, []],
// ASYNC // ASYNC
// void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param) // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param)
'taos_query_a': [ ref.types.void, [ ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr ] ], 'taos_query_a': [ref.types.void, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr]],
// void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param); // void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
'taos_fetch_rows_a': [ ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr ]], 'taos_fetch_rows_a': [ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr]],
// Subscription // Subscription
//TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval) //TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)
'taos_subscribe': [ ref.types.void_ptr, [ ref.types.void_ptr, ref.types.int, ref.types.char_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int] ], 'taos_subscribe': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.int, ref.types.char_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]],
// TAOS_RES *taos_consume(TAOS_SUB *tsub) // TAOS_RES *taos_consume(TAOS_SUB *tsub)
'taos_consume': [ ref.types.void_ptr, [ref.types.void_ptr] ], 'taos_consume': [ref.types.void_ptr, [ref.types.void_ptr]],
//void taos_unsubscribe(TAOS_SUB *tsub); //void taos_unsubscribe(TAOS_SUB *tsub);
'taos_unsubscribe': [ ref.types.void, [ ref.types.void_ptr ] ], 'taos_unsubscribe': [ref.types.void, [ref.types.void_ptr]],
// Continuous Query // Continuous Query
//TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), //TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
// int64_t stime, void *param, void (*callback)(void *)); // int64_t stime, void *param, void (*callback)(void *));
'taos_open_stream': [ ref.types.void_ptr, [ ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr ] ], 'taos_open_stream': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr]],
//void taos_close_stream(TAOS_STREAM *tstr); //void taos_close_stream(TAOS_STREAM *tstr);
'taos_close_stream': [ ref.types.void, [ ref.types.void_ptr ] ] 'taos_close_stream': [ref.types.void, [ref.types.void_ptr]]
}); });
if (pass == false) { if (pass == false) {
...@@ -264,7 +265,7 @@ function CTaosInterface (config = null, pass = false) { ...@@ -264,7 +265,7 @@ function CTaosInterface (config = null, pass = false) {
try { try {
this._config = ref.allocCString(config); this._config = ref.allocCString(config);
} }
catch(err){ catch (err) {
throw "Attribute Error: config is expected as a str"; throw "Attribute Error: config is expected as a str";
} }
} }
...@@ -276,38 +277,38 @@ function CTaosInterface (config = null, pass = false) { ...@@ -276,38 +277,38 @@ function CTaosInterface (config = null, pass = false) {
return this; return this;
} }
CTaosInterface.prototype.config = function config() { CTaosInterface.prototype.config = function config() {
return this._config; return this._config;
} }
CTaosInterface.prototype.connect = function connect(host=null, user="root", password="taosdata", db=null, port=0) { CTaosInterface.prototype.connect = function connect(host = null, user = "root", password = "taosdata", db = null, port = 0) {
let _host,_user,_password,_db,_port; let _host, _user, _password, _db, _port;
try { try {
_host = host != null ? ref.allocCString(host) : ref.alloc(ref.types.char_ptr, ref.NULL); _host = host != null ? ref.allocCString(host) : ref.alloc(ref.types.char_ptr, ref.NULL);
} }
catch(err) { catch (err) {
throw "Attribute Error: host is expected as a str"; throw "Attribute Error: host is expected as a str";
} }
try { try {
_user = ref.allocCString(user) _user = ref.allocCString(user)
} }
catch(err) { catch (err) {
throw "Attribute Error: user is expected as a str"; throw "Attribute Error: user is expected as a str";
} }
try { try {
_password = ref.allocCString(password); _password = ref.allocCString(password);
} }
catch(err) { catch (err) {
throw "Attribute Error: password is expected as a str"; throw "Attribute Error: password is expected as a str";
} }
try { try {
_db = db != null ? ref.allocCString(db) : ref.alloc(ref.types.char_ptr, ref.NULL); _db = db != null ? ref.allocCString(db) : ref.alloc(ref.types.char_ptr, ref.NULL);
} }
catch(err) { catch (err) {
throw "Attribute Error: db is expected as a str"; throw "Attribute Error: db is expected as a str";
} }
try { try {
_port = ref.alloc(ref.types.int, port); _port = ref.alloc(ref.types.int, port);
} }
catch(err) { catch (err) {
throw TypeError("port is expected as an int") throw TypeError("port is expected as an int")
} }
let connection = this.libtaos.taos_connect(_host, _user, _password, _db, _port); let connection = this.libtaos.taos_connect(_host, _user, _password, _db, _port);
...@@ -324,10 +325,10 @@ CTaosInterface.prototype.close = function close(connection) { ...@@ -324,10 +325,10 @@ CTaosInterface.prototype.close = function close(connection) {
console.log("Connection is closed"); console.log("Connection is closed");
} }
CTaosInterface.prototype.query = function query(connection, sql) { CTaosInterface.prototype.query = function query(connection, sql) {
return this.libtaos.taos_query(connection, ref.allocCString(sql)); return this.libtaos.taos_query(connection, ref.allocCString(sql));
} }
CTaosInterface.prototype.affectedRows = function affectedRows(connection) { CTaosInterface.prototype.affectedRows = function affectedRows(result) {
return this.libtaos.taos_affected_rows(connection); return this.libtaos.taos_affected_rows(result);
} }
CTaosInterface.prototype.useResult = function useResult(result) { CTaosInterface.prototype.useResult = function useResult(result) {
...@@ -337,8 +338,8 @@ CTaosInterface.prototype.useResult = function useResult(result) { ...@@ -337,8 +338,8 @@ CTaosInterface.prototype.useResult = function useResult(result) {
pfields = ref.reinterpret(pfields, this.fieldsCount(result) * 68, 0); pfields = ref.reinterpret(pfields, this.fieldsCount(result) * 68, 0);
for (let i = 0; i < pfields.length; i += 68) { for (let i = 0; i < pfields.length; i += 68) {
//0 - 63 = name //64 - 65 = bytes, 66 - 67 = type //0 - 63 = name //64 - 65 = bytes, 66 - 67 = type
fields.push( { fields.push({
name: ref.readCString(ref.reinterpret(pfields,65,i)), name: ref.readCString(ref.reinterpret(pfields, 65, i)),
type: pfields[i + 65], type: pfields[i + 65],
bytes: pfields[i + 66] bytes: pfields[i + 66]
}) })
...@@ -347,11 +348,10 @@ CTaosInterface.prototype.useResult = function useResult(result) { ...@@ -347,11 +348,10 @@ CTaosInterface.prototype.useResult = function useResult(result) {
return fields; return fields;
} }
CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) { CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
//let pblock = ref.ref(ref.ref(ref.NULL)); // equal to our raw data let pblock = ref.NULL_POINTER;
let pblock = this.libtaos.taos_fetch_row(result); let num_of_rows = this.libtaos.taos_fetch_block(result, pblock);
let num_of_rows = 1; if (ref.isNull(pblock.deref()) == true) {
if (ref.isNull(pblock) == true) { return { block: null, num_of_rows: 0 };
return {block:null, num_of_rows:0};
} }
var fieldL = this.libtaos.taos_fetch_lengths(result); var fieldL = this.libtaos.taos_fetch_lengths(result);
...@@ -359,10 +359,10 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) { ...@@ -359,10 +359,10 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
let isMicro = (this.libtaos.taos_result_precision(result) == FieldTypes.C_TIMESTAMP_MICRO); let isMicro = (this.libtaos.taos_result_precision(result) == FieldTypes.C_TIMESTAMP_MICRO);
var fieldlens = []; var fieldlens = [];
if (ref.isNull(fieldL) == false) { if (ref.isNull(fieldL) == false) {
for (let i = 0; i < fields.length; i ++) { for (let i = 0; i < fields.length; i++) {
let plen = ref.reinterpret(fieldL, 4, i*4); let plen = ref.reinterpret(fieldL, 4, i * 4);
let len = plen.readInt32LE(0); let len = plen.readInt32LE(0);
fieldlens.push(len); fieldlens.push(len);
} }
...@@ -370,21 +370,23 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) { ...@@ -370,21 +370,23 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
let blocks = new Array(fields.length); let blocks = new Array(fields.length);
blocks.fill(null); blocks.fill(null);
//num_of_rows = Math.abs(num_of_rows); num_of_rows = Math.abs(num_of_rows);
let offset = 0; let offset = 0;
let ptr = pblock.deref();
for (let i = 0; i < fields.length; i++) { for (let i = 0; i < fields.length; i++) {
pdata = ref.reinterpret(pblock,8,i*8); pdata = ref.reinterpret(ptr, 8, i * 8);
if(ref.isNull(pdata.readPointer())){ if (ref.isNull(pdata.readPointer())) {
blocks[i] = new Array(); blocks[i] = new Array();
}else{ } else {
pdata = ref.ref(pdata.readPointer()); pdata = ref.ref(pdata.readPointer());
if (!convertFunctions[fields[i]['type']] ) { if (!convertFunctions[fields[i]['type']]) {
throw new errors.DatabaseError("Invalid data type returned from database"); throw new errors.DatabaseError("Invalid data type returned from database");
} }
blocks[i] = convertFunctions[fields[i]['type']](pdata, 1, fieldlens[i], offset, isMicro); blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, isMicro);
} }
} }
return {blocks: blocks, num_of_rows:Math.abs(num_of_rows)} return { blocks: blocks, num_of_rows }
} }
CTaosInterface.prototype.fetchRow = function fetchRow(result, fields) { CTaosInterface.prototype.fetchRow = function fetchRow(result, fields) {
let row = this.libtaos.taos_fetch_row(result); let row = this.libtaos.taos_fetch_row(result);
...@@ -414,7 +416,7 @@ CTaosInterface.prototype.errStr = function errStr(result) { ...@@ -414,7 +416,7 @@ CTaosInterface.prototype.errStr = function errStr(result) {
// Async // Async
CTaosInterface.prototype.query_a = function query_a(connection, sql, callback, param = ref.ref(ref.NULL)) { CTaosInterface.prototype.query_a = function query_a(connection, sql, callback, param = ref.ref(ref.NULL)) {
// void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int), void *param) // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int), void *param)
callback = ffi.Callback(ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.types.int ], callback); callback = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], callback);
this.libtaos.taos_query_a(connection, ref.allocCString(sql), callback, param); this.libtaos.taos_query_a(connection, ref.allocCString(sql), callback, param);
return param; return param;
} }
...@@ -439,46 +441,46 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback, ...@@ -439,46 +441,46 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback,
var fieldL = cti.libtaos.taos_fetch_lengths(result); var fieldL = cti.libtaos.taos_fetch_lengths(result);
var fieldlens = []; var fieldlens = [];
if (ref.isNull(fieldL) == false) { if (ref.isNull(fieldL) == false) {
for (let i = 0; i < fields.length; i ++) { for (let i = 0; i < fields.length; i++) {
let plen = ref.reinterpret(fieldL, 8, i*8); let plen = ref.reinterpret(fieldL, 8, i * 8);
let len = ref.get(plen,0,ref.types.int32); let len = ref.get(plen, 0, ref.types.int32);
fieldlens.push(len); fieldlens.push(len);
} }
} }
if (numOfRows2 > 0){ if (numOfRows2 > 0) {
for (let i = 0; i < fields.length; i++) { for (let i = 0; i < fields.length; i++) {
if(ref.isNull(pdata.readPointer())){ if (ref.isNull(pdata.readPointer())) {
blocks[i] = new Array(); blocks[i] = new Array();
}else{ } else {
if (!convertFunctions[fields[i]['type']] ) { if (!convertFunctions[fields[i]['type']]) {
throw new errors.DatabaseError("Invalid data type returned from database"); throw new errors.DatabaseError("Invalid data type returned from database");
} }
let prow = ref.reinterpret(row,8,i*8); let prow = ref.reinterpret(row, 8, i * 8);
prow = prow.readPointer(); prow = prow.readPointer();
prow = ref.ref(prow); prow = ref.ref(prow);
blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro); blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro);
//offset += fields[i]['bytes'] * numOfRows2; //offset += fields[i]['bytes'] * numOfRows2;
} }
} }
} }
callback(param2, result2, numOfRows2, blocks); callback(param2, result2, numOfRows2, blocks);
} }
asyncCallbackWrapper = ffi.Callback(ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.types.int ], asyncCallbackWrapper); asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], asyncCallbackWrapper);
this.libtaos.taos_fetch_rows_a(result, asyncCallbackWrapper, param); this.libtaos.taos_fetch_rows_a(result, asyncCallbackWrapper, param);
return param; return param;
} }
// Fetch field meta data by result handle // Fetch field meta data by result handle
CTaosInterface.prototype.fetchFields_a = function fetchFields_a (result) { CTaosInterface.prototype.fetchFields_a = function fetchFields_a(result) {
let pfields = this.fetchFields(result); let pfields = this.fetchFields(result);
let pfieldscount = this.numFields(result); let pfieldscount = this.numFields(result);
let fields = []; let fields = [];
if (ref.isNull(pfields) == false) { if (ref.isNull(pfields) == false) {
pfields = ref.reinterpret(pfields, 68 * pfieldscount , 0); pfields = ref.reinterpret(pfields, 68 * pfieldscount, 0);
for (let i = 0; i < pfields.length; i += 68) { for (let i = 0; i < pfields.length; i += 68) {
//0 - 64 = name //65 = type, 66 - 67 = bytes //0 - 64 = name //65 = type, 66 - 67 = bytes
fields.push( { fields.push({
name: ref.readCString(ref.reinterpret(pfields,65,i)), name: ref.readCString(ref.reinterpret(pfields, 65, i)),
type: pfields[i + 65], type: pfields[i + 65],
bytes: pfields[i + 66] bytes: pfields[i + 66]
}) })
...@@ -488,7 +490,7 @@ CTaosInterface.prototype.fetchFields_a = function fetchFields_a (result) { ...@@ -488,7 +490,7 @@ CTaosInterface.prototype.fetchFields_a = function fetchFields_a (result) {
} }
// Stop a query by result handle // Stop a query by result handle
CTaosInterface.prototype.stopQuery = function stopQuery(result) { CTaosInterface.prototype.stopQuery = function stopQuery(result) {
if (result != null){ if (result != null) {
this.libtaos.taos_stop_query(result); this.libtaos.taos_stop_query(result);
} }
else { else {
...@@ -509,13 +511,13 @@ CTaosInterface.prototype.subscribe = function subscribe(connection, restart, top ...@@ -509,13 +511,13 @@ CTaosInterface.prototype.subscribe = function subscribe(connection, restart, top
try { try {
sql = sql != null ? ref.allocCString(sql) : ref.alloc(ref.types.char_ptr, ref.NULL); sql = sql != null ? ref.allocCString(sql) : ref.alloc(ref.types.char_ptr, ref.NULL);
} }
catch(err) { catch (err) {
throw "Attribute Error: sql is expected as a str"; throw "Attribute Error: sql is expected as a str";
} }
try { try {
topic = topic != null ? ref.allocCString(topic) : ref.alloc(ref.types.char_ptr, ref.NULL); topic = topic != null ? ref.allocCString(topic) : ref.alloc(ref.types.char_ptr, ref.NULL);
} }
catch(err) { catch (err) {
throw TypeError("topic is expected as a str"); throw TypeError("topic is expected as a str");
} }
...@@ -539,8 +541,8 @@ CTaosInterface.prototype.consume = function consume(subscription) { ...@@ -539,8 +541,8 @@ CTaosInterface.prototype.consume = function consume(subscription) {
pfields = ref.reinterpret(pfields, this.numFields(result) * 68, 0); pfields = ref.reinterpret(pfields, this.numFields(result) * 68, 0);
for (let i = 0; i < pfields.length; i += 68) { for (let i = 0; i < pfields.length; i += 68) {
//0 - 63 = name //64 - 65 = bytes, 66 - 67 = type //0 - 63 = name //64 - 65 = bytes, 66 - 67 = type
fields.push( { fields.push({
name: ref.readCString(ref.reinterpret(pfields,64,i)), name: ref.readCString(ref.reinterpret(pfields, 64, i)),
bytes: pfields[i + 64], bytes: pfields[i + 64],
type: pfields[i + 66] type: pfields[i + 66]
}) })
...@@ -548,7 +550,7 @@ CTaosInterface.prototype.consume = function consume(subscription) { ...@@ -548,7 +550,7 @@ CTaosInterface.prototype.consume = function consume(subscription) {
} }
let data = []; let data = [];
while(true) { while (true) {
let { blocks, num_of_rows } = this.fetchBlock(result, fields); let { blocks, num_of_rows } = this.fetchBlock(result, fields);
if (num_of_rows == 0) { if (num_of_rows == 0) {
break; break;
...@@ -559,7 +561,7 @@ CTaosInterface.prototype.consume = function consume(subscription) { ...@@ -559,7 +561,7 @@ CTaosInterface.prototype.consume = function consume(subscription) {
for (let j = 0; j < fields.length; j++) { for (let j = 0; j < fields.length; j++) {
rowBlock[j] = blocks[j][i]; rowBlock[j] = blocks[j][i];
} }
data[data.length-1] = (rowBlock); data[data.length - 1] = (rowBlock);
} }
} }
return { data: data, fields: fields, result: result }; return { data: data, fields: fields, result: result };
...@@ -570,11 +572,11 @@ CTaosInterface.prototype.unsubscribe = function unsubscribe(subscription) { ...@@ -570,11 +572,11 @@ CTaosInterface.prototype.unsubscribe = function unsubscribe(subscription) {
} }
// Continuous Query // Continuous Query
CTaosInterface.prototype.openStream = function openStream(connection, sql, callback, stime,stoppingCallback, param = ref.ref(ref.NULL)) { CTaosInterface.prototype.openStream = function openStream(connection, sql, callback, stime, stoppingCallback, param = ref.ref(ref.NULL)) {
try { try {
sql = ref.allocCString(sql); sql = ref.allocCString(sql);
} }
catch(err) { catch (err) {
throw "Attribute Error: sql string is expected as a str"; throw "Attribute Error: sql string is expected as a str";
} }
var cti = this; var cti = this;
...@@ -587,7 +589,7 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb ...@@ -587,7 +589,7 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb
let offset = 0; let offset = 0;
if (numOfRows2 > 0) { if (numOfRows2 > 0) {
for (let i = 0; i < fields.length; i++) { for (let i = 0; i < fields.length; i++) {
if (!convertFunctions[fields[i]['type']] ) { if (!convertFunctions[fields[i]['type']]) {
throw new errors.DatabaseError("Invalid data type returned from database"); throw new errors.DatabaseError("Invalid data type returned from database");
} }
blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, isMicro); blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, isMicro);
...@@ -596,8 +598,8 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb ...@@ -596,8 +598,8 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb
} }
callback(param2, result2, blocks, fields); callback(param2, result2, blocks, fields);
} }
asyncCallbackWrapper = ffi.Callback(ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.refType(ref.types.void_ptr2) ], asyncCallbackWrapper); asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.refType(ref.types.void_ptr2)], asyncCallbackWrapper);
asyncStoppingCallbackWrapper = ffi.Callback( ref.types.void, [ ref.types.void_ptr ], stoppingCallback); asyncStoppingCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr], stoppingCallback);
let streamHandle = this.libtaos.taos_open_stream(connection, sql, asyncCallbackWrapper, stime, param, asyncStoppingCallbackWrapper); let streamHandle = this.libtaos.taos_open_stream(connection, sql, asyncCallbackWrapper, stime, param, asyncStoppingCallbackWrapper);
if (ref.isNull(streamHandle)) { if (ref.isNull(streamHandle)) {
throw new errors.TDError('Failed to open a stream with TDengine'); throw new errors.TDError('Failed to open a stream with TDengine');
......
const ref = require('ref-napi'); const ref = require('ref-napi');
require('./globalfunc.js') require('./globalfunc.js')
const CTaosInterface = require('./cinterface') const CTaosInterface = require('./cinterface')
const errors = require ('./error') const errors = require('./error')
const TaosQuery = require('./taosquery') const TaosQuery = require('./taosquery')
const { PerformanceObserver, performance } = require('perf_hooks'); const { PerformanceObserver, performance } = require('perf_hooks');
module.exports = TDengineCursor; module.exports = TDengineCursor;
...@@ -22,7 +22,7 @@ module.exports = TDengineCursor; ...@@ -22,7 +22,7 @@ module.exports = TDengineCursor;
* @property {fields} - Array of the field objects in order from left to right of the latest data retrieved * @property {fields} - Array of the field objects in order from left to right of the latest data retrieved
* @since 1.0.0 * @since 1.0.0
*/ */
function TDengineCursor(connection=null) { function TDengineCursor(connection = null) {
//All parameters are store for sync queries only. //All parameters are store for sync queries only.
this._rowcount = -1; this._rowcount = -1;
this._connection = null; this._connection = null;
...@@ -91,7 +91,7 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback ...@@ -91,7 +91,7 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback
return null; return null;
} }
if (typeof options == 'function') { if (typeof options == 'function') {
callback = options; callback = options;
} }
if (typeof options != 'object') options = {} if (typeof options != 'object') options = {}
...@@ -144,10 +144,10 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback ...@@ -144,10 +144,10 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback
} }
TDengineCursor.prototype._createAffectedResponse = function (num, time) { TDengineCursor.prototype._createAffectedResponse = function (num, time) {
return "Query OK, " + num + " row(s) affected (" + (time * 0.001).toFixed(8) + "s)"; return "Query OK, " + num + " row(s) affected (" + (time * 0.001).toFixed(8) + "s)";
} }
TDengineCursor.prototype._createSetResponse = function (num, time) { TDengineCursor.prototype._createSetResponse = function (num, time) {
return "Query OK, " + num + " row(s) in set (" + (time * 0.001).toFixed(8) + "s)"; return "Query OK, " + num + " row(s) in set (" + (time * 0.001).toFixed(8) + "s)";
} }
TDengineCursor.prototype.executemany = function executemany() { TDengineCursor.prototype.executemany = function executemany() {
...@@ -176,27 +176,22 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) { ...@@ -176,27 +176,22 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
throw new errors.OperationalError("Invalid use of fetchall, either result or fields from query are null. First execute a query first"); throw new errors.OperationalError("Invalid use of fetchall, either result or fields from query are null. First execute a query first");
} }
let data = []; let num_of_rows = this._chandle.affectedRows(this._result);
let data = new Array(num_of_rows);
this._rowcount = 0; this._rowcount = 0;
//let nodetime = 0;
let time = 0; let time = 0;
const obs = new PerformanceObserver((items) => { const obs = new PerformanceObserver((items) => {
time += items.getEntries()[0].duration; time += items.getEntries()[0].duration;
performance.clearMarks(); performance.clearMarks();
}); });
/*
const obs2 = new PerformanceObserver((items) => {
nodetime += items.getEntries()[0].duration;
performance.clearMarks();
});
obs2.observe({ entryTypes: ['measure'] });
performance.mark('nodea');
*/
obs.observe({ entryTypes: ['measure'] }); obs.observe({ entryTypes: ['measure'] });
performance.mark('A'); performance.mark('A');
while(true) { while (true) {
let blockAndRows = this._chandle.fetchBlock(this._result, this._fields); let blockAndRows = this._chandle.fetchBlock(this._result, this._fields);
// console.log(blockAndRows);
// break;
let block = blockAndRows.blocks; let block = blockAndRows.blocks;
let num_of_rows = blockAndRows.num_of_rows; let num_of_rows = blockAndRows.num_of_rows;
if (num_of_rows == 0) { if (num_of_rows == 0) {
...@@ -205,22 +200,24 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) { ...@@ -205,22 +200,24 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
this._rowcount += num_of_rows; this._rowcount += num_of_rows;
let numoffields = this._fields.length; let numoffields = this._fields.length;
for (let i = 0; i < num_of_rows; i++) { for (let i = 0; i < num_of_rows; i++) {
data.push([]); // data.push([]);
let rowBlock = new Array(numoffields); let rowBlock = new Array(numoffields);
for (let j = 0; j < numoffields; j++) { for (let j = 0; j < numoffields; j++) {
rowBlock[j] = block[j][i]; rowBlock[j] = block[j][i];
} }
data[data.length-1] = (rowBlock); data[this._rowcount - num_of_rows + i] = (rowBlock);
// data.push(rowBlock);
} }
} }
performance.mark('B'); performance.mark('B');
performance.measure('query', 'A', 'B'); performance.measure('query', 'A', 'B');
let response = this._createSetResponse(this._rowcount, time) let response = this._createSetResponse(this._rowcount, time)
console.log(response); console.log(response);
// this._connection._clearResultSet(); // this._connection._clearResultSet();
let fields = this.fields; let fields = this.fields;
this._reset_result(); this._reset_result();
this.data = data; this.data = data;
...@@ -239,12 +236,12 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) { ...@@ -239,12 +236,12 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
* @return {number | Buffer} Number of affected rows or a Buffer that points to the results of the query * @return {number | Buffer} Number of affected rows or a Buffer that points to the results of the query
* @since 1.0.0 * @since 1.0.0
*/ */
TDengineCursor.prototype.execute_a = function execute_a (operation, options, callback, param) { TDengineCursor.prototype.execute_a = function execute_a(operation, options, callback, param) {
if (operation == undefined) { if (operation == undefined) {
throw new errors.ProgrammingError('No operation passed as argument'); throw new errors.ProgrammingError('No operation passed as argument');
return null; return null;
} }
if (typeof options == 'function') { if (typeof options == 'function') {
//we expect the parameter after callback to be param //we expect the parameter after callback to be param
param = callback; param = callback;
callback = options; callback = options;
...@@ -265,14 +262,14 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal ...@@ -265,14 +262,14 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal
} }
if (resCode >= 0) { if (resCode >= 0) {
// let fieldCount = cr._chandle.numFields(res2); // let fieldCount = cr._chandle.numFields(res2);
// if (fieldCount == 0) { // if (fieldCount == 0) {
// //cr._chandle.freeResult(res2); // //cr._chandle.freeResult(res2);
// return res2; // return res2;
// } // }
// else { // else {
// return res2; // return res2;
// } // }
return res2; return res2;
} }
...@@ -317,7 +314,7 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal ...@@ -317,7 +314,7 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal
* }) * })
*/ */
TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callback, param = {}) { TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callback, param = {}) {
if (typeof options == 'function') { if (typeof options == 'function') {
//we expect the parameter after callback to be param //we expect the parameter after callback to be param
param = callback; param = callback;
callback = options; callback = options;
...@@ -360,17 +357,17 @@ TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callb ...@@ -360,17 +357,17 @@ TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callb
for (let k = 0; k < fields.length; k++) { for (let k = 0; k < fields.length; k++) {
rowBlock[k] = block[k][j]; rowBlock[k] = block[k][j];
} }
data[data.length-1] = rowBlock; data[data.length - 1] = rowBlock;
} }
} }
cr._chandle.freeResult(result2); // free result, avoid seg faults and mem leaks! cr._chandle.freeResult(result2); // free result, avoid seg faults and mem leaks!
callback(param2, result2, numOfRows2, {data:data,fields:fields}); callback(param2, result2, numOfRows2, { data: data, fields: fields });
} }
} }
ref.writeObject(buf, 0, param); ref.writeObject(buf, 0, param);
param = this._chandle.fetch_rows_a(result, asyncCallbackWrapper, buf); //returned param param = this._chandle.fetch_rows_a(result, asyncCallbackWrapper, buf); //returned param
return {param:param,result:result}; return { param: param, result: result };
} }
/** /**
* Stop a query given the result handle. * Stop a query given the result handle.
...@@ -428,7 +425,7 @@ TDengineCursor.prototype.subscribe = function subscribe(config) { ...@@ -428,7 +425,7 @@ TDengineCursor.prototype.subscribe = function subscribe(config) {
*/ */
TDengineCursor.prototype.consumeData = async function consumeData(subscription, callback) { TDengineCursor.prototype.consumeData = async function consumeData(subscription, callback) {
while (true) { while (true) {
let { data, fields, result} = this._chandle.consume(subscription); let { data, fields, result } = this._chandle.consume(subscription);
callback(data, fields, result); callback(data, fields, result);
} }
} }
...@@ -450,30 +447,30 @@ TDengineCursor.prototype.unsubscribe = function unsubscribe(subscription) { ...@@ -450,30 +447,30 @@ TDengineCursor.prototype.unsubscribe = function unsubscribe(subscription) {
* @return {Buffer} A buffer pointing to the stream handle * @return {Buffer} A buffer pointing to the stream handle
* @since 1.3.0 * @since 1.3.0
*/ */
TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = 0, stoppingCallback, param = {}) { TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = 0, stoppingCallback, param = {}) {
let buf = ref.alloc('Object'); let buf = ref.alloc('Object');
ref.writeObject(buf, 0, param); ref.writeObject(buf, 0, param);
let asyncCallbackWrapper = function (param2, result2, blocks, fields) { let asyncCallbackWrapper = function (param2, result2, blocks, fields) {
let data = []; let data = [];
let num_of_rows = blocks[0].length; let num_of_rows = blocks[0].length;
for (let j = 0; j < num_of_rows; j++) { for (let j = 0; j < num_of_rows; j++) {
data.push([]); data.push([]);
let rowBlock = new Array(fields.length); let rowBlock = new Array(fields.length);
for (let k = 0; k < fields.length; k++) { for (let k = 0; k < fields.length; k++) {
rowBlock[k] = blocks[k][j]; rowBlock[k] = blocks[k][j];
} }
data[data.length-1] = rowBlock; data[data.length - 1] = rowBlock;
} }
callback(param2, result2, blocks, fields); callback(param2, result2, blocks, fields);
} }
return this._chandle.openStream(this._connection._conn, sql, asyncCallbackWrapper, stime, stoppingCallback, buf); return this._chandle.openStream(this._connection._conn, sql, asyncCallbackWrapper, stime, stoppingCallback, buf);
} }
/** /**
* Close a stream * Close a stream
* @param {Buffer} - A buffer pointing to the handle of the stream to be closed * @param {Buffer} - A buffer pointing to the handle of the stream to be closed
* @since 1.3.0 * @since 1.3.0
*/ */
TDengineCursor.prototype.closeStream = function closeStream(stream) { TDengineCursor.prototype.closeStream = function closeStream(stream) {
this._chandle.closeStream(stream); this._chandle.closeStream(stream);
} }
{
"name": "td2.0-connector",
"version": "2.0.6",
"lockfileVersion": 1,
"requires": true,
"dependencies": {
"array-index": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/array-index/-/array-index-1.0.0.tgz",
"integrity": "sha1-7FanSe4QPk4Ix5C5w1PfFgVbl/k=",
"requires": {
"debug": "^2.2.0",
"es6-symbol": "^3.0.2"
},
"dependencies": {
"debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"requires": {
"ms": "2.0.0"
}
},
"ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
}
}
},
"d": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz",
"integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==",
"requires": {
"es5-ext": "^0.10.50",
"type": "^1.0.1"
}
},
"debug": {
"version": "4.3.1",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
"integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
"requires": {
"ms": "2.1.2"
}
},
"es5-ext": {
"version": "0.10.53",
"resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz",
"integrity": "sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==",
"requires": {
"es6-iterator": "~2.0.3",
"es6-symbol": "~3.1.3",
"next-tick": "~1.0.0"
}
},
"es6-iterator": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz",
"integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=",
"requires": {
"d": "1",
"es5-ext": "^0.10.35",
"es6-symbol": "^3.1.1"
}
},
"es6-symbol": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz",
"integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==",
"requires": {
"d": "^1.0.1",
"ext": "^1.1.2"
}
},
"ext": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz",
"integrity": "sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==",
"requires": {
"type": "^2.0.0"
},
"dependencies": {
"type": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/type/-/type-2.1.0.tgz",
"integrity": "sha512-G9absDWvhAWCV2gmF1zKud3OyC61nZDwWvBL2DApaVFogI07CprggiQAOOjvp2NRjYWFzPyu7vwtDrQFq8jeSA=="
}
}
},
"ffi-napi": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/ffi-napi/-/ffi-napi-3.1.0.tgz",
"integrity": "sha512-EsHO+sP2p/nUC/3l/l8m9niee1BLm4asUFDzkkBGR4kYVgp2KqdAYUomZhkKtzim4Fq7mcYHjpUaIHsMqs+E1g==",
"requires": {
"debug": "^4.1.1",
"get-uv-event-loop-napi-h": "^1.0.5",
"node-addon-api": "^2.0.0",
"node-gyp-build": "^4.2.1",
"ref-napi": "^2.0.1",
"ref-struct-di": "^1.1.0"
},
"dependencies": {
"ref-napi": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-2.1.2.tgz",
"integrity": "sha512-aFl+vrIuLWUXMUTQGAwGAuSNLX3Ub5W3iVP8b7KyFFZUdn4+i4U1TXXTop0kCTUfGNu8glBGVz4lowkwMcPVVA==",
"requires": {
"debug": "^4.1.1",
"get-symbol-from-current-process-h": "^1.0.2",
"node-addon-api": "^2.0.0",
"node-gyp-build": "^4.2.1"
}
}
}
},
"get-symbol-from-current-process-h": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/get-symbol-from-current-process-h/-/get-symbol-from-current-process-h-1.0.2.tgz",
"integrity": "sha512-syloC6fsCt62ELLrr1VKBM1ggOpMdetX9hTrdW77UQdcApPHLmf7CI7OKcN1c9kYuNxKcDe4iJ4FY9sX3aw2xw=="
},
"get-uv-event-loop-napi-h": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/get-uv-event-loop-napi-h/-/get-uv-event-loop-napi-h-1.0.6.tgz",
"integrity": "sha512-t5c9VNR84nRoF+eLiz6wFrEp1SE2Acg0wS+Ysa2zF0eROes+LzOfuTaVHxGy8AbS8rq7FHEJzjnCZo1BupwdJg==",
"requires": {
"get-symbol-from-current-process-h": "^1.0.1"
}
},
"ms": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
},
"next-tick": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz",
"integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw="
},
"node-addon-api": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-2.0.2.tgz",
"integrity": "sha512-Ntyt4AIXyaLIuMHF6IOoTakB3K+RWxwtsHNRxllEoA6vPwP9o4866g6YWDLUdnucilZhmkxiHwHr11gAENw+QA=="
},
"node-gyp-build": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.2.3.tgz",
"integrity": "sha512-MN6ZpzmfNCRM+3t57PTJHgHyw/h4OWnZ6mR8P5j/uZtqQr46RRuDE/P+g3n0YR/AiYXeWixZZzaip77gdICfRg=="
},
"ref-array-napi": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/ref-array-napi/-/ref-array-napi-1.2.1.tgz",
"integrity": "sha512-jQp2WWSucmxkqVfoNfm7yDlDeGu3liAbzqfwjNybL80ooLOCnCZpAK2woDInY+lxNOK/VlIVSqeDEYb4gVPuNQ==",
"requires": {
"array-index": "1",
"debug": "2",
"ref-napi": "^1.4.2"
},
"dependencies": {
"debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"requires": {
"ms": "2.0.0"
}
},
"ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
},
"ref-napi": {
"version": "1.5.2",
"resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-1.5.2.tgz",
"integrity": "sha512-hwyNmWpUkt1bDWDW4aiwCoC+SJfJO69UIdjqssNqdaS0sYJpgqzosGg/rLtk69UoQ8drZdI9yyQefM7eEMM3Gw==",
"requires": {
"debug": "^3.1.0",
"node-addon-api": "^2.0.0",
"node-gyp-build": "^4.2.1"
},
"dependencies": {
"debug": {
"version": "3.2.7",
"resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
"integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
"requires": {
"ms": "^2.1.1"
}
},
"ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
}
}
}
}
},
"ref-napi": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-3.0.1.tgz",
"integrity": "sha512-W3rcb0E+tlO9u9ySFnX5vifInwwPGToOfFgTZUHJBNiOBsW0NNvgHz2zJN7ctABo/2yIlgdPQUvuqqfORIF4LA==",
"requires": {
"debug": "^4.1.1",
"get-symbol-from-current-process-h": "^1.0.2",
"node-addon-api": "^2.0.0",
"node-gyp-build": "^4.2.1"
}
},
"ref-struct-di": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/ref-struct-di/-/ref-struct-di-1.1.1.tgz",
"integrity": "sha512-2Xyn/0Qgz89VT+++WP0sTosdm9oeowLP23wRJYhG4BFdMUrLj3jhwHZNEytYNYgtPKLNTP3KJX4HEgBvM1/Y2g==",
"requires": {
"debug": "^3.1.0"
},
"dependencies": {
"debug": {
"version": "3.2.7",
"resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
"integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
"requires": {
"ms": "^2.1.1"
}
}
}
},
"ref-struct-napi": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/ref-struct-napi/-/ref-struct-napi-1.1.1.tgz",
"integrity": "sha512-YgS5/d7+kT5zgtySYI5ieH0hREdv+DabgDvoczxsui0f9VLm0rrDcWEj4DHKehsH+tJnVMsLwuyctWgvdEcVRw==",
"requires": {
"debug": "2",
"ref-napi": "^1.4.2"
},
"dependencies": {
"debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"requires": {
"ms": "2.0.0"
}
},
"ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
},
"ref-napi": {
"version": "1.5.2",
"resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-1.5.2.tgz",
"integrity": "sha512-hwyNmWpUkt1bDWDW4aiwCoC+SJfJO69UIdjqssNqdaS0sYJpgqzosGg/rLtk69UoQ8drZdI9yyQefM7eEMM3Gw==",
"requires": {
"debug": "^3.1.0",
"node-addon-api": "^2.0.0",
"node-gyp-build": "^4.2.1"
},
"dependencies": {
"debug": {
"version": "3.2.7",
"resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
"integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
"requires": {
"ms": "^2.1.1"
}
},
"ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
}
}
}
}
},
"type": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz",
"integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg=="
}
}
}
{ {
"name": "td2.0-connector", "name": "td2.0-connector",
"version": "2.0.6", "version": "2.0.7",
"description": "A Node.js connector for TDengine.", "description": "A Node.js connector for TDengine.",
"main": "tdengine.js", "main": "tdengine.js",
"directories": { "directories": {
......
...@@ -219,6 +219,7 @@ int32_t* taosGetErrno(); ...@@ -219,6 +219,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied") #define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied")
#define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing") #define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing")
#define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0514) //"Invalid tsdb state") #define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0514) //"Invalid tsdb state")
#define TSDB_CODE_VND_IS_CLOSING TAOS_DEF_ERROR_CODE(0, 0x0515) //"Database is closing")
// tsdb // tsdb
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) //"Invalid table ID") #define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) //"Invalid table ID")
......
...@@ -189,6 +189,8 @@ typedef struct { ...@@ -189,6 +189,8 @@ typedef struct {
} SColDes; } SColDes;
/* Used by main to communicate with parse_opt. */ /* Used by main to communicate with parse_opt. */
static char *g_dupstr = NULL;
typedef struct SArguments_S { typedef struct SArguments_S {
char * metaFile; char * metaFile;
uint32_t test_mode; uint32_t test_mode;
...@@ -361,7 +363,7 @@ typedef struct SDbs_S { ...@@ -361,7 +363,7 @@ typedef struct SDbs_S {
typedef struct SpecifiedQueryInfo_S { typedef struct SpecifiedQueryInfo_S {
uint64_t queryInterval; // 0: unlimit > 0 loop/s uint64_t queryInterval; // 0: unlimit > 0 loop/s
uint64_t concurrent; uint32_t concurrent;
uint64_t sqlCount; uint64_t sqlCount;
uint32_t asyncMode; // 0: sync, 1: async uint32_t asyncMode; // 0: sync, 1: async
uint64_t subscribeInterval; // ms uint64_t subscribeInterval; // ms
...@@ -372,6 +374,9 @@ typedef struct SpecifiedQueryInfo_S { ...@@ -372,6 +374,9 @@ typedef struct SpecifiedQueryInfo_S {
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
int resubAfterConsume[MAX_QUERY_SQL_COUNT]; int resubAfterConsume[MAX_QUERY_SQL_COUNT];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
char topic[MAX_QUERY_SQL_COUNT][32];
int consumed[MAX_QUERY_SQL_COUNT];
TAOS_RES* res[MAX_QUERY_SQL_COUNT];
uint64_t totalQueried; uint64_t totalQueried;
} SpecifiedQueryInfo; } SpecifiedQueryInfo;
...@@ -389,7 +394,7 @@ typedef struct SuperQueryInfo_S { ...@@ -389,7 +394,7 @@ typedef struct SuperQueryInfo_S {
uint64_t sqlCount; uint64_t sqlCount;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
int resubAfterConsume[MAX_QUERY_SQL_COUNT]; int resubAfterConsume;
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
char* childTblName; char* childTblName;
...@@ -416,7 +421,8 @@ typedef struct SThreadInfo_S { ...@@ -416,7 +421,8 @@ typedef struct SThreadInfo_S {
int threadID; int threadID;
char db_name[MAX_DB_NAME_SIZE+1]; char db_name[MAX_DB_NAME_SIZE+1];
uint32_t time_precision; uint32_t time_precision;
char fp[4096]; char filePath[4096];
FILE *fp;
char tb_prefix[MAX_TB_NAME_SIZE]; char tb_prefix[MAX_TB_NAME_SIZE];
uint64_t start_table_from; uint64_t start_table_from;
uint64_t end_table_to; uint64_t end_table_to;
...@@ -449,6 +455,7 @@ typedef struct SThreadInfo_S { ...@@ -449,6 +455,7 @@ typedef struct SThreadInfo_S {
// seq of query or subscribe // seq of query or subscribe
uint64_t querySeq; // sequence number of sql command uint64_t querySeq; // sequence number of sql command
TAOS_SUB* tsub;
} threadInfo; } threadInfo;
...@@ -526,12 +533,13 @@ static int taosRandom() ...@@ -526,12 +533,13 @@ static int taosRandom()
#endif // ifdef Windows #endif // ifdef Windows
static void prompt(); static void prompt();
static void prompt2();
static int createDatabasesAndStables(); static int createDatabasesAndStables();
static void createChildTables(); static void createChildTables();
static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet); static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet);
static int postProceSql(char *host, struct sockaddr_in *pServAddr, static int postProceSql(char *host, struct sockaddr_in *pServAddr,
uint16_t port, char* sqlstr, char *resultFile); uint16_t port, char* sqlstr, threadInfo *pThreadInfo);
static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq,
int disorderRatio, int disorderRange);
/* ************ Global variables ************ */ /* ************ Global variables ************ */
...@@ -679,8 +687,9 @@ static void printHelp() { ...@@ -679,8 +687,9 @@ static void printHelp() {
"The data_type of columns, default: INT,INT,INT,INT."); "The data_type of columns, default: INT,INT,INT,INT.");
printf("%s%s%s%s\n", indent, "-w", indent, printf("%s%s%s%s\n", indent, "-w", indent,
"The length of data_type 'BINARY' or 'NCHAR'. Default is 16"); "The length of data_type 'BINARY' or 'NCHAR'. Default is 16");
printf("%s%s%s%s\n", indent, "-l", indent, printf("%s%s%s%s%d\n", indent, "-l", indent,
"The number of columns per record. Default is 4."); "The number of columns per record. Default is 4. Max values is ",
MAX_NUM_DATATYPE);
printf("%s%s%s%s\n", indent, "-T", indent, printf("%s%s%s%s\n", indent, "-T", indent,
"The number of threads. Default is 10."); "The number of threads. Default is 10.");
printf("%s%s%s%s\n", indent, "-i", indent, printf("%s%s%s%s\n", indent, "-i", indent,
...@@ -724,7 +733,6 @@ static bool isStringNumber(char *input) ...@@ -724,7 +733,6 @@ static bool isStringNumber(char *input)
} }
static void parse_args(int argc, char *argv[], SArguments *arguments) { static void parse_args(int argc, char *argv[], SArguments *arguments) {
char **sptr;
for (int i = 1; i < argc; i++) { for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-f") == 0) { if (strcmp(argv[i], "-f") == 0) {
...@@ -851,20 +859,31 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { ...@@ -851,20 +859,31 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
} }
arguments->database = argv[++i]; arguments->database = argv[++i];
} else if (strcmp(argv[i], "-l") == 0) { } else if (strcmp(argv[i], "-l") == 0) {
if ((argc == i+1) || if (argc == i+1) {
(!isStringNumber(argv[i+1]))) { if (!isStringNumber(argv[i+1])) {
printHelp(); printHelp();
errorPrint("%s", "\n\t-l need a number following!\n"); errorPrint("%s", "\n\t-l need a number following!\n");
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
}
} }
arguments->num_of_CPR = atoi(argv[++i]); arguments->num_of_CPR = atoi(argv[++i]);
if (arguments->num_of_CPR > MAX_NUM_DATATYPE) {
printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_DATATYPE);
prompt();
arguments->num_of_CPR = MAX_NUM_DATATYPE;
}
for (int col = arguments->num_of_CPR; col < MAX_NUM_DATATYPE; col++) {
arguments->datatype[col] = NULL;
}
} else if (strcmp(argv[i], "-b") == 0) { } else if (strcmp(argv[i], "-b") == 0) {
if (argc == i+1) { if (argc == i+1) {
printHelp(); printHelp();
errorPrint("%s", "\n\t-b need valid string following!\n"); errorPrint("%s", "\n\t-b need valid string following!\n");
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
sptr = arguments->datatype;
++i; ++i;
if (strstr(argv[i], ",") == NULL) { if (strstr(argv[i], ",") == NULL) {
// only one col // only one col
...@@ -881,12 +900,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { ...@@ -881,12 +900,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
errorPrint("%s", "-b: Invalid data_type!\n"); errorPrint("%s", "-b: Invalid data_type!\n");
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
sptr[0] = argv[i]; arguments->datatype[0] = argv[i];
} else { } else {
// more than one col // more than one col
int index = 0; int index = 0;
char *dupstr = strdup(argv[i]); g_dupstr = strdup(argv[i]);
char *running = dupstr; char *running = g_dupstr;
char *token = strsep(&running, ","); char *token = strsep(&running, ",");
while(token != NULL) { while(token != NULL) {
if (strcasecmp(token, "INT") if (strcasecmp(token, "INT")
...@@ -899,16 +918,15 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { ...@@ -899,16 +918,15 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
&& strcasecmp(token, "BINARY") && strcasecmp(token, "BINARY")
&& strcasecmp(token, "NCHAR")) { && strcasecmp(token, "NCHAR")) {
printHelp(); printHelp();
free(dupstr); free(g_dupstr);
errorPrint("%s", "-b: Invalid data_type!\n"); errorPrint("%s", "-b: Invalid data_type!\n");
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
} }
sptr[index++] = token; arguments->datatype[index++] = token;
token = strsep(&running, ","); token = strsep(&running, ",");
if (index >= MAX_NUM_DATATYPE) break; if (index >= MAX_NUM_DATATYPE) break;
} }
free(dupstr); arguments->datatype[index] = NULL;
sptr[index] = NULL;
} }
} else if (strcmp(argv[i], "-w") == 0) { } else if (strcmp(argv[i], "-w") == 0) {
if ((argc == i+1) || if ((argc == i+1) ||
...@@ -1081,9 +1099,9 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { ...@@ -1081,9 +1099,9 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
} }
} }
verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
if (code != 0) { if (code != 0) {
if (!quiet) { if (!quiet) {
debugPrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
errorPrint("Failed to execute %s, reason: %s\n", command, taos_errstr(res)); errorPrint("Failed to execute %s, reason: %s\n", command, taos_errstr(res));
} }
taos_free_result(res); taos_free_result(res);
...@@ -1101,24 +1119,22 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { ...@@ -1101,24 +1119,22 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
return 0; return 0;
} }
static void appendResultBufToFile(char *resultBuf, char *resultFile) static void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo)
{ {
FILE *fp = NULL; pThreadInfo->fp = fopen(pThreadInfo->filePath, "at");
if (resultFile[0] != 0) { if (pThreadInfo->fp == NULL) {
fp = fopen(resultFile, "at");
if (fp == NULL) {
errorPrint( errorPrint(
"%s() LN%d, failed to open result file: %s, result will not save to file\n", "%s() LN%d, failed to open result file: %s, result will not save to file\n",
__func__, __LINE__, resultFile); __func__, __LINE__, pThreadInfo->filePath);
return; return;
}
} }
fprintf(fp, "%s", resultBuf); fprintf(pThreadInfo->fp, "%s", resultBuf);
tmfclose(fp); tmfclose(pThreadInfo->fp);
pThreadInfo->fp = NULL;
} }
static void appendResultToFile(TAOS_RES *res, char* resultFile) { static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
TAOS_ROW row = NULL; TAOS_ROW row = NULL;
int num_rows = 0; int num_rows = 0;
int num_fields = taos_field_count(res); int num_fields = taos_field_count(res);
...@@ -1136,10 +1152,11 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) { ...@@ -1136,10 +1152,11 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
// fetch the records row by row // fetch the records row by row
while((row = taos_fetch_row(res))) { while((row = taos_fetch_row(res))) {
if (totalLen >= 100*1024*1024 - 32000) { if ((strlen(pThreadInfo->filePath) > 0)
appendResultBufToFile(databuf, resultFile); && (totalLen >= 100*1024*1024 - 32000)) {
totalLen = 0; appendResultBufToFile(databuf, pThreadInfo);
memset(databuf, 0, 100*1024*1024); totalLen = 0;
memset(databuf, 0, 100*1024*1024);
} }
num_rows++; num_rows++;
int len = taos_print_row(temp, row, fields, num_fields); int len = taos_print_row(temp, row, fields, num_fields);
...@@ -1150,8 +1167,10 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) { ...@@ -1150,8 +1167,10 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
} }
verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n", verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n",
__func__, __LINE__, databuf, resultFile); __func__, __LINE__, databuf, pThreadInfo->filePath);
appendResultBufToFile(databuf, resultFile); if (strlen(pThreadInfo->filePath) > 0) {
appendResultBufToFile(databuf, pThreadInfo);
}
free(databuf); free(databuf);
} }
...@@ -1167,16 +1186,14 @@ static void selectAndGetResult( ...@@ -1167,16 +1186,14 @@ static void selectAndGetResult(
return; return;
} }
if ((strlen(pThreadInfo->fp))) { fetchResult(res, pThreadInfo);
appendResultToFile(res, pThreadInfo->fp);
}
taos_free_result(res); taos_free_result(res);
} else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
int retCode = postProceSql( int retCode = postProceSql(
g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port, g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port,
command, command,
pThreadInfo->fp); pThreadInfo);
if (0 != retCode) { if (0 != retCode) {
printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID); printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID);
} }
...@@ -1709,7 +1726,7 @@ static void printfQueryMeta() { ...@@ -1709,7 +1726,7 @@ static void printfQueryMeta() {
printf("query interval: \033[33m%"PRIu64" ms\033[0m\n", printf("query interval: \033[33m%"PRIu64" ms\033[0m\n",
g_queryInfo.specifiedQueryInfo.queryInterval); g_queryInfo.specifiedQueryInfo.queryInterval);
printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times); printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times);
printf("concurrent: \033[33m%"PRIu64"\033[0m\n", printf("concurrent: \033[33m%d\033[0m\n",
g_queryInfo.specifiedQueryInfo.concurrent); g_queryInfo.specifiedQueryInfo.concurrent);
printf("mod: \033[33m%s\033[0m\n", printf("mod: \033[33m%s\033[0m\n",
(g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync"); (g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync");
...@@ -2006,13 +2023,13 @@ static void printfQuerySystemInfo(TAOS * taos) { ...@@ -2006,13 +2023,13 @@ static void printfQuerySystemInfo(TAOS * taos) {
// show variables // show variables
res = taos_query(taos, "show variables;"); res = taos_query(taos, "show variables;");
//appendResultToFile(res, filename); //fetchResult(res, filename);
xDumpResultToFile(filename, res); xDumpResultToFile(filename, res);
// show dnodes // show dnodes
res = taos_query(taos, "show dnodes;"); res = taos_query(taos, "show dnodes;");
xDumpResultToFile(filename, res); xDumpResultToFile(filename, res);
//appendResultToFile(res, filename); //fetchResult(res, filename);
// show databases // show databases
res = taos_query(taos, "show databases;"); res = taos_query(taos, "show databases;");
...@@ -2048,7 +2065,7 @@ static void printfQuerySystemInfo(TAOS * taos) { ...@@ -2048,7 +2065,7 @@ static void printfQuerySystemInfo(TAOS * taos) {
} }
static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port, static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port,
char* sqlstr, char *resultFile) char* sqlstr, threadInfo *pThreadInfo)
{ {
char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s"; char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s";
...@@ -2184,8 +2201,8 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port ...@@ -2184,8 +2201,8 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
response_buf[RESP_BUF_LEN - 1] = '\0'; response_buf[RESP_BUF_LEN - 1] = '\0';
printf("Response:\n%s\n", response_buf); printf("Response:\n%s\n", response_buf);
if (resultFile) { if (strlen(pThreadInfo->filePath) > 0) {
appendResultBufToFile(response_buf, resultFile); appendResultBufToFile(response_buf, pThreadInfo);
} }
free(request_buf); free(request_buf);
...@@ -2677,8 +2694,6 @@ static int createSuperTable( ...@@ -2677,8 +2694,6 @@ static int createSuperTable(
snprintf(command, BUFFER_SIZE, snprintf(command, BUFFER_SIZE,
"create table if not exists %s.%s (ts timestamp%s) tags %s", "create table if not exists %s.%s (ts timestamp%s) tags %s",
dbName, superTbl->sTblName, cols, tags); dbName, superTbl->sTblName, cols, tags);
verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, command);
if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
errorPrint( "create supertable %s failed!\n\n", errorPrint( "create supertable %s failed!\n\n",
superTbl->sTblName); superTbl->sTblName);
...@@ -2701,7 +2716,6 @@ static int createDatabasesAndStables() { ...@@ -2701,7 +2716,6 @@ static int createDatabasesAndStables() {
for (int i = 0; i < g_Dbs.dbCount; i++) { for (int i = 0; i < g_Dbs.dbCount; i++) {
if (g_Dbs.db[i].drop) { if (g_Dbs.db[i].drop) {
sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName);
verbosePrint("%s() %d command: %s\n", __func__, __LINE__, command);
if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
taos_close(taos); taos_close(taos);
return -1; return -1;
...@@ -2774,7 +2788,6 @@ static int createDatabasesAndStables() { ...@@ -2774,7 +2788,6 @@ static int createDatabasesAndStables() {
" precision \'%s\';", g_Dbs.db[i].dbCfg.precision); " precision \'%s\';", g_Dbs.db[i].dbCfg.precision);
} }
debugPrint("%s() %d command: %s\n", __func__, __LINE__, command);
if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
taos_close(taos); taos_close(taos);
errorPrint( "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); errorPrint( "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName);
...@@ -2791,8 +2804,6 @@ static int createDatabasesAndStables() { ...@@ -2791,8 +2804,6 @@ static int createDatabasesAndStables() {
for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName,
g_Dbs.db[i].superTbls[j].sTblName); g_Dbs.db[i].superTbls[j].sTblName);
verbosePrint("%s() %d command: %s\n", __func__, __LINE__, command);
ret = queryDbExec(taos, command, NO_INSERT_TYPE, true); ret = queryDbExec(taos, command, NO_INSERT_TYPE, true);
if ((ret != 0) || (g_Dbs.db[i].drop)) { if ((ret != 0) || (g_Dbs.db[i].drop)) {
...@@ -2896,7 +2907,6 @@ static void* createTable(void *sarg) ...@@ -2896,7 +2907,6 @@ static void* createTable(void *sarg)
} }
len = 0; len = 0;
verbosePrint("%s() LN%d %s\n", __func__, __LINE__, buffer);
if (0 != queryDbExec(pThreadInfo->taos, buffer, NO_INSERT_TYPE, false)){ if (0 != queryDbExec(pThreadInfo->taos, buffer, NO_INSERT_TYPE, false)){
errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer); errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer);
free(buffer); free(buffer);
...@@ -2912,7 +2922,6 @@ static void* createTable(void *sarg) ...@@ -2912,7 +2922,6 @@ static void* createTable(void *sarg)
} }
if (0 != len) { if (0 != len) {
verbosePrint("%s() %d buffer: %s\n", __func__, __LINE__, buffer);
if (0 != queryDbExec(pThreadInfo->taos, buffer, NO_INSERT_TYPE, false)) { if (0 != queryDbExec(pThreadInfo->taos, buffer, NO_INSERT_TYPE, false)) {
errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer); errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer);
} }
...@@ -2948,18 +2957,18 @@ static int startMultiThreadCreateChildTable( ...@@ -2948,18 +2957,18 @@ static int startMultiThreadCreateChildTable(
b = ntables % threads; b = ntables % threads;
for (int64_t i = 0; i < threads; i++) { for (int64_t i = 0; i < threads; i++) {
threadInfo *t_info = infos + i; threadInfo *pThreadInfo = infos + i;
t_info->threadID = i; pThreadInfo->threadID = i;
tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE); tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
t_info->superTblInfo = superTblInfo; pThreadInfo->superTblInfo = superTblInfo;
verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name);
t_info->taos = taos_connect( pThreadInfo->taos = taos_connect(
g_Dbs.host, g_Dbs.host,
g_Dbs.user, g_Dbs.user,
g_Dbs.password, g_Dbs.password,
db_name, db_name,
g_Dbs.port); g_Dbs.port);
if (t_info->taos == NULL) { if (pThreadInfo->taos == NULL) {
errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n", errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n",
__func__, __LINE__, taos_errstr(NULL)); __func__, __LINE__, taos_errstr(NULL));
free(pids); free(pids);
...@@ -2967,14 +2976,14 @@ static int startMultiThreadCreateChildTable( ...@@ -2967,14 +2976,14 @@ static int startMultiThreadCreateChildTable(
return -1; return -1;
} }
t_info->start_table_from = startFrom; pThreadInfo->start_table_from = startFrom;
t_info->ntables = i<b?a+1:a; pThreadInfo->ntables = i<b?a+1:a;
t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1; pThreadInfo->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
startFrom = t_info->end_table_to + 1; startFrom = pThreadInfo->end_table_to + 1;
t_info->use_metric = true; pThreadInfo->use_metric = true;
t_info->cols = cols; pThreadInfo->cols = cols;
t_info->minDelay = UINT64_MAX; pThreadInfo->minDelay = UINT64_MAX;
pthread_create(pids + i, NULL, createTable, t_info); pthread_create(pids + i, NULL, createTable, pThreadInfo);
} }
for (int i = 0; i < threads; i++) { for (int i = 0; i < threads; i++) {
...@@ -2982,8 +2991,8 @@ static int startMultiThreadCreateChildTable( ...@@ -2982,8 +2991,8 @@ static int startMultiThreadCreateChildTable(
} }
for (int i = 0; i < threads; i++) { for (int i = 0; i < threads; i++) {
threadInfo *t_info = infos + i; threadInfo *pThreadInfo = infos + i;
taos_close(t_info->taos); taos_close(pThreadInfo->taos);
} }
free(pids); free(pids);
...@@ -3442,16 +3451,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { ...@@ -3442,16 +3451,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} }
g_args.interlace_rows = interlaceRows->valueint; g_args.interlace_rows = interlaceRows->valueint;
// rows per table need be less than insert batch
if (g_args.interlace_rows > g_args.num_of_RPR) {
printf("NOTICE: interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
g_args.interlace_rows, g_args.num_of_RPR);
printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
g_args.num_of_RPR);
prompt2();
g_args.interlace_rows = g_args.num_of_RPR;
}
} else if (!interlaceRows) { } else if (!interlaceRows) {
g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req
} else { } else {
...@@ -3483,6 +3482,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { ...@@ -3483,6 +3482,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
__func__, __LINE__); __func__, __LINE__);
goto PARSE_OVER; goto PARSE_OVER;
} else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) { } else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) {
printf("NOTICE: number of records per request value %"PRIu64" > %d\n\n",
numRecPerReq->valueint, MAX_RECORDS_PER_REQ);
printf(" number of records per request value will be set to %d\n\n",
MAX_RECORDS_PER_REQ);
prompt();
numRecPerReq->valueint = MAX_RECORDS_PER_REQ; numRecPerReq->valueint = MAX_RECORDS_PER_REQ;
} }
g_args.num_of_RPR = numRecPerReq->valueint; g_args.num_of_RPR = numRecPerReq->valueint;
...@@ -3512,6 +3516,16 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { ...@@ -3512,6 +3516,16 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER; goto PARSE_OVER;
} }
// rows per table need be less than insert batch
if (g_args.interlace_rows > g_args.num_of_RPR) {
printf("NOTICE: interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
g_args.interlace_rows, g_args.num_of_RPR);
printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
g_args.num_of_RPR);
prompt();
g_args.interlace_rows = g_args.num_of_RPR;
}
cJSON* dbs = cJSON_GetObjectItem(root, "databases"); cJSON* dbs = cJSON_GetObjectItem(root, "databases");
if (!dbs || dbs->type != cJSON_Array) { if (!dbs || dbs->type != cJSON_Array) {
printf("ERROR: failed to read json, databases not found\n"); printf("ERROR: failed to read json, databases not found\n");
...@@ -3968,10 +3982,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { ...@@ -3968,10 +3982,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR); i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR);
printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n", printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
g_args.num_of_RPR); g_args.num_of_RPR);
if (!g_args.answer_yes) { prompt();
printf(" press Enter key to continue or Ctrl-C to stop.");
(void)getchar();
}
g_Dbs.db[i].superTbls[j].interlaceRows = g_args.num_of_RPR; g_Dbs.db[i].superTbls[j].interlaceRows = g_args.num_of_RPR;
} }
} else if (!interlaceRows) { } else if (!interlaceRows) {
...@@ -4186,7 +4197,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { ...@@ -4186,7 +4197,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (concurrent && concurrent->type == cJSON_Number) { if (concurrent && concurrent->type == cJSON_Number) {
if (concurrent->valueint <= 0) { if (concurrent->valueint <= 0) {
errorPrint( errorPrint(
"%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n", "%s() LN%d, query sqlCount %"PRIu64" or concurrent %d is not correct.\n",
__func__, __LINE__, __func__, __LINE__,
g_queryInfo.specifiedQueryInfo.sqlCount, g_queryInfo.specifiedQueryInfo.sqlCount,
g_queryInfo.specifiedQueryInfo.concurrent); g_queryInfo.specifiedQueryInfo.concurrent);
...@@ -4253,24 +4264,28 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { ...@@ -4253,24 +4264,28 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} }
// sqls // sqls
cJSON* superSqls = cJSON_GetObjectItem(specifiedQuery, "sqls"); cJSON* specifiedSqls = cJSON_GetObjectItem(specifiedQuery, "sqls");
if (!superSqls) { if (!specifiedSqls) {
g_queryInfo.specifiedQueryInfo.sqlCount = 0; g_queryInfo.specifiedQueryInfo.sqlCount = 0;
} else if (superSqls->type != cJSON_Array) { } else if (specifiedSqls->type != cJSON_Array) {
errorPrint("%s() LN%d, failed to read json, super sqls not found\n", errorPrint("%s() LN%d, failed to read json, super sqls not found\n",
__func__, __LINE__); __func__, __LINE__);
goto PARSE_OVER; goto PARSE_OVER;
} else { } else {
int superSqlSize = cJSON_GetArraySize(superSqls); int superSqlSize = cJSON_GetArraySize(specifiedSqls);
if (superSqlSize > MAX_QUERY_SQL_COUNT) { if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent
errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", > MAX_QUERY_SQL_COUNT) {
__func__, __LINE__, MAX_QUERY_SQL_COUNT); errorPrint("%s() LN%d, failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n",
__func__, __LINE__,
superSqlSize,
g_queryInfo.specifiedQueryInfo.concurrent,
MAX_QUERY_SQL_COUNT);
goto PARSE_OVER; goto PARSE_OVER;
} }
g_queryInfo.specifiedQueryInfo.sqlCount = superSqlSize; g_queryInfo.specifiedQueryInfo.sqlCount = superSqlSize;
for (int j = 0; j < superSqlSize; ++j) { for (int j = 0; j < superSqlSize; ++j) {
cJSON* sql = cJSON_GetArrayItem(superSqls, j); cJSON* sql = cJSON_GetArrayItem(specifiedSqls, j);
if (sql == NULL) continue; if (sql == NULL) continue;
cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
...@@ -4434,16 +4449,28 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { ...@@ -4434,16 +4449,28 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
} }
// sqls cJSON* superResubAfterConsume =
cJSON* subsqls = cJSON_GetObjectItem(superQuery, "sqls"); cJSON_GetObjectItem(superQuery, "resubAfterConsume");
if (!subsqls) { if (superResubAfterConsume
&& superResubAfterConsume->type == cJSON_Number) {
g_queryInfo.superQueryInfo.resubAfterConsume =
superResubAfterConsume->valueint;
} else if (!superResubAfterConsume) {
//printf("failed to read json, subscribe interval no found\n");
////goto PARSE_OVER;
g_queryInfo.superQueryInfo.resubAfterConsume = 1;
}
// supert table sqls
cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls");
if (!superSqls) {
g_queryInfo.superQueryInfo.sqlCount = 0; g_queryInfo.superQueryInfo.sqlCount = 0;
} else if (subsqls->type != cJSON_Array) { } else if (superSqls->type != cJSON_Array) {
errorPrint("%s() LN%d: failed to read json, super sqls not found\n", errorPrint("%s() LN%d: failed to read json, super sqls not found\n",
__func__, __LINE__); __func__, __LINE__);
goto PARSE_OVER; goto PARSE_OVER;
} else { } else {
int superSqlSize = cJSON_GetArraySize(subsqls); int superSqlSize = cJSON_GetArraySize(superSqls);
if (superSqlSize > MAX_QUERY_SQL_COUNT) { if (superSqlSize > MAX_QUERY_SQL_COUNT) {
errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n",
__func__, __LINE__, MAX_QUERY_SQL_COUNT); __func__, __LINE__, MAX_QUERY_SQL_COUNT);
...@@ -4452,7 +4479,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { ...@@ -4452,7 +4479,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.superQueryInfo.sqlCount = superSqlSize; g_queryInfo.superQueryInfo.sqlCount = superSqlSize;
for (int j = 0; j < superSqlSize; ++j) { for (int j = 0; j < superSqlSize; ++j) {
cJSON* sql = cJSON_GetArrayItem(subsqls, j); cJSON* sql = cJSON_GetArrayItem(superSqls, j);
if (sql == NULL) continue; if (sql == NULL) continue;
cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
...@@ -4465,18 +4492,6 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { ...@@ -4465,18 +4492,6 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring,
MAX_QUERY_SQL_LENGTH); MAX_QUERY_SQL_LENGTH);
cJSON* superResubAfterConsume =
cJSON_GetObjectItem(sql, "resubAfterConsume");
if (superResubAfterConsume
&& superResubAfterConsume->type == cJSON_Number) {
g_queryInfo.superQueryInfo.resubAfterConsume[j] =
superResubAfterConsume->valueint;
} else if (!superResubAfterConsume) {
//printf("failed to read json, subscribe interval no found\n");
//goto PARSE_OVER;
g_queryInfo.superQueryInfo.resubAfterConsume[j] = 1;
}
cJSON *result = cJSON_GetObjectItem(sql, "result"); cJSON *result = cJSON_GetObjectItem(sql, "result");
if (result != NULL && result->type == cJSON_String if (result != NULL && result->type == cJSON_String
&& result->valuestring != NULL){ && result->valuestring != NULL){
...@@ -4812,10 +4827,12 @@ static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, uint64_t k) ...@@ -4812,10 +4827,12 @@ static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, uint64_t k)
return affectedRows; return affectedRows;
} }
static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t tableSeq) static void getTableName(char *pTblName,
threadInfo* pThreadInfo, uint64_t tableSeq)
{ {
SSuperTable* superTblInfo = pThreadInfo->superTblInfo; SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
if (superTblInfo) { if ((superTblInfo)
&& (AUTO_CREATE_SUBTBL != superTblInfo->autoCreateTable)) {
if (superTblInfo->childTblLimit > 0) { if (superTblInfo->childTblLimit > 0) {
snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s",
superTblInfo->childTblName + superTblInfo->childTblName +
...@@ -4854,6 +4871,14 @@ static int64_t generateDataTail( ...@@ -4854,6 +4871,14 @@ static int64_t generateDataTail(
verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch); verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch);
bool tsRand;
if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource,
"rand", strlen("rand")))) {
tsRand = true;
} else {
tsRand = false;
}
uint64_t k = 0; uint64_t k = 0;
for (k = 0; k < batch;) { for (k = 0; k < batch;) {
char data[MAX_DATA_SIZE]; char data[MAX_DATA_SIZE];
...@@ -4862,71 +4887,47 @@ static int64_t generateDataTail( ...@@ -4862,71 +4887,47 @@ static int64_t generateDataTail(
int64_t retLen = 0; int64_t retLen = 0;
if (superTblInfo) { if (superTblInfo) {
if (0 == strncasecmp(superTblInfo->dataSource, if (tsRand) {
"sample", strlen("sample"))) { retLen = generateRowData(
data,
startTime + getTSRandTail(
superTblInfo->timeStampStep, k,
superTblInfo->disorderRatio,
superTblInfo->disorderRange),
superTblInfo);
} else {
retLen = getRowDataFromSample( retLen = getRowDataFromSample(
data, data,
remainderBufLen, remainderBufLen,
startTime + superTblInfo->timeStampStep * k, startTime + superTblInfo->timeStampStep * k,
superTblInfo, superTblInfo,
pSamplePos); pSamplePos);
} else if (0 == strncasecmp(superTblInfo->dataSource,
"rand", strlen("rand"))) {
int64_t randTail = superTblInfo->timeStampStep * k;
if (superTblInfo->disorderRatio > 0) {
int rand_num = taosRandom() % 100;
if(rand_num < superTblInfo->disorderRatio) {
randTail = (randTail + (taosRandom() % superTblInfo->disorderRange + 1)) * (-1);
debugPrint("rand data generated, back %"PRId64"\n", randTail);
}
} }
if (retLen > remainderBufLen) {
int64_t d = startTime break;
+ randTail;
retLen = generateRowData(
data,
d,
superTblInfo);
}
if (retLen > remainderBufLen) {
break;
}
pstr += snprintf(pstr , retLen + 1, "%s", data);
k++;
len += retLen;
remainderBufLen -= retLen;
} else {
char **data_type = g_args.datatype;
int lenOfBinary = g_args.len_of_binary;
int64_t randTail = DEFAULT_TIMESTAMP_STEP * k;
if (g_args.disorderRatio != 0) {
int rand_num = taosRandom() % 100;
if (rand_num < g_args.disorderRatio) {
randTail = (randTail + (taosRandom() % g_args.disorderRange + 1)) * (-1);
debugPrint("rand data generated, back %"PRId64"\n", randTail);
} }
} else {
randTail = DEFAULT_TIMESTAMP_STEP * k;
}
retLen = generateData(data, data_type, pstr += snprintf(pstr , retLen + 1, "%s", data);
ncols_per_record, k++;
startTime + randTail, len += retLen;
lenOfBinary); remainderBufLen -= retLen;
} else {
if (len > remainderBufLen) char **data_type = g_args.datatype;
break; int lenOfBinary = g_args.len_of_binary;
retLen = generateData(data, data_type,
ncols_per_record,
startTime + getTSRandTail(
DEFAULT_TIMESTAMP_STEP, k,
g_args.disorderRatio,
g_args.disorderRange),
lenOfBinary);
if (len > remainderBufLen)
break;
pstr += sprintf(pstr, "%s", data); pstr += sprintf(pstr, "%s", data);
k++; k++;
len += retLen; len += retLen;
remainderBufLen -= retLen; remainderBufLen -= retLen;
} }
verbosePrint("%s() LN%d len=%"PRIu64" k=%"PRIu64" \nbuffer=%s\n", verbosePrint("%s() LN%d len=%"PRIu64" k=%"PRIu64" \nbuffer=%s\n",
...@@ -5069,6 +5070,22 @@ static int64_t generateInterlaceDataBuffer( ...@@ -5069,6 +5070,22 @@ static int64_t generateInterlaceDataBuffer(
return k; return k;
} }
static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq,
int disorderRatio, int disorderRange)
{
int64_t randTail = timeStampStep * seq;
if (disorderRatio > 0) {
int rand_num = taosRandom() % 100;
if(rand_num < disorderRatio) {
randTail = (randTail +
(taosRandom() % disorderRange + 1)) * (-1);
debugPrint("rand data generated, back %"PRId64"\n", randTail);
}
}
return randTail;
}
static int64_t generateProgressiveDataBuffer( static int64_t generateProgressiveDataBuffer(
char *tableName, char *tableName,
int64_t tableSeq, int64_t tableSeq,
...@@ -5759,19 +5776,13 @@ static void startMultiThreadInsertData(int threads, char* db_name, ...@@ -5759,19 +5776,13 @@ static void startMultiThreadInsertData(int threads, char* db_name,
&& ((superTblInfo->childTblOffset + superTblInfo->childTblLimit ) && ((superTblInfo->childTblOffset + superTblInfo->childTblLimit )
> superTblInfo->childTblCount)) { > superTblInfo->childTblCount)) {
printf("WARNING: specified offset + limit > child table count!\n"); printf("WARNING: specified offset + limit > child table count!\n");
if (!g_args.answer_yes) { prompt();
printf(" Press enter key to continue or Ctrl-C to stop\n\n");
(void)getchar();
}
} }
if ((superTblInfo->childTblExists != TBL_NO_EXISTS) if ((superTblInfo->childTblExists != TBL_NO_EXISTS)
&& (0 == superTblInfo->childTblLimit)) { && (0 == superTblInfo->childTblLimit)) {
printf("WARNING: specified limit = 0, which cannot find table name to insert or query! \n"); printf("WARNING: specified limit = 0, which cannot find table name to insert or query! \n");
if (!g_args.answer_yes) { prompt();
printf(" Press enter key to continue or Ctrl-C to stop\n\n");
(void)getchar();
}
} }
superTblInfo->childTblName = (char*)calloc(1, superTblInfo->childTblName = (char*)calloc(1,
...@@ -5814,49 +5825,49 @@ static void startMultiThreadInsertData(int threads, char* db_name, ...@@ -5814,49 +5825,49 @@ static void startMultiThreadInsertData(int threads, char* db_name,
} }
for (int i = 0; i < threads; i++) { for (int i = 0; i < threads; i++) {
threadInfo *t_info = infos + i; threadInfo *pThreadInfo = infos + i;
t_info->threadID = i; pThreadInfo->threadID = i;
tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE); tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
t_info->time_precision = timePrec; pThreadInfo->time_precision = timePrec;
t_info->superTblInfo = superTblInfo; pThreadInfo->superTblInfo = superTblInfo;
t_info->start_time = start_time; pThreadInfo->start_time = start_time;
t_info->minDelay = UINT64_MAX; pThreadInfo->minDelay = UINT64_MAX;
if ((NULL == superTblInfo) || if ((NULL == superTblInfo) ||
(0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) { (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) {
//t_info->taos = taos; //pThreadInfo->taos = taos;
t_info->taos = taos_connect( pThreadInfo->taos = taos_connect(
g_Dbs.host, g_Dbs.user, g_Dbs.host, g_Dbs.user,
g_Dbs.password, db_name, g_Dbs.port); g_Dbs.password, db_name, g_Dbs.port);
if (NULL == t_info->taos) { if (NULL == pThreadInfo->taos) {
errorPrint( errorPrint(
"connect to server fail from insert sub thread, reason: %s\n", "connect to server fail from insert sub thread, reason: %s\n",
taos_errstr(NULL)); taos_errstr(NULL));
exit(-1); exit(-1);
} }
} else { } else {
t_info->taos = NULL; pThreadInfo->taos = NULL;
} }
/* if ((NULL == superTblInfo) /* if ((NULL == superTblInfo)
|| (0 == superTblInfo->multiThreadWriteOneTbl)) { || (0 == superTblInfo->multiThreadWriteOneTbl)) {
*/ */
t_info->start_table_from = startFrom; pThreadInfo->start_table_from = startFrom;
t_info->ntables = i<b?a+1:a; pThreadInfo->ntables = i<b?a+1:a;
t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1; pThreadInfo->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
startFrom = t_info->end_table_to + 1; startFrom = pThreadInfo->end_table_to + 1;
/* } else { /* } else {
t_info->start_table_from = 0; pThreadInfo->start_table_from = 0;
t_info->ntables = superTblInfo->childTblCount; pThreadInfo->ntables = superTblInfo->childTblCount;
t_info->start_time = t_info->start_time + rand_int() % 10000 - rand_tinyint(); pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint();
} }
*/ */
tsem_init(&(t_info->lock_sem), 0, 0); tsem_init(&(pThreadInfo->lock_sem), 0, 0);
if (ASYNC_MODE == g_Dbs.asyncMode) { if (ASYNC_MODE == g_Dbs.asyncMode) {
pthread_create(pids + i, NULL, asyncWrite, t_info); pthread_create(pids + i, NULL, asyncWrite, pThreadInfo);
} else { } else {
pthread_create(pids + i, NULL, syncWrite, t_info); pthread_create(pids + i, NULL, syncWrite, pThreadInfo);
} }
} }
...@@ -5871,27 +5882,27 @@ static void startMultiThreadInsertData(int threads, char* db_name, ...@@ -5871,27 +5882,27 @@ static void startMultiThreadInsertData(int threads, char* db_name,
double avgDelay = 0; double avgDelay = 0;
for (int i = 0; i < threads; i++) { for (int i = 0; i < threads; i++) {
threadInfo *t_info = infos + i; threadInfo *pThreadInfo = infos + i;
tsem_destroy(&(t_info->lock_sem)); tsem_destroy(&(pThreadInfo->lock_sem));
taos_close(t_info->taos); taos_close(pThreadInfo->taos);
debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n", debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n",
__func__, __LINE__, __func__, __LINE__,
t_info->threadID, t_info->totalInsertRows, pThreadInfo->threadID, pThreadInfo->totalInsertRows,
t_info->totalAffectedRows); pThreadInfo->totalAffectedRows);
if (superTblInfo) { if (superTblInfo) {
superTblInfo->totalAffectedRows += t_info->totalAffectedRows; superTblInfo->totalAffectedRows += pThreadInfo->totalAffectedRows;
superTblInfo->totalInsertRows += t_info->totalInsertRows; superTblInfo->totalInsertRows += pThreadInfo->totalInsertRows;
} else { } else {
g_args.totalAffectedRows += t_info->totalAffectedRows; g_args.totalAffectedRows += pThreadInfo->totalAffectedRows;
g_args.totalInsertRows += t_info->totalInsertRows; g_args.totalInsertRows += pThreadInfo->totalInsertRows;
} }
totalDelay += t_info->totalDelay; totalDelay += pThreadInfo->totalDelay;
cntDelay += t_info->cntDelay; cntDelay += pThreadInfo->cntDelay;
if (t_info->maxDelay > maxDelay) maxDelay = t_info->maxDelay; if (pThreadInfo->maxDelay > maxDelay) maxDelay = pThreadInfo->maxDelay;
if (t_info->minDelay < minDelay) minDelay = t_info->minDelay; if (pThreadInfo->minDelay < minDelay) minDelay = pThreadInfo->minDelay;
} }
cntDelay -= 1; cntDelay -= 1;
...@@ -5947,26 +5958,26 @@ static void startMultiThreadInsertData(int threads, char* db_name, ...@@ -5947,26 +5958,26 @@ static void startMultiThreadInsertData(int threads, char* db_name,
static void *readTable(void *sarg) { static void *readTable(void *sarg) {
#if 1 #if 1
threadInfo *rinfo = (threadInfo *)sarg; threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS *taos = rinfo->taos; TAOS *taos = pThreadInfo->taos;
char command[BUFFER_SIZE] = "\0"; char command[BUFFER_SIZE] = "\0";
uint64_t sTime = rinfo->start_time; uint64_t sTime = pThreadInfo->start_time;
char *tb_prefix = rinfo->tb_prefix; char *tb_prefix = pThreadInfo->tb_prefix;
FILE *fp = fopen(rinfo->fp, "a"); FILE *fp = fopen(pThreadInfo->filePath, "a");
if (NULL == fp) { if (NULL == fp) {
errorPrint( "fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno)); errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
return NULL; return NULL;
} }
int64_t num_of_DPT; int64_t num_of_DPT;
/* if (rinfo->superTblInfo) { /* if (pThreadInfo->superTblInfo) {
num_of_DPT = rinfo->superTblInfo->insertRows; // nrecords_per_table; num_of_DPT = pThreadInfo->superTblInfo->insertRows; // nrecords_per_table;
} else { } else {
*/ */
num_of_DPT = g_args.num_of_DPT; num_of_DPT = g_args.num_of_DPT;
// } // }
int64_t num_of_tables = rinfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
int64_t totalData = num_of_DPT * num_of_tables; int64_t totalData = num_of_DPT * num_of_tables;
bool do_aggreFunc = g_Dbs.do_aggreFunc; bool do_aggreFunc = g_Dbs.do_aggreFunc;
...@@ -6019,17 +6030,17 @@ static void *readTable(void *sarg) { ...@@ -6019,17 +6030,17 @@ static void *readTable(void *sarg) {
static void *readMetric(void *sarg) { static void *readMetric(void *sarg) {
#if 1 #if 1
threadInfo *rinfo = (threadInfo *)sarg; threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS *taos = rinfo->taos; TAOS *taos = pThreadInfo->taos;
char command[BUFFER_SIZE] = "\0"; char command[BUFFER_SIZE] = "\0";
FILE *fp = fopen(rinfo->fp, "a"); FILE *fp = fopen(pThreadInfo->filePath, "a");
if (NULL == fp) { if (NULL == fp) {
printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno)); printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
return NULL; return NULL;
} }
int64_t num_of_DPT = rinfo->superTblInfo->insertRows; int64_t num_of_DPT = pThreadInfo->superTblInfo->insertRows;
int64_t num_of_tables = rinfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
int64_t totalData = num_of_DPT * num_of_tables; int64_t totalData = num_of_DPT * num_of_tables;
bool do_aggreFunc = g_Dbs.do_aggreFunc; bool do_aggreFunc = g_Dbs.do_aggreFunc;
...@@ -6093,19 +6104,11 @@ static void *readMetric(void *sarg) { ...@@ -6093,19 +6104,11 @@ static void *readMetric(void *sarg) {
static void prompt() static void prompt()
{ {
if (!g_args.answer_yes) { if (!g_args.answer_yes) {
printf("Press enter key to continue\n\n"); printf(" Press enter key to continue or Ctrl-C to stop\n\n");
(void)getchar(); (void)getchar();
} }
} }
static void prompt2()
{
if (!g_args.answer_yes) {
printf(" press Enter key to continue or Ctrl-C to stop.");
(void)getchar();
}
}
static int insertTestProcess() { static int insertTestProcess() {
setupForAnsiEscape(); setupForAnsiEscape();
...@@ -6236,8 +6239,8 @@ static void *specifiedTableQuery(void *sarg) { ...@@ -6236,8 +6239,8 @@ static void *specifiedTableQuery(void *sarg) {
uint64_t lastPrintTime = taosGetTimestampMs(); uint64_t lastPrintTime = taosGetTimestampMs();
uint64_t startTs = taosGetTimestampMs(); uint64_t startTs = taosGetTimestampMs();
if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq] != NULL) {
sprintf(pThreadInfo->fp, "%s-%d", sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID); pThreadInfo->threadID);
} }
...@@ -6337,8 +6340,8 @@ static void *superTableQuery(void *sarg) { ...@@ -6337,8 +6340,8 @@ static void *superTableQuery(void *sarg) {
for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
memset(sqlstr,0,sizeof(sqlstr)); memset(sqlstr,0,sizeof(sqlstr));
replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i);
if (g_queryInfo.superQueryInfo.result[j][0] != 0) { if (g_queryInfo.superQueryInfo.result[j] != NULL) {
sprintf(pThreadInfo->fp, "%s-%d", sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.superQueryInfo.result[j], g_queryInfo.superQueryInfo.result[j],
pThreadInfo->threadID); pThreadInfo->threadID);
} }
...@@ -6428,15 +6431,14 @@ static int queryTestProcess() { ...@@ -6428,15 +6431,14 @@ static int queryTestProcess() {
for (uint64_t i = 0; i < nSqlCount; i++) { for (uint64_t i = 0; i < nSqlCount; i++) {
for (int j = 0; j < nConcurrent; j++) { for (int j = 0; j < nConcurrent; j++) {
uint64_t seq = i * nConcurrent + j; uint64_t seq = i * nConcurrent + j;
threadInfo *t_info = infos + seq; threadInfo *pThreadInfo = infos + seq;
t_info->threadID = seq; pThreadInfo->threadID = seq;
t_info->querySeq = i; pThreadInfo->querySeq = i;
if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
char sqlStr[MAX_TB_NAME_SIZE*2]; char sqlStr[MAX_TB_NAME_SIZE*2];
sprintf(sqlStr, "use %s", g_queryInfo.dbName); sprintf(sqlStr, "use %s", g_queryInfo.dbName);
verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr);
if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) { if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) {
taos_close(taos); taos_close(taos);
free(infos); free(infos);
...@@ -6447,10 +6449,10 @@ static int queryTestProcess() { ...@@ -6447,10 +6449,10 @@ static int queryTestProcess() {
} }
} }
t_info->taos = NULL;// TODO: workaround to use separate taos connection; pThreadInfo->taos = NULL;// TODO: workaround to use separate taos connection;
pthread_create(pids + seq, NULL, specifiedTableQuery, pthread_create(pids + seq, NULL, specifiedTableQuery,
t_info); pThreadInfo);
} }
} }
} else { } else {
...@@ -6490,15 +6492,15 @@ static int queryTestProcess() { ...@@ -6490,15 +6492,15 @@ static int queryTestProcess() {
uint64_t startFrom = 0; uint64_t startFrom = 0;
for (int i = 0; i < threads; i++) { for (int i = 0; i < threads; i++) {
threadInfo *t_info = infosOfSub + i; threadInfo *pThreadInfo = infosOfSub + i;
t_info->threadID = i; pThreadInfo->threadID = i;
t_info->start_table_from = startFrom; pThreadInfo->start_table_from = startFrom;
t_info->ntables = i<b?a+1:a; pThreadInfo->ntables = i<b?a+1:a;
t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1; pThreadInfo->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
startFrom = t_info->end_table_to + 1; startFrom = pThreadInfo->end_table_to + 1;
t_info->taos = NULL; // TODO: workaround to use separate taos connection; pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
pthread_create(pidsOfSub + i, NULL, superTableQuery, t_info); pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo);
} }
g_queryInfo.superQueryInfo.threadCnt = threads; g_queryInfo.superQueryInfo.threadCnt = threads;
...@@ -6545,7 +6547,7 @@ static void stable_sub_callback( ...@@ -6545,7 +6547,7 @@ static void stable_sub_callback(
} }
if (param) if (param)
appendResultToFile(res, ((threadInfo *)param)->fp); fetchResult(res, (threadInfo *)param);
// tao_unscribe() will free result. // tao_unscribe() will free result.
} }
...@@ -6558,7 +6560,7 @@ static void specified_sub_callback( ...@@ -6558,7 +6560,7 @@ static void specified_sub_callback(
} }
if (param) if (param)
appendResultToFile(res, ((threadInfo *)param)->fp); fetchResult(res, (threadInfo *)param);
// tao_unscribe() will free result. // tao_unscribe() will free result.
} }
...@@ -6602,6 +6604,7 @@ static void *superSubscribe(void *sarg) { ...@@ -6602,6 +6604,7 @@ static void *superSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg; threadInfo *pThreadInfo = (threadInfo *)sarg;
char subSqlstr[MAX_QUERY_SQL_LENGTH]; char subSqlstr[MAX_QUERY_SQL_LENGTH];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
uint64_t tsubSeq;
if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) {
errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n", errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n",
...@@ -6611,18 +6614,15 @@ static void *superSubscribe(void *sarg) { ...@@ -6611,18 +6614,15 @@ static void *superSubscribe(void *sarg) {
} }
if (pThreadInfo->taos == NULL) { if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL; pThreadInfo->taos = taos_connect(g_queryInfo.host,
taos = taos_connect(g_queryInfo.host,
g_queryInfo.user, g_queryInfo.user,
g_queryInfo.password, g_queryInfo.password,
g_queryInfo.dbName, g_queryInfo.dbName,
g_queryInfo.port); g_queryInfo.port);
if (taos == NULL) { if (pThreadInfo->taos == NULL) {
errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL)); pThreadInfo->threadID, taos_errstr(NULL));
return NULL; return NULL;
} else {
pThreadInfo->taos = taos;
} }
} }
...@@ -6638,6 +6638,8 @@ static void *superSubscribe(void *sarg) { ...@@ -6638,6 +6638,8 @@ static void *superSubscribe(void *sarg) {
char topic[32] = {0}; char topic[32] = {0};
for (uint64_t i = pThreadInfo->start_table_from; for (uint64_t i = pThreadInfo->start_table_from;
i <= pThreadInfo->end_table_to; i++) { i <= pThreadInfo->end_table_to; i++) {
tsubSeq = i - pThreadInfo->start_table_from;
verbosePrint("%s() LN%d, [%d], start=%"PRId64" end=%"PRId64" i=%"PRIu64"\n", verbosePrint("%s() LN%d, [%d], start=%"PRId64" end=%"PRId64" i=%"PRIu64"\n",
__func__, __LINE__, __func__, __LINE__,
pThreadInfo->threadID, pThreadInfo->threadID,
...@@ -6650,19 +6652,19 @@ static void *superSubscribe(void *sarg) { ...@@ -6650,19 +6652,19 @@ static void *superSubscribe(void *sarg) {
g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq], g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq],
subSqlstr, i); subSqlstr, i);
if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->fp, "%s-%d", sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID); pThreadInfo->threadID);
} }
debugPrint("%s() LN%d, [%d] subSqlstr: %s\n", debugPrint("%s() LN%d, [%d] subSqlstr: %s\n",
__func__, __LINE__, pThreadInfo->threadID, subSqlstr); __func__, __LINE__, pThreadInfo->threadID, subSqlstr);
tsub[i] = subscribeImpl( tsub[tsubSeq] = subscribeImpl(
STABLE_CLASS, STABLE_CLASS,
pThreadInfo, subSqlstr, topic, pThreadInfo, subSqlstr, topic,
g_queryInfo.superQueryInfo.subscribeRestart, g_queryInfo.superQueryInfo.subscribeRestart,
g_queryInfo.superQueryInfo.subscribeInterval); g_queryInfo.superQueryInfo.subscribeInterval);
if (NULL == tsub[i]) { if (NULL == tsub[tsubSeq]) {
taos_close(pThreadInfo->taos); taos_close(pThreadInfo->taos);
return NULL; return NULL;
} }
...@@ -6675,57 +6677,66 @@ static void *superSubscribe(void *sarg) { ...@@ -6675,57 +6677,66 @@ static void *superSubscribe(void *sarg) {
} }
TAOS_RES* res = NULL; TAOS_RES* res = NULL;
uint64_t st = 0, et = 0;
while(1) { while(1) {
for (uint64_t i = pThreadInfo->start_table_from; for (uint64_t i = pThreadInfo->start_table_from;
i <= pThreadInfo->end_table_to; i++) { i <= pThreadInfo->end_table_to; i++) {
if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) { tsubSeq = i - pThreadInfo->start_table_from;
continue; if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) {
} continue;
}
res = taos_consume(tsub[i]); st = taosGetTimestampMs();
if (res) { performancePrint("st: %"PRIu64" et: %"PRIu64" st-et: %"PRIu64"\n", st, et, (st - et));
if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { res = taos_consume(tsub[tsubSeq]);
sprintf(pThreadInfo->fp, "%s-%d", et = taosGetTimestampMs();
g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st));
pThreadInfo->threadID);
appendResultToFile(res, pThreadInfo->fp);
}
if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->fp, "%s-%d",
g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
appendResultToFile(res, pThreadInfo->fp);
}
consumed[i] ++;
if ((g_queryInfo.superQueryInfo.subscribeKeepProgress) if (res) {
&& (consumed[i] >= if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
g_queryInfo.superQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) { sprintf(pThreadInfo->filePath, "%s-%d",
printf("keepProgress:%d, resub super table query: %"PRIu64"\n", g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
g_queryInfo.superQueryInfo.subscribeKeepProgress, pThreadInfo->threadID);
pThreadInfo->querySeq); fetchResult(res, pThreadInfo);
taos_unsubscribe(tsub, }
if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
fetchResult(res, pThreadInfo);
}
consumed[tsubSeq] ++;
if ((g_queryInfo.superQueryInfo.subscribeKeepProgress)
&& (consumed[tsubSeq] >=
g_queryInfo.superQueryInfo.resubAfterConsume)) {
printf("keepProgress:%d, resub super table query: %"PRIu64"\n",
g_queryInfo.superQueryInfo.subscribeKeepProgress,
pThreadInfo->querySeq);
taos_unsubscribe(tsub[tsubSeq],
g_queryInfo.superQueryInfo.subscribeKeepProgress); g_queryInfo.superQueryInfo.subscribeKeepProgress);
consumed[i]= 0; consumed[tsubSeq]= 0;
tsub[i] = subscribeImpl( tsub[tsubSeq] = subscribeImpl(
STABLE_CLASS, STABLE_CLASS,
pThreadInfo, subSqlstr, topic, pThreadInfo, subSqlstr, topic,
g_queryInfo.superQueryInfo.subscribeRestart, g_queryInfo.superQueryInfo.subscribeRestart,
g_queryInfo.superQueryInfo.subscribeInterval g_queryInfo.superQueryInfo.subscribeInterval
); );
if (NULL == tsub[i]) { if (NULL == tsub[tsubSeq]) {
taos_close(pThreadInfo->taos); taos_close(pThreadInfo->taos);
return NULL; return NULL;
} }
} }
} }
} }
} }
taos_free_result(res); taos_free_result(res);
for (uint64_t i = pThreadInfo->start_table_from; for (uint64_t i = pThreadInfo->start_table_from;
i <= pThreadInfo->end_table_to; i++) { i <= pThreadInfo->end_table_to; i++) {
taos_unsubscribe(tsub[i], 0); tsubSeq = i - pThreadInfo->start_table_from;
taos_unsubscribe(tsub[tsubSeq], 0);
} }
taos_close(pThreadInfo->taos); taos_close(pThreadInfo->taos);
...@@ -6734,95 +6745,92 @@ static void *superSubscribe(void *sarg) { ...@@ -6734,95 +6745,92 @@ static void *superSubscribe(void *sarg) {
static void *specifiedSubscribe(void *sarg) { static void *specifiedSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg; threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS_SUB* tsub = NULL; // TAOS_SUB* tsub = NULL;
if (pThreadInfo->taos == NULL) { if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL; pThreadInfo->taos = taos_connect(g_queryInfo.host,
taos = taos_connect(g_queryInfo.host,
g_queryInfo.user, g_queryInfo.user,
g_queryInfo.password, g_queryInfo.password,
g_queryInfo.dbName, g_queryInfo.dbName,
g_queryInfo.port); g_queryInfo.port);
if (taos == NULL) { if (pThreadInfo->taos == NULL) {
errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL)); pThreadInfo->threadID, taos_errstr(NULL));
return NULL; return NULL;
} else {
pThreadInfo->taos = taos;
} }
} }
char sqlStr[MAX_TB_NAME_SIZE*2]; char sqlStr[MAX_TB_NAME_SIZE*2];
sprintf(sqlStr, "use %s", g_queryInfo.dbName); sprintf(sqlStr, "use %s", g_queryInfo.dbName);
debugPrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr);
if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
taos_close(pThreadInfo->taos); taos_close(pThreadInfo->taos);
return NULL; return NULL;
} }
char topic[32] = {0}; sprintf(g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
sprintf(topic, "taosdemo-subscribe-%"PRIu64"", pThreadInfo->querySeq); "taosdemo-subscribe-%"PRIu64"-%d",
if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { pThreadInfo->querySeq,
sprintf(pThreadInfo->fp, "%s-%d", pThreadInfo->threadID);
if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq] != NULL) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID); pThreadInfo->threadID);
} }
tsub = subscribeImpl( g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl(
SPECIFIED_CLASS, pThreadInfo, SPECIFIED_CLASS, pThreadInfo,
g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
topic, g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
g_queryInfo.specifiedQueryInfo.subscribeRestart, g_queryInfo.specifiedQueryInfo.subscribeRestart,
g_queryInfo.specifiedQueryInfo.subscribeInterval); g_queryInfo.specifiedQueryInfo.subscribeInterval);
if (NULL == tsub) { if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
taos_close(pThreadInfo->taos); taos_close(pThreadInfo->taos);
return NULL; return NULL;
} }
// start loop to consume result // start loop to consume result
TAOS_RES* res = NULL;
int consumed;
g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
while(1) { while(1) {
if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
continue; continue;
} }
res = taos_consume(tsub); g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = taos_consume(
if (res) { g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]);
if (g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]) {
if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->fp, "%s-%d", sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID); pThreadInfo->threadID);
appendResultToFile(res, pThreadInfo->fp); fetchResult(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], pThreadInfo);
} }
consumed ++; g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++;
if ((g_queryInfo.specifiedQueryInfo.subscribeKeepProgress) if ((g_queryInfo.specifiedQueryInfo.subscribeKeepProgress)
&& (consumed >= && (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] >=
g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) { g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) {
printf("keepProgress:%d, resub specified query: %"PRIu64"\n", printf("keepProgress:%d, resub specified query: %"PRIu64"\n",
g_queryInfo.specifiedQueryInfo.subscribeKeepProgress, g_queryInfo.specifiedQueryInfo.subscribeKeepProgress,
pThreadInfo->querySeq); pThreadInfo->querySeq);
consumed = 0; g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
taos_unsubscribe(tsub, taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID],
g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
tsub = subscribeImpl( g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl(
SPECIFIED_CLASS, SPECIFIED_CLASS,
pThreadInfo, pThreadInfo,
g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
topic, g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
g_queryInfo.specifiedQueryInfo.subscribeRestart, g_queryInfo.specifiedQueryInfo.subscribeRestart,
g_queryInfo.specifiedQueryInfo.subscribeInterval); g_queryInfo.specifiedQueryInfo.subscribeInterval);
if (NULL == tsub) { if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
taos_close(pThreadInfo->taos); taos_close(pThreadInfo->taos);
return NULL; return NULL;
} }
} }
} }
} }
taos_free_result(res); taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]);
taos_unsubscribe(tsub, 0); taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->querySeq], 0);
taos_close(pThreadInfo->taos); taos_close(pThreadInfo->taos);
return NULL; return NULL;
...@@ -6892,18 +6900,18 @@ static int subscribeTestProcess() { ...@@ -6892,18 +6900,18 @@ static int subscribeTestProcess() {
for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) { for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j; uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
threadInfo *t_info = infos + seq; threadInfo *pThreadInfo = infos + seq;
t_info->threadID = seq; pThreadInfo->threadID = seq;
t_info->querySeq = i; pThreadInfo->querySeq = i;
t_info->taos = NULL; // TODO: workaround to use separate taos connection; pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
pthread_create(pids + seq, NULL, specifiedSubscribe, t_info); pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo);
} }
} }
} }
//==== create threads for super table query //==== create threads for super table query
if (g_queryInfo.superQueryInfo.sqlCount <= 0) { if (g_queryInfo.superQueryInfo.sqlCount <= 0) {
printf("%s() LN%d, super table query sqlCount %"PRIu64".\n", debugPrint("%s() LN%d, super table query sqlCount %"PRIu64".\n",
__func__, __LINE__, __func__, __LINE__,
g_queryInfo.superQueryInfo.sqlCount); g_queryInfo.superQueryInfo.sqlCount);
} else { } else {
...@@ -6942,17 +6950,17 @@ static int subscribeTestProcess() { ...@@ -6942,17 +6950,17 @@ static int subscribeTestProcess() {
uint64_t startFrom = 0; uint64_t startFrom = 0;
for (int j = 0; j < threads; j++) { for (int j = 0; j < threads; j++) {
uint64_t seq = i * threads + j; uint64_t seq = i * threads + j;
threadInfo *t_info = infosOfStable + seq; threadInfo *pThreadInfo = infosOfStable + seq;
t_info->threadID = seq; pThreadInfo->threadID = seq;
t_info->querySeq = i; pThreadInfo->querySeq = i;
t_info->start_table_from = startFrom; pThreadInfo->start_table_from = startFrom;
t_info->ntables = j<b?a+1:a; pThreadInfo->ntables = j<b?a+1:a;
t_info->end_table_to = j<b?startFrom+a:startFrom+a-1; pThreadInfo->end_table_to = j<b?startFrom+a:startFrom+a-1;
startFrom = t_info->end_table_to + 1; startFrom = pThreadInfo->end_table_to + 1;
t_info->taos = NULL; // TODO: workaround to use separate taos connection; pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
pthread_create(pidsOfStable + seq, pthread_create(pidsOfStable + seq,
NULL, superSubscribe, t_info); NULL, superSubscribe, pThreadInfo);
} }
} }
...@@ -7181,7 +7189,6 @@ static void querySqlFile(TAOS* taos, char* sqlFile) ...@@ -7181,7 +7189,6 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
} }
memcpy(cmd + cmd_len, line, read_len); memcpy(cmd + cmd_len, line, read_len);
verbosePrint("%s() LN%d cmd: %s\n", __func__, __LINE__, cmd);
if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) { if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) {
errorPrint("%s() LN%d, queryDbExec %s failed!\n", errorPrint("%s() LN%d, queryDbExec %s failed!\n",
__func__, __LINE__, cmd); __func__, __LINE__, cmd);
...@@ -7231,47 +7238,47 @@ static void queryResult() { ...@@ -7231,47 +7238,47 @@ static void queryResult() {
// query data // query data
pthread_t read_id; pthread_t read_id;
threadInfo *rInfo = malloc(sizeof(threadInfo)); threadInfo *pThreadInfo = malloc(sizeof(threadInfo));
assert(rInfo); assert(pThreadInfo);
rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000 pThreadInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
rInfo->start_table_from = 0; pThreadInfo->start_table_from = 0;
//rInfo->do_aggreFunc = g_Dbs.do_aggreFunc; //pThreadInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
if (g_args.use_metric) { if (g_args.use_metric) {
rInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
rInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
tstrncpy(rInfo->tb_prefix, tstrncpy(pThreadInfo->tb_prefix,
g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE); g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE);
} else { } else {
rInfo->ntables = g_args.num_of_tables; pThreadInfo->ntables = g_args.num_of_tables;
rInfo->end_table_to = g_args.num_of_tables -1; pThreadInfo->end_table_to = g_args.num_of_tables -1;
tstrncpy(rInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE); tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
} }
rInfo->taos = taos_connect( pThreadInfo->taos = taos_connect(
g_Dbs.host, g_Dbs.host,
g_Dbs.user, g_Dbs.user,
g_Dbs.password, g_Dbs.password,
g_Dbs.db[0].dbName, g_Dbs.db[0].dbName,
g_Dbs.port); g_Dbs.port);
if (rInfo->taos == NULL) { if (pThreadInfo->taos == NULL) {
errorPrint( "Failed to connect to TDengine, reason:%s\n", errorPrint( "Failed to connect to TDengine, reason:%s\n",
taos_errstr(NULL)); taos_errstr(NULL));
free(rInfo); free(pThreadInfo);
exit(-1); exit(-1);
} }
tstrncpy(rInfo->fp, g_Dbs.resultFile, MAX_FILE_NAME_LEN); tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
if (!g_Dbs.use_metric) { if (!g_Dbs.use_metric) {
pthread_create(&read_id, NULL, readTable, rInfo); pthread_create(&read_id, NULL, readTable, pThreadInfo);
} else { } else {
pthread_create(&read_id, NULL, readMetric, rInfo); pthread_create(&read_id, NULL, readMetric, pThreadInfo);
} }
pthread_join(read_id, NULL); pthread_join(read_id, NULL);
taos_close(rInfo->taos); taos_close(pThreadInfo->taos);
free(rInfo); free(pThreadInfo);
} }
static void testCmdLine() { static void testCmdLine() {
...@@ -7325,6 +7332,9 @@ int main(int argc, char *argv[]) { ...@@ -7325,6 +7332,9 @@ int main(int argc, char *argv[]) {
} else { } else {
testCmdLine(); testCmdLine();
} }
if (g_dupstr)
free(g_dupstr);
} }
return 0; return 0;
......
...@@ -712,13 +712,13 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void * ...@@ -712,13 +712,13 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void *
if (action == SDB_ACTION_INSERT) { if (action == SDB_ACTION_INSERT) {
return sdbPerformInsertAction(pHead, pTable); return sdbPerformInsertAction(pHead, pTable);
} else if (action == SDB_ACTION_DELETE) { } else if (action == SDB_ACTION_DELETE) {
if (qtype == TAOS_QTYPE_FWD) { //if (qtype == TAOS_QTYPE_FWD) {
// Drop database/stable may take a long time and cause a timeout, so we confirm first then reput it into queue // Drop database/stable may take a long time and cause a timeout, so we confirm first then reput it into queue
sdbWriteFwdToQueue(1, hparam, TAOS_QTYPE_QUERY, unused); // sdbWriteFwdToQueue(1, hparam, TAOS_QTYPE_QUERY, unused);
return TSDB_CODE_SUCCESS; // return TSDB_CODE_SUCCESS;
} else { //} else {
return sdbPerformDeleteAction(pHead, pTable); return sdbPerformDeleteAction(pHead, pTable);
} //}
} else if (action == SDB_ACTION_UPDATE) { } else if (action == SDB_ACTION_UPDATE) {
return sdbPerformUpdateAction(pHead, pTable); return sdbPerformUpdateAction(pHead, pTable);
} else { } else {
......
...@@ -1189,8 +1189,8 @@ static int32_t mnodeFindSuperTableTagIndex(SSTableObj *pStable, const char *tagN ...@@ -1189,8 +1189,8 @@ static int32_t mnodeFindSuperTableTagIndex(SSTableObj *pStable, const char *tagN
static int32_t mnodeAddSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) { static int32_t mnodeAddSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) {
SSTableObj *pStable = (SSTableObj *)pMsg->pTable; SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
mLInfo("msg:%p, app:%p stable %s, add tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, mLInfo("msg:%p, app:%p stable %s, add tag result:%s, numOfTags:%d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
tstrerror(code)); tstrerror(code), pStable->numOfTags);
return code; return code;
} }
......
...@@ -23,14 +23,14 @@ ...@@ -23,14 +23,14 @@
typedef void (*FLinuxSignalHandler)(int32_t signum, siginfo_t *sigInfo, void *context); typedef void (*FLinuxSignalHandler)(int32_t signum, siginfo_t *sigInfo, void *context);
void taosSetSignal(int32_t signum, FSignalHandler sigfp) { void taosSetSignal(int32_t signum, FSignalHandler sigfp) {
struct sigaction act = {{0}}; struct sigaction act; memset(&act, 0, sizeof(act));
#if 1 #if 1
act.sa_flags = SA_SIGINFO; act.sa_flags = SA_SIGINFO;
act.sa_sigaction = (FLinuxSignalHandler)sigfp; act.sa_sigaction = (FLinuxSignalHandler)sigfp;
#else #else
act.sa_handler = sigfp; act.sa_handler = sigfp;
#endif #endif
sigaction(signum, &act, NULL); sigaction(signum, &act, NULL);
} }
void taosIgnSignal(int32_t signum) { void taosIgnSignal(int32_t signum) {
......
...@@ -709,7 +709,7 @@ static void syncChooseMaster(SSyncNode *pNode) { ...@@ -709,7 +709,7 @@ static void syncChooseMaster(SSyncNode *pNode) {
} }
static SSyncPeer *syncCheckMaster(SSyncNode *pNode) { static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
int32_t onlineNum = 0; int32_t onlineNum = 0, arbOnlineNum = 0;
int32_t masterIndex = -1; int32_t masterIndex = -1;
int32_t replica = pNode->replica; int32_t replica = pNode->replica;
...@@ -723,13 +723,15 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) { ...@@ -723,13 +723,15 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
SSyncPeer *pArb = pNode->peerInfo[TAOS_SYNC_MAX_REPLICA]; SSyncPeer *pArb = pNode->peerInfo[TAOS_SYNC_MAX_REPLICA];
if (pArb && pArb->role != TAOS_SYNC_ROLE_OFFLINE) { if (pArb && pArb->role != TAOS_SYNC_ROLE_OFFLINE) {
onlineNum++; onlineNum++;
++arbOnlineNum;
replica = pNode->replica + 1; replica = pNode->replica + 1;
} }
if (onlineNum <= replica * 0.5) { if (onlineNum <= replica * 0.5) {
if (nodeRole != TAOS_SYNC_ROLE_UNSYNCED) { if (nodeRole != TAOS_SYNC_ROLE_UNSYNCED) {
if (nodeRole == TAOS_SYNC_ROLE_MASTER && onlineNum == replica * 0.5 && onlineNum >= 1) { if (nodeRole == TAOS_SYNC_ROLE_MASTER && onlineNum == replica * 0.5 && ((replica > 2 && onlineNum - arbOnlineNum > 1) || pNode->replica < 3)) {
sInfo("vgId:%d, self keep work as master, online:%d replica:%d", pNode->vgId, onlineNum, replica); sInfo("vgId:%d, self keep work as master, online:%d replica:%d", pNode->vgId, onlineNum, replica);
masterIndex = pNode->selfIndex;
} else { } else {
nodeRole = TAOS_SYNC_ROLE_UNSYNCED; nodeRole = TAOS_SYNC_ROLE_UNSYNCED;
sInfo("vgId:%d, self change to unsynced state, online:%d replica:%d", pNode->vgId, onlineNum, replica); sInfo("vgId:%d, self change to unsynced state, online:%d replica:%d", pNode->vgId, onlineNum, replica);
...@@ -1002,6 +1004,7 @@ static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) { ...@@ -1002,6 +1004,7 @@ static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) {
if (nodeRole == TAOS_SYNC_ROLE_SLAVE) { if (nodeRole == TAOS_SYNC_ROLE_SLAVE) {
// nodeVersion = pHead->version; // nodeVersion = pHead->version;
code = (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL); code = (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL);
syncConfirmForward(pNode->rid, pHead->version, code, false);
} else { } else {
if (nodeSStatus != TAOS_SYNC_STATUS_INIT) { if (nodeSStatus != TAOS_SYNC_STATUS_INIT) {
code = syncSaveIntoBuffer(pPeer, pHead); code = syncSaveIntoBuffer(pPeer, pHead);
...@@ -1404,7 +1407,7 @@ static void syncMonitorFwdInfos(void *param, void *tmrId) { ...@@ -1404,7 +1407,7 @@ static void syncMonitorFwdInfos(void *param, void *tmrId) {
pthread_mutex_lock(&pNode->mutex); pthread_mutex_lock(&pNode->mutex);
for (int32_t i = 0; i < pSyncFwds->fwds; ++i) { for (int32_t i = 0; i < pSyncFwds->fwds; ++i) {
SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first + i) % SYNC_MAX_FWDS; SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first + i) % SYNC_MAX_FWDS;
if (ABS(time - pFwdInfo->time) < 2000) break; if (ABS(time - pFwdInfo->time) < 10000) break;
sDebug("vgId:%d, forward info expired, hver:%" PRIu64 " curtime:%" PRIu64 " savetime:%" PRIu64, pNode->vgId, sDebug("vgId:%d, forward info expired, hver:%" PRIu64 " curtime:%" PRIu64 " savetime:%" PRIu64, pNode->vgId,
pFwdInfo->version, time, pFwdInfo->time); pFwdInfo->version, time, pFwdInfo->time);
......
...@@ -613,7 +613,7 @@ void doCleanupDataCache(SCacheObj *pCacheObj) { ...@@ -613,7 +613,7 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {
// todo memory leak if there are object with refcount greater than 0 in hash table? // todo memory leak if there are object with refcount greater than 0 in hash table?
taosHashCleanup(pCacheObj->pHashTable); taosHashCleanup(pCacheObj->pHashTable);
taosTrashcanEmpty(pCacheObj, true); taosTrashcanEmpty(pCacheObj, false);
__cache_lock_destroy(pCacheObj); __cache_lock_destroy(pCacheObj);
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
misrepresented as being the original software. misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution. 3. This notice may not be removed or altered from any source distribution.
*/ */
#ifndef _TD_ARM_ #if !defined(_TD_ARM_) && !defined(_TD_MIPS_)
#include <nmmintrin.h> #include <nmmintrin.h>
#endif #endif
......
...@@ -419,7 +419,11 @@ void vnodeDestroy(SVnodeObj *pVnode) { ...@@ -419,7 +419,11 @@ void vnodeDestroy(SVnodeObj *pVnode) {
} }
if (pVnode->tsdb) { if (pVnode->tsdb) {
code = tsdbCloseRepo(pVnode->tsdb, 1); // the deleted vnode does not need to commit, so as to speed up the deletion
int toCommit = 1;
if (pVnode->dropped) toCommit = 0;
code = tsdbCloseRepo(pVnode->tsdb, toCommit);
pVnode->tsdb = NULL; pVnode->tsdb = NULL;
} }
......
...@@ -126,11 +126,16 @@ void vnodeStopSyncFile(int32_t vgId, uint64_t fversion) { ...@@ -126,11 +126,16 @@ void vnodeStopSyncFile(int32_t vgId, uint64_t fversion) {
} }
void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code) { void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code) {
void *pVnode = vnodeAcquire(vgId); SVnodeObj *pVnode = vnodeAcquire(vgId);
if (pVnode == NULL) { if (pVnode == NULL) {
vError("vgId:%d, vnode not found while confirm forward", vgId); vError("vgId:%d, vnode not found while confirm forward", vgId);
} }
if (code == TSDB_CODE_SYN_CONFIRM_EXPIRED && pVnode->status == TAOS_VN_STATUS_CLOSING) {
vDebug("vgId:%d, db:%s, vnode is closing while confirm forward", vgId, pVnode->db);
code = TSDB_CODE_VND_IS_CLOSING;
}
dnodeSendRpcVWriteRsp(pVnode, wparam, code); dnodeSendRpcVWriteRsp(pVnode, wparam, code);
vnodeRelease(pVnode); vnodeRelease(pVnode);
} }
......
...@@ -37,7 +37,7 @@ pipeline { ...@@ -37,7 +37,7 @@ pipeline {
stage('Parallel test stage') { stage('Parallel test stage') {
parallel { parallel {
stage('pytest') { stage('pytest') {
agent{label '184'} agent{label 'slad1'}
steps { steps {
pre_test() pre_test()
sh ''' sh '''
...@@ -62,7 +62,7 @@ pipeline { ...@@ -62,7 +62,7 @@ pipeline {
} }
stage('test_crash_gen') { stage('test_crash_gen') {
agent{label "185"} agent{label "slad2"}
steps { steps {
pre_test() pre_test()
sh ''' sh '''
...@@ -149,7 +149,7 @@ pipeline { ...@@ -149,7 +149,7 @@ pipeline {
} }
stage('test_valgrind') { stage('test_valgrind') {
agent{label "186"} agent{label "slad3"}
steps { steps {
pre_test() pre_test()
......
def pre_test(){
sh '''
sudo rmtaos||echo 'no taosd installed'
'''
sh '''
cd ${WKC}
git reset --hard
git checkout $BRANCH_NAME
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout $BRANCH_NAME
git pull
export TZ=Asia/Harbin
date
rm -rf ${WK}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python/linux/python3/
'''
return 1
}
pipeline {
agent none
environment{
WK = '/var/lib/jenkins/workspace/TDinternal'
WKC= '/var/lib/jenkins/workspace/TDinternal/community'
}
stages {
stage('Parallel test stage') {
parallel {
stage('pytest') {
agent{label 'slam1'}
steps {
pre_test()
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh pytest
date'''
}
}
stage('test_b1') {
agent{label 'slam2'}
steps {
pre_test()
sh '''
cd ${WKC}/tests
./test-all.sh b1
date'''
}
}
stage('test_crash_gen') {
agent{label "slam3"}
steps {
pre_test()
sh '''
cd ${WKC}/tests/pytest
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_crash_gen_val_log.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_taosd_val_log.sh
'''
}
sh'''
systemctl start taosd
sleep 10
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/gotest
bash batchtest.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/examples/python/PYTHONConnectorChecker
python3 PythonChecker.py
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/examples/JDBC/JDBCDemo/
mvn clean package assembly:single -DskipTests >/dev/null
java -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/src/connector/jdbc
mvn clean package -Dmaven.test.skip=true >/dev/null
cd ${WKC}/tests/examples/JDBC/JDBCDemo/
java --class-path=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cp -rf ${WKC}/tests/examples/nodejs ${JENKINS_HOME}/workspace/
cd ${JENKINS_HOME}/workspace/nodejs
node nodejsChecker.js host=localhost
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
dotnet run
'''
}
sh '''
systemctl stop taosd
cd ${WKC}/tests
./test-all.sh b2
date
'''
sh '''
cd ${WKC}/tests
./test-all.sh full unit
date'''
}
}
stage('test_valgrind') {
agent{label "slam4"}
steps {
pre_test()
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
nohup taosd >/dev/null &
sleep 10
python3 concurrent_inquiry.py -c 1
'''
}
sh '''
cd ${WKC}/tests
./test-all.sh full jdbc
date'''
sh '''
cd ${WKC}/tests/pytest
./valgrind-test.sh 2>&1 > mem-error-out.log
./handle_val_log.sh
date
cd ${WKC}/tests
./test-all.sh b3
date'''
sh '''
date
cd ${WKC}/tests
./test-all.sh full example
date'''
}
}
stage('arm64_build'){
agent{label 'arm64'}
steps{
sh '''
cd ${WK}
git fetch
git checkout develop
git pull
cd ${WKC}
git fetch
git checkout develop
git pull
git submodule update
cd ${WKC}/packaging
./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0
'''
}
}
stage('arm32_build'){
agent{label 'arm32'}
steps{
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WK}
git fetch
git checkout develop
git pull
cd ${WKC}
git fetch
git checkout develop
git pull
git submodule update
cd ${WKC}/packaging
./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0
'''
}
}
}
}
}
}
post {
success {
emailext (
subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
failure {
emailext (
subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
}
}
\ No newline at end of file
...@@ -64,18 +64,25 @@ function runQueryPerfTest { ...@@ -64,18 +64,25 @@ function runQueryPerfTest {
[ -f $PERFORMANCE_TEST_REPORT ] && rm $PERFORMANCE_TEST_REPORT [ -f $PERFORMANCE_TEST_REPORT ] && rm $PERFORMANCE_TEST_REPORT
nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/taosperf/ > /dev/null 2>&1 & nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/taosperf/ > /dev/null 2>&1 &
echoInfo "Wait TDengine to start" echoInfo "Wait TDengine to start"
sleep 300 sleep 60
echoInfo "Run Performance Test" echoInfo "Run Performance Test"
cd $WORK_DIR/TDengine/tests/pytest cd $WORK_DIR/TDengine/tests/pytest
python3 query/queryPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT python3 query/queryPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT
mkdir -p /var/lib/perf/
mkdir -p /var/log/perf/
rm -rf /var/lib/perf/*
rm -rf /var/log/perf/*
nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/perf/ > /dev/null 2>&1 &
echoInfo "Wait TDengine to start"
sleep 10
echoInfo "Run Performance Test"
cd $WORK_DIR/TDengine/tests/pytest
python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT
python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT
python3 perfbenchmark/joinPerformance.py | tee -a $PERFORMANCE_TEST_REPORT
} }
......
...@@ -22,7 +22,7 @@ from queue import Queue, Empty ...@@ -22,7 +22,7 @@ from queue import Queue, Empty
from .shared.config import Config from .shared.config import Config
from .shared.db import DbTarget, DbConn from .shared.db import DbTarget, DbConn
from .shared.misc import Logging, Helper, CrashGenError, Status, Progress, Dice from .shared.misc import Logging, Helper, CrashGenError, Status, Progress, Dice
from .shared.types import DirPath from .shared.types import DirPath, IpcStream
# from crash_gen.misc import CrashGenError, Dice, Helper, Logging, Progress, Status # from crash_gen.misc import CrashGenError, Dice, Helper, Logging, Progress, Status
# from crash_gen.db import DbConn, DbTarget # from crash_gen.db import DbConn, DbTarget
...@@ -177,13 +177,12 @@ quorum 2 ...@@ -177,13 +177,12 @@ quorum 2
return "127.0.0.1" return "127.0.0.1"
def getServiceCmdLine(self): # to start the instance def getServiceCmdLine(self): # to start the instance
cmdLine = []
if Config.getConfig().track_memory_leaks: if Config.getConfig().track_memory_leaks:
Logging.info("Invoking VALGRIND on service...") Logging.info("Invoking VALGRIND on service...")
cmdLine = ['valgrind', '--leak-check=yes'] return ['exec /usr/bin/valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()]
# TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control else:
cmdLine += ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen() # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control
return cmdLine return ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen()
def _getDnodes(self, dbc): def _getDnodes(self, dbc):
dbc.query("show dnodes") dbc.query("show dnodes")
...@@ -281,16 +280,16 @@ class TdeSubProcess: ...@@ -281,16 +280,16 @@ class TdeSubProcess:
return '[TdeSubProc: pid = {}, status = {}]'.format( return '[TdeSubProc: pid = {}, status = {}]'.format(
self.getPid(), self.getStatus() ) self.getPid(), self.getStatus() )
def getStdOut(self) -> BinaryIO : def getIpcStdOut(self) -> IpcStream :
if self._popen.universal_newlines : # alias of text_mode if self._popen.universal_newlines : # alias of text_mode
raise CrashGenError("We need binary mode for STDOUT IPC") raise CrashGenError("We need binary mode for STDOUT IPC")
# Logging.info("Type of stdout is: {}".format(type(self._popen.stdout))) # Logging.info("Type of stdout is: {}".format(type(self._popen.stdout)))
return typing.cast(BinaryIO, self._popen.stdout) return typing.cast(IpcStream, self._popen.stdout)
def getStdErr(self) -> BinaryIO : def getIpcStdErr(self) -> IpcStream :
if self._popen.universal_newlines : # alias of text_mode if self._popen.universal_newlines : # alias of text_mode
raise CrashGenError("We need binary mode for STDERR IPC") raise CrashGenError("We need binary mode for STDERR IPC")
return typing.cast(BinaryIO, self._popen.stderr) return typing.cast(IpcStream, self._popen.stderr)
# Now it's always running, since we matched the life cycle # Now it's always running, since we matched the life cycle
# def isRunning(self): # def isRunning(self):
...@@ -301,11 +300,6 @@ class TdeSubProcess: ...@@ -301,11 +300,6 @@ class TdeSubProcess:
def _start(self, cmdLine) -> Popen : def _start(self, cmdLine) -> Popen :
ON_POSIX = 'posix' in sys.builtin_module_names ON_POSIX = 'posix' in sys.builtin_module_names
# Sanity check
# if self.subProcess: # already there
# raise RuntimeError("Corrupt process state")
# Prepare environment variables for coverage information # Prepare environment variables for coverage information
# Ref: https://stackoverflow.com/questions/2231227/python-subprocess-popen-with-a-modified-environment # Ref: https://stackoverflow.com/questions/2231227/python-subprocess-popen-with-a-modified-environment
...@@ -314,9 +308,8 @@ class TdeSubProcess: ...@@ -314,9 +308,8 @@ class TdeSubProcess:
# print(myEnv) # print(myEnv)
# print("Starting TDengine with env: ", myEnv.items()) # print("Starting TDengine with env: ", myEnv.items())
# print("Starting TDengine via Shell: {}".format(cmdLineStr)) print("Starting TDengine: {}".format(cmdLine))
# useShell = True # Needed to pass environments into it
return Popen( return Popen(
' '.join(cmdLine), # ' '.join(cmdLine) if useShell else cmdLine, ' '.join(cmdLine), # ' '.join(cmdLine) if useShell else cmdLine,
shell=True, # Always use shell, since we need to pass ENV vars shell=True, # Always use shell, since we need to pass ENV vars
...@@ -732,19 +725,19 @@ class ServiceManagerThread: ...@@ -732,19 +725,19 @@ class ServiceManagerThread:
self._ipcQueue = Queue() # type: Queue self._ipcQueue = Queue() # type: Queue
self._thread = threading.Thread( # First thread captures server OUTPUT self._thread = threading.Thread( # First thread captures server OUTPUT
target=self.svcOutputReader, target=self.svcOutputReader,
args=(subProc.getStdOut(), self._ipcQueue, logDir)) args=(subProc.getIpcStdOut(), self._ipcQueue, logDir))
self._thread.daemon = True # thread dies with the program self._thread.daemon = True # thread dies with the program
self._thread.start() self._thread.start()
time.sleep(0.01) time.sleep(0.01)
if not self._thread.is_alive(): # What happened? if not self._thread.is_alive(): # What happened?
Logging.info("Failed to started process to monitor STDOUT") Logging.info("Failed to start process to monitor STDOUT")
self.stop() self.stop()
raise CrashGenError("Failed to start thread to monitor STDOUT") raise CrashGenError("Failed to start thread to monitor STDOUT")
Logging.info("Successfully started process to monitor STDOUT") Logging.info("Successfully started process to monitor STDOUT")
self._thread2 = threading.Thread( # 2nd thread captures server ERRORs self._thread2 = threading.Thread( # 2nd thread captures server ERRORs
target=self.svcErrorReader, target=self.svcErrorReader,
args=(subProc.getStdErr(), self._ipcQueue, logDir)) args=(subProc.getIpcStdErr(), self._ipcQueue, logDir))
self._thread2.daemon = True # thread dies with the program self._thread2.daemon = True # thread dies with the program
self._thread2.start() self._thread2.start()
time.sleep(0.01) time.sleep(0.01)
...@@ -887,14 +880,19 @@ class ServiceManagerThread: ...@@ -887,14 +880,19 @@ class ServiceManagerThread:
print("\nNon-UTF8 server output: {}\n".format(bChunk.decode('cp437'))) print("\nNon-UTF8 server output: {}\n".format(bChunk.decode('cp437')))
return None return None
def _textChunkGenerator(self, streamIn: BinaryIO, logDir: str, logFile: str def _textChunkGenerator(self, streamIn: IpcStream, logDir: str, logFile: str
) -> Generator[TextChunk, None, None]: ) -> Generator[TextChunk, None, None]:
''' '''
Take an input stream with binary data, produced a generator of decoded Take an input stream with binary data (likely from Popen), produced a generator of decoded
"text chunks", and also save the original binary data in a log file. "text chunks".
Side effect: it also save the original binary data in a log file.
''' '''
os.makedirs(logDir, exist_ok=True) os.makedirs(logDir, exist_ok=True)
logF = open(os.path.join(logDir, logFile), 'wb') logF = open(os.path.join(logDir, logFile), 'wb')
if logF is None:
Logging.error("Failed to open log file (binary write): {}/{}".format(logDir, logFile))
return
for bChunk in iter(streamIn.readline, b''): for bChunk in iter(streamIn.readline, b''):
logF.write(bChunk) # Write to log file immediately logF.write(bChunk) # Write to log file immediately
tChunk = self._decodeBinaryChunk(bChunk) # decode tChunk = self._decodeBinaryChunk(bChunk) # decode
...@@ -902,14 +900,14 @@ class ServiceManagerThread: ...@@ -902,14 +900,14 @@ class ServiceManagerThread:
yield tChunk # TODO: split into actual text lines yield tChunk # TODO: split into actual text lines
# At the end... # At the end...
streamIn.close() # Close the stream streamIn.close() # Close the incoming stream
logF.close() # Close the output file logF.close() # Close the log file
def svcOutputReader(self, stdOut: BinaryIO, queue, logDir: str): def svcOutputReader(self, ipcStdOut: IpcStream, queue, logDir: str):
''' '''
The infinite routine that processes the STDOUT stream for the sub process being managed. The infinite routine that processes the STDOUT stream for the sub process being managed.
:param stdOut: the IO stream object used to fetch the data from :param ipcStdOut: the IO stream object used to fetch the data from
:param queue: the queue where we dump the roughly parsed chunk-by-chunk text data :param queue: the queue where we dump the roughly parsed chunk-by-chunk text data
:param logDir: where we should dump a verbatim output file :param logDir: where we should dump a verbatim output file
''' '''
...@@ -917,7 +915,7 @@ class ServiceManagerThread: ...@@ -917,7 +915,7 @@ class ServiceManagerThread:
# Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
# print("This is the svcOutput Reader...") # print("This is the svcOutput Reader...")
# stdOut.readline() # Skip the first output? TODO: remove? # stdOut.readline() # Skip the first output? TODO: remove?
for tChunk in self._textChunkGenerator(stdOut, logDir, 'stdout.log') : for tChunk in self._textChunkGenerator(ipcStdOut, logDir, 'stdout.log') :
queue.put(tChunk) # tChunk garanteed not to be None queue.put(tChunk) # tChunk garanteed not to be None
self._printProgress("_i") self._printProgress("_i")
...@@ -940,12 +938,12 @@ class ServiceManagerThread: ...@@ -940,12 +938,12 @@ class ServiceManagerThread:
Logging.info("EOF found TDengine STDOUT, marking the process as terminated") Logging.info("EOF found TDengine STDOUT, marking the process as terminated")
self.setStatus(Status.STATUS_STOPPED) self.setStatus(Status.STATUS_STOPPED)
def svcErrorReader(self, stdErr: BinaryIO, queue, logDir: str): def svcErrorReader(self, ipcStdErr: IpcStream, queue, logDir: str):
# os.makedirs(logDir, exist_ok=True) # os.makedirs(logDir, exist_ok=True)
# logFile = os.path.join(logDir,'stderr.log') # logFile = os.path.join(logDir,'stderr.log')
# fErr = open(logFile, 'wb') # fErr = open(logFile, 'wb')
# for line in iter(err.readline, b''): # for line in iter(err.readline, b''):
for tChunk in self._textChunkGenerator(stdErr, logDir, 'stderr.log') : for tChunk in self._textChunkGenerator(ipcStdErr, logDir, 'stderr.log') :
queue.put(tChunk) # tChunk garanteed not to be None queue.put(tChunk) # tChunk garanteed not to be None
# fErr.write(line) # fErr.write(line)
Logging.info("TDengine STDERR: {}".format(tChunk)) Logging.info("TDengine STDERR: {}".format(tChunk))
......
from typing import Any, List, Dict, NewType from typing import Any, BinaryIO, List, Dict, NewType
from enum import Enum from enum import Enum
DirPath = NewType('DirPath', str) DirPath = NewType('DirPath', str)
...@@ -26,3 +26,5 @@ class TdDataType(Enum): ...@@ -26,3 +26,5 @@ class TdDataType(Enum):
TdColumns = Dict[str, TdDataType] TdColumns = Dict[str, TdDataType]
TdTags = Dict[str, TdDataType] TdTags = Dict[str, TdDataType]
IpcStream = NewType('IpcStream', BinaryIO)
\ No newline at end of file
...@@ -183,7 +183,7 @@ python3 ./test.py -f stable/query_after_reset.py ...@@ -183,7 +183,7 @@ python3 ./test.py -f stable/query_after_reset.py
# perfbenchmark # perfbenchmark
python3 ./test.py -f perfbenchmark/bug3433.py python3 ./test.py -f perfbenchmark/bug3433.py
#python3 ./test.py -f perfbenchmark/bug3589.py #python3 ./test.py -f perfbenchmark/bug3589.py
python3 ./test.py -f perfbenchmark/taosdemoInsert.py
#query #query
python3 ./test.py -f query/filter.py python3 ./test.py -f query/filter.py
......
...@@ -31,7 +31,7 @@ class insertFromCSVPerformace: ...@@ -31,7 +31,7 @@ class insertFromCSVPerformace:
self.host = "127.0.0.1" self.host = "127.0.0.1"
self.user = "root" self.user = "root"
self.password = "taosdata" self.password = "taosdata"
self.config = "/etc/taosperf" self.config = "/etc/perf"
self.conn = taos.connect( self.conn = taos.connect(
self.host, self.host,
self.user, self.user,
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import taos
import sys
import os
import json
import argparse
import subprocess
import datetime
import re
from multiprocessing import cpu_count
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.dnodes import TDDnode
class Taosdemo:
    """Driver for the TD-4153 taosdemo performance scenario.

    Generates temporary taosdemo JSON configuration files (one for inserts,
    one per query), runs the ``taosdemo`` binary from the local build tree
    against a locally started ``taosd``, greps timing figures out of the
    result files, and removes the temporary files afterwards.
    """

    def __init__(self, clearCache, dbName, keep):
        # clearCache: when True, the caller drops the OS page cache before
        #             querying (requires root; see __main__).
        self.clearCache = clearCache
        self.dbname = dbName
        # Always (re)create the database on insert.
        self.drop = "yes"
        # Database KEEP parameter, in days.
        self.keep = keep
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
        # self.config = "/etc/taosperf"
        # self.conn = taos.connect(
        #     self.host,
        #     self.user,
        #     self.password,
        #     self.config)

    # env config
    def getBuildPath(self) -> str:
        """Locate the TDengine build root by walking up for a taosd binary.

        Returns the prefix above ``/debug/build/bin``, or an empty string
        when no suitable ``taosd`` is found.
        """
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        # Fix: buildPath was previously unbound (UnboundLocalError) when the
        # walk found no taosd binary outside a "packaging" directory.
        buildPath = ""
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/debug/build/bin")]
                    break
        return buildPath

    def getExeToolsDir(self) -> str:
        """Return (and cache on self) the directory holding taosd/taosdemo."""
        self.debugdir = self.getBuildPath() + "/debug/build/bin"
        return self.debugdir

    def getCfgDir(self) -> str:
        """Return (and cache on self) the sim dnode1 config directory."""
        self.config = self.getBuildPath() + "/sim/dnode1/cfg"
        return self.config

    # taosdemo insert file config
    def dbinfocfg(self) -> dict:
        """Build the "dbinfo" section of a taosdemo insert JSON file."""
        return {
            "name": self.dbname,
            "drop": self.drop,
            "replica": 1,
            "days": 10,
            "cache": 16,
            "blocks": 8,
            "precision": "ms",
            "keep": self.keep,
            "minRows": 100,
            "maxRows": 4096,
            "comp": 2,
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "update": 0
        }

    # Decorator (class-body scope, applied below to column_tag_count):
    # validates keyword arguments before delegating. On a bad argument it
    # returns an error STRING instead of raising, so callers get a str
    # rather than a list in that case.
    def type_check(func):
        def wrapper(self, **kwargs):
            num_types = ["int", "float", "bigint", "tinyint", "smallint", "double"]
            str_types = ["binary", "nchar"]
            for k ,v in kwargs.items():
                if k.lower() not in num_types and k.lower() not in str_types:
                    return f"args {k} type error, not allowed"
                elif not isinstance(v, (int, list, tuple)):
                    return f"value {v} type error, not allowed"
                elif k.lower() in num_types and not isinstance(v, int):
                    return f"arg {v} takes 1 positional argument must be type int "
                elif isinstance(v, (list,tuple)) and len(v) > 2:
                    return f"arg {v} takes from 1 to 2 positional arguments but more than 2 were given "
                elif isinstance(v,(list,tuple)) and [ False for _ in v if not isinstance(_, int) ]:
                    return f"arg {v} takes from 1 to 2 positional arguments must be type int "
                else:
                    pass
            return func(self, **kwargs)
        return wrapper

    @type_check
    def column_tag_count(self, **column_tag) -> list :
        """Translate TYPE=count kwargs into taosdemo column/tag descriptors.

        Numeric types map to ``{"type", "count"}``; binary/nchar also carry
        a ``"len"`` (default 8, or the second element of a 2-item list).
        """
        init_column_tag = []
        for k, v in column_tag.items():
            # NOTE: re.search(k, <type list>) uses the kwarg NAME as the
            # pattern, matched case-insensitively against the literal list.
            if re.search(k, "int, float, bigint, tinyint, smallint, double", re.IGNORECASE):
                init_column_tag.append({"type": k, "count": v})
            elif re.search(k, "binary, nchar", re.IGNORECASE):
                if isinstance(v, int):
                    init_column_tag.append({"type": k, "count": v, "len":8})
                elif len(v) == 1:
                    init_column_tag.append({"type": k, "count": v[0], "len": 8})
                else:
                    init_column_tag.append({"type": k, "count": v[0], "len": v[1]})
        return init_column_tag

    def stbcfg(self, stb: str, child_tab_count: int, rows: int, prechildtab: str, columns: dict, tags: dict) -> dict:
        """Build one "super_tables" entry for a taosdemo insert JSON file."""
        return {
            "name": stb,
            "child_table_exists": "no",
            "childtable_count": child_tab_count,
            "childtable_prefix": prechildtab,
            "auto_create_table": "no",
            "batch_create_tbl_num": 10,
            "data_source": "rand",
            "insert_mode": "taosc",
            "insert_rows": rows,
            "childtable_limit": 0,
            "childtable_offset": 0,
            "rows_per_tbl": 1,
            "max_sql_len": 65480,
            "disorder_ratio": 0,
            "disorder_range": 1000,
            "timestamp_step": 10,
            "start_timestamp": f"{datetime.datetime.now():%F %X}",
            "sample_format": "csv",
            "sample_file": "./sample.csv",
            "tags_file": "",
            "columns": self.column_tag_count(**columns),
            "tags": self.column_tag_count(**tags)
        }

    def schemecfg(self,intcount=1,floatcount=0,bcount=0,tcount=0,scount=0,doublecount=0,binarycount=0,ncharcount=0):
        """Build a TYPE -> count mapping consumed by column_tag_count()."""
        return {
            "INT": intcount,
            "FLOAT": floatcount,
            "BIGINT": bcount,
            "TINYINT": tcount,
            "SMALLINT": scount,
            "DOUBLE": doublecount,
            "BINARY": binarycount,
            "NCHAR": ncharcount
        }

    def insertcfg(self,db: dict, stbs: list) -> dict:
        """Assemble the complete taosdemo insert configuration document."""
        return {
            "filetype": "insert",
            "cfgdir": self.config,
            "host": self.host,
            "port": 6030,
            "user": self.user,
            "password": self.password,
            "thread_count": cpu_count(),
            "thread_count_create_tbl": cpu_count(),
            "result_file": "/tmp/insert_res.txt",
            "confirm_parameter_prompt": "no",
            "insert_interval": 0,
            "num_of_records_per_req": 100,
            "max_sql_len": 1024000,
            "databases": [{
                "dbinfo": db,
                "super_tables": stbs
            }]
        }

    def createinsertfile(self,db: dict, stbs: list) -> str:
        """Dump the insert config to /tmp and return the file path."""
        date = datetime.datetime.now()
        file_create_table = f"/tmp/insert_{date:%F-%H%M}.json"

        with open(file_create_table, 'w') as f:
            json.dump(self.insertcfg(db, stbs), f)

        return file_create_table

    # taosdemo query file config
    def querysqls(self, sql: str) -> list:
        """Wrap a single SQL string into taosdemo's "sqls" list shape."""
        return [{"sql":sql,"result":""}]

    def querycfg(self, sql: str) -> dict:
        """Assemble the complete taosdemo query configuration document."""
        return {
            "filetype": "query",
            "cfgdir": self.config,
            "host": self.host,
            "port": 6030,
            "user": self.user,
            "password": self.password,
            "confirm_parameter_prompt": "yes",
            "query_times": 10,
            "query_mode": "taosc",
            "databases": self.dbname,
            "specified_table_query": {
                "query_interval": 0,
                "concurrent": cpu_count(),
                "sqls": self.querysqls(sql)
            }
        }

    def createqueryfile(self, sql: str):
        """Dump the query config to /tmp and return the file path."""
        date = datetime.datetime.now()
        file_query_table = f"/tmp/query_{date:%F-%H%M}.json"

        with open(file_query_table,"w") as f:
            json.dump(self.querycfg(sql), f)

        return file_query_table

    # Execute taosdemo, and delete temporary files when finished
    def taosdemotable(self, filepath: str, resultfile="/dev/null"):
        """Run taosdemo with the given JSON config, redirecting its output.

        Query-type configs get ``yes |`` piped in to auto-confirm the
        interactive parameter prompt ("confirm_parameter_prompt": "yes").
        """
        taosdemopath = self.getBuildPath() + "/debug/build/bin"
        with open(filepath,"r") as f:
            filetype = json.load(f)["filetype"]
        if filetype == "insert":
            taosdemo_table_cmd = f"{taosdemopath}/taosdemo -f {filepath} > {resultfile} 2>&1"
        else:
            taosdemo_table_cmd = f"yes | {taosdemopath}/taosdemo -f {filepath} > {resultfile} 2>&1"
        try:
            _ = subprocess.check_output(taosdemo_table_cmd, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            # Best-effort: taosdemo failures are ignored here; timing files
            # are inspected by the caller.
            _ = e.output

    def droptmpfile(self, filepath: str):
        """Remove a temp file if it exists (shell test; absence is not an error)."""
        drop_file_cmd = f"[ -f {filepath} ] && rm -f {filepath}"
        try:
            _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            # [ -f ... ] exits non-zero when the file is absent; swallow it.
            _ = e.output

    # TODO: finish the TD-4153 data-insert and client-query performance tests.
    def td4153insert(self):
        """Create the td4153 schema and insert data, sampling with nmon."""
        tdLog.printNoPrefix("========== start to create table and insert data ==========")
        self.dbname = "td4153"
        db = self.dbinfocfg()
        stblist = []

        columntype = self.schemecfg(intcount=1, ncharcount=100)
        tagtype = self.schemecfg(intcount=1)
        stbname = "stb1"
        prechild = "t1"
        stable = self.stbcfg(
            stb=stbname,
            prechildtab=prechild,
            child_tab_count=2,
            rows=10000,
            columns=columntype,
            tags=tagtype
        )
        stblist.append(stable)
        insertfile = self.createinsertfile(db=db, stbs=stblist)

        # Start nmon sampling every 5s into a per-run file.
        nmon_file = f"/tmp/insert_{datetime.datetime.now():%F-%H%M}.nmon"
        cmd = f"nmon -s5 -F {nmon_file} -m /tmp/"
        try:
            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            _ = e.output

        self.taosdemotable(insertfile)
        self.droptmpfile(insertfile)
        self.droptmpfile("/tmp/insert_res.txt")

        # In order to prevent too many performance files from being generated, the nmon file is deleted.
        # and the delete statement can be cancelled during the actual test.
        self.droptmpfile(nmon_file)

        # NOTE(review): this only COLLECTS nmon PIDs; nothing kills them.
        # Presumably a `| xargs kill -9` was intended — confirm before relying
        # on nmon being stopped here.
        cmd = f"ps -ef|grep -w nmon| grep -v grep | awk '{{print $2}}'"
        try:
            # `time` is expected to come from the util.* wildcard imports at
            # the top of this file — TODO confirm.
            time.sleep(10)
            _ = subprocess.check_output(cmd,shell=True).decode("utf-8")
        except BaseException as e:
            raise e

    def td4153query(self):
        """Run the td4153 queries via taosdemo and print max/min/avg timings."""
        tdLog.printNoPrefix("========== start to query operation ==========")
        sqls = {
            "select_all": "select * from stb1",
            "select_join": "select * from t10, t11 where t10.ts=t11.ts"
        }
        for type, sql in sqls.items():
            result_file = f"/tmp/queryResult_{type}.log"
            query_file = self.createqueryfile(sql)
            try:
                self.taosdemotable(query_file, resultfile=result_file)
            except subprocess.CalledProcessError as e:
                out_put = e.output
            if result_file:
                # Parse "Spent ... s" lines out of the taosdemo result file.
                print(f"execute rows {type.split('_')[1]} sql, the sql is: {sql}")
                max_sql_time_cmd = f'''
                grep -o Spent.*s {result_file} |awk 'NR==1{{max=$2;next}}{{max=max>$2?max:$2}}END{{print "Max=",max,"s"}}'
                '''
                max_sql_time = subprocess.check_output(max_sql_time_cmd, shell=True).decode("UTF-8")
                print(f"{type.split('_')[1]} rows sql time : {max_sql_time}")

                min_sql_time_cmd = f'''
                grep -o Spent.*s {result_file} |awk 'NR==1{{min=$2;next}}{{min=min<$2?min:$2}}END{{print "Min=",min,"s"}}'
                '''
                min_sql_time = subprocess.check_output(min_sql_time_cmd, shell=True).decode("UTF-8")
                print(f"{type.split('_')[1]} rows sql time : {min_sql_time}")

                avg_sql_time_cmd = f'''
                grep -o Spent.*s {result_file} |awk '{{sum+=$2}}END{{print "Average=",sum/NR,"s"}}'
                '''
                avg_sql_time = subprocess.check_output(avg_sql_time_cmd, shell=True).decode("UTF-8")
                print(f"{type.split('_')[1]} rows sql time : {avg_sql_time}")

            self.droptmpfile(query_file)
            self.droptmpfile(result_file)

        drop_query_tmt_file_cmd = " find ./ -name 'querySystemInfo-*' -type f -exec rm {} \; "
        try:
            _ = subprocess.check_output(drop_query_tmt_file_cmd, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            _ = e.output
        pass

    def td4153(self):
        """Run the full TD-4153 scenario: insert, then query."""
        self.td4153insert()
        self.td4153query()
if __name__ == '__main__':
    # CLI entry point: parse options, start a local taosd from the build
    # tree, optionally drop the OS page cache, then run the TD-4153
    # insert + query performance scenario.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-r',
        '--remove-cache',
        action='store_true',
        default=False,
        help='clear cache before query (default: False)')
    parser.add_argument(
        '-d',
        '--database-name',
        action='store',
        default='db',
        type=str,
        help='Database name to be created (default: db)')
    parser.add_argument(
        '-k',
        '--keep-time',
        action='store',
        default=3650,
        type=int,
        help='Database keep parameters (default: 3650)')

    args = parser.parse_args()
    taosdemo = Taosdemo(args.remove_cache, args.database_name, args.keep_time)
    # taosdemo.conn = taos.connect(
    #     taosdemo.host,
    #     taosdemo.user,
    #     taosdemo.password,
    #     taosdemo.config
    # )

    # Launch taosd with the sim dnode1 config; the trailing '&' backgrounds
    # it inside the shell, so check_output returns once the shell exits.
    debugdir = taosdemo.getExeToolsDir()
    cfgdir = taosdemo.getCfgDir()
    cmd = f"{debugdir}/taosd -c {cfgdir} >/dev/null 2>&1 &"
    try:
        _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
    except subprocess.CalledProcessError as e:
        # Best-effort start: failures are ignored here.
        _ = e.output

    if taosdemo.clearCache:
        # must be root permission
        subprocess.check_output("echo 3 > /proc/sys/vm/drop_caches", shell=True).decode("utf-8")

    taosdemo.td4153()
...@@ -24,7 +24,7 @@ class taosdemoPerformace: ...@@ -24,7 +24,7 @@ class taosdemoPerformace:
self.host = "127.0.0.1" self.host = "127.0.0.1"
self.user = "root" self.user = "root"
self.password = "taosdata" self.password = "taosdata"
self.config = "/etc/taosperf" self.config = "/etc/perf"
self.conn = taos.connect( self.conn = taos.connect(
self.host, self.host,
self.user, self.user,
...@@ -77,7 +77,7 @@ class taosdemoPerformace: ...@@ -77,7 +77,7 @@ class taosdemoPerformace:
insert_data = { insert_data = {
"filetype": "insert", "filetype": "insert",
"cfgdir": "/etc/taosperf", "cfgdir": "/etc/perf",
"host": "127.0.0.1", "host": "127.0.0.1",
"port": 6030, "port": 6030,
"user": "root", "user": "root",
...@@ -104,7 +104,7 @@ class taosdemoPerformace: ...@@ -104,7 +104,7 @@ class taosdemoPerformace:
return output return output
def insertData(self): def insertData(self):
os.system("taosdemo -f %s > taosdemoperf.txt" % self.generateJson()) os.system("taosdemo -f %s > taosdemoperf.txt 2>&1" % self.generateJson())
self.createTableTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'") self.createTableTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'")
self.insertRecordsTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'") self.insertRecordsTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'")
self.recordsPerSecond = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'") self.recordsPerSecond = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'")
......
...@@ -847,10 +847,16 @@ sql_error select tbname, t1 from select_tags_mt0 interval(1y); ...@@ -847,10 +847,16 @@ sql_error select tbname, t1 from select_tags_mt0 interval(1y);
#valid sql: select first(c1), last(c2), count(*) from select_tags_mt0 group by tbname, t1; #valid sql: select first(c1), last(c2), count(*) from select_tags_mt0 group by tbname, t1;
#valid sql: select first(c1), tbname, t1 from select_tags_mt0 group by t2; #valid sql: select first(c1), tbname, t1 from select_tags_mt0 group by t2;
print ==================================>TD-4231
sql_error select t1,tbname from select_tags_mt0 where c1<0
sql_error select t1,tbname from select_tags_mt0 where c1<0 and tbname in ('select_tags_tb12')
sql select tbname from select_tags_mt0 where tbname in ('select_tags_tb12');
sql_error select first(c1), last(c2), t1 from select_tags_mt0 group by tbname; sql_error select first(c1), last(c2), t1 from select_tags_mt0 group by tbname;
sql_error select first(c1), last(c2), tbname, t2 from select_tags_mt0 group by tbname; sql_error select first(c1), last(c2), tbname, t2 from select_tags_mt0 group by tbname;
sql_error select first(c1), count(*), t2, t1, tbname from select_tags_mt0 group by tbname; sql_error select first(c1), count(*), t2, t1, tbname from select_tags_mt0 group by tbname;
# this sql is valid: select first(c1), t2 from select_tags_mt0 group by tbname; #valid sql: select first(c1), t2 from select_tags_mt0 group by tbname;
#sql select first(ts), tbname from select_tags_mt0 group by tbname; #sql select first(ts), tbname from select_tags_mt0 group by tbname;
#sql select count(c1) from select_tags_mt0 where c1=99 group by tbname; #sql select count(c1) from select_tags_mt0 where c1=99 group by tbname;
......
...@@ -158,7 +158,7 @@ if $dnode4Vtatus != offline then ...@@ -158,7 +158,7 @@ if $dnode4Vtatus != offline then
sleep 2000 sleep 2000
goto wait_dnode4_vgroup_offline goto wait_dnode4_vgroup_offline
endi endi
if $dnode3Vtatus != master then if $dnode3Vtatus != unsynced then
sleep 2000 sleep 2000
goto wait_dnode4_vgroup_offline goto wait_dnode4_vgroup_offline
endi endi
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册