diff --git a/Jenkinsfile b/Jenkinsfile
index 8e8cec764e5b2d22f9c5c46278d8ec76c65c3b4b..7b7b65d029a4e302da01ac2c0221365c7a81dfe3 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -1,4 +1,5 @@
import hudson.model.Result
+import hudson.model.*;
import jenkins.model.CauseOfInterruption
properties([pipelineTriggers([githubPush()])])
node {
@@ -6,6 +7,7 @@ node {
}
def skipbuild=0
+def win_stop=0
def abortPreviousBuilds() {
def currentJobName = env.JOB_NAME
@@ -110,7 +112,83 @@ def pre_test(){
'''
return 1
}
+def pre_test_win(){
+ bat '''
+ cd C:\\
+ rd /s /Q C:\\TDengine
+ cd C:\\workspace\\TDinternal
+ rd /s /Q C:\\workspace\\TDinternal\\debug
+ cd C:\\workspace\\TDinternal\\community
+ git reset --hard HEAD~10
+ '''
+ script {
+ if (env.CHANGE_TARGET == 'master') {
+ bat '''
+ cd C:\\workspace\\TDinternal\\community
+ git checkout master
+ '''
+ }
+ else if(env.CHANGE_TARGET == '2.0'){
+ bat '''
+ cd C:\\workspace\\TDinternal\\community
+ git checkout 2.0
+ '''
+ }
+ else{
+ bat '''
+ cd C:\\workspace\\TDinternal\\community
+ git checkout develop
+ '''
+ }
+ }
+ bat'''
+ cd C:\\workspace\\TDinternal\\community
+ git pull
+ git fetch origin +refs/pull/%CHANGE_ID%/merge
+ git checkout -qf FETCH_HEAD
+ git clean -dfx
+ cd C:\\workspace\\TDinternal
+ git reset --hard HEAD~10
+ '''
+ script {
+ if (env.CHANGE_TARGET == 'master') {
+ bat '''
+ cd C:\\workspace\\TDinternal
+ git checkout master
+ '''
+ }
+ else if(env.CHANGE_TARGET == '2.0'){
+ bat '''
+ cd C:\\workspace\\TDinternal
+ git checkout 2.0
+ '''
+ }
+ else{
+ bat '''
+ cd C:\\workspace\\TDinternal
+ git checkout develop
+ '''
+ }
+ }
+ bat '''
+ cd C:\\workspace\\TDinternal
+ git pull
+ date
+ git clean -dfx
+ mkdir debug
+ cd debug
+ call "C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\vcvarsall.bat" amd64
+ cmake ../ -G "NMake Makefiles"
+ nmake
+ nmake install
+ xcopy /e/y/i/f C:\\workspace\\TDinternal\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
+ cd C:\\workspace\\TDinternal\\community\\src\\connector\\python
+ python -m pip install .
+
+ '''
+ return 1
+}
pipeline {
agent none
environment{
@@ -387,7 +465,37 @@ pipeline {
date'''
}
}
- }
+ }
+
+ stage('build'){
+      agent{label "wintest"}
+ steps {
+ pre_test()
+ script{
+ while(win_stop == 0){
+ sleep(1)
+ }
+ }
+ }
+ }
+ stage('test'){
+ agent{label "win"}
+ steps{
+
+ catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
+ pre_test_win()
+ bat'''
+ cd C:\\workspace\\TDinternal\\community\\tests\\pytest
+ .\\test-all.bat Wintest
+ '''
+ }
+ script{
+ win_stop=1
+ }
+ }
+ }
+
+
}
}
}
diff --git a/README.md b/README.md
index d5b6f1fa85b962253fe504fadff78e953d4da598..2f45d9618ecdb08a0d360dfcadb90c02bb8290fa 100644
--- a/README.md
+++ b/README.md
@@ -202,6 +202,19 @@ taos
If TDengine shell connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown.
+## Install TDengine by apt-get
+
+If you use Debian or Ubuntu system, you can use the 'apt-get' command to install TDengine from the official repository. Please use the following commands to set it up:
+
+```
+wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
+echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list
+[Optional] echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
+sudo apt-get update
+apt-get policy tdengine
+sudo apt-get install tdengine
+```
+
## Quick Run
If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal: (We take Linux as an example, command on Windows will be `taosd.exe`)
diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md
index a37afa9212911f4e48efe5e923607f3f2e05422a..83915f4973957a68b51c6f155a857f11f2039e72 100644
--- a/documentation20/cn/02.getting-started/docs.md
+++ b/documentation20/cn/02.getting-started/docs.md
@@ -22,6 +22,18 @@ TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟
具体的安装过程,请参见 [TDengine 多种安装包的安装和卸载](https://www.taosdata.com/blog/2019/08/09/566.html) 以及 [视频教程](https://www.taosdata.com/blog/2020/11/11/1941.html)。
+### 使用 apt-get 安装
+
+如果使用 Debian 或 Ubuntu 系统,也可以使用 apt-get 从官方仓库安装,设置方法为:
+```
+wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
+echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list
+[ beta 版安装包仓库为可选安装项 ] echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
+sudo apt-get update
+apt-get policy tdengine
+sudo apt-get install tdengine
+```
+
## 轻松启动
diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md
index 50a8c2fabb8c93a847a79a4de47c218de7ccd60a..fa77fee0abd931cf34290cd75b5a8a95090040c4 100644
--- a/documentation20/en/02.getting-started/docs.md
+++ b/documentation20/en/02.getting-started/docs.md
@@ -20,6 +20,19 @@ Three different packages for TDengine server are provided, please pick up the on
Click [here](https://www.taosdata.com/en/getting-started/#Install-from-Package) to download the install package.
+### Install TDengine by apt-get
+
+If you use Debian or Ubuntu system, you can use the 'apt-get' command to install TDengine from the official repository. Please use the following commands to set it up:
+
+```
+wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
+echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list
+[Optional] echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
+sudo apt-get update
+apt-get policy tdengine
+sudo apt-get install tdengine
+```
+
## Quick Launch
After installation, you can start the TDengine service by the `systemctl` command.
@@ -218,4 +231,4 @@ Comparison matrix as following:
Note: ● has been verified by official tests; ○ has been verified by unofficial tests.
-Please visit Connectors section for more detailed information.
\ No newline at end of file
+Please visit Connectors section for more detailed information.
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
old mode 100755
new mode 100644
index ee2a579c1f1ed99dd2797abdd2ba00e595eb9f8d..a32e7dc093e0b1581e1355c46d1dda31e8b4262e
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -45,8 +45,10 @@ else
inc_link_dir="/usr/local/include"
install_main_dir="/usr/local/Cellar/tdengine/${verNumber}"
-
+ install_main_2_dir="/usr/local/Cellar/tdengine@${verNumber}/${verNumber}"
+
bin_dir="/usr/local/Cellar/tdengine/${verNumber}/bin"
+ bin_2_dir="/usr/local/Cellar/tdengine@${verNumber}/${verNumber}/bin"
fi
service_config_dir="/etc/systemd/system"
@@ -121,16 +123,25 @@ function kill_taosd() {
function install_main_path() {
#create install main dir and all sub dir
- ${csudo} rm -rf ${install_main_dir} || :
- ${csudo} mkdir -p ${install_main_dir}
- ${csudo} mkdir -p ${install_main_dir}/cfg
- ${csudo} mkdir -p ${install_main_dir}/bin
- ${csudo} mkdir -p ${install_main_dir}/connector
- ${csudo} mkdir -p ${install_main_dir}/driver
- ${csudo} mkdir -p ${install_main_dir}/examples
- ${csudo} mkdir -p ${install_main_dir}/include
if [ "$osType" != "Darwin" ]; then
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/cfg
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ ${csudo} mkdir -p ${install_main_dir}/connector
+ ${csudo} mkdir -p ${install_main_dir}/driver
+ ${csudo} mkdir -p ${install_main_dir}/examples
+ ${csudo} mkdir -p ${install_main_dir}/include
${csudo} mkdir -p ${install_main_dir}/init.d
+ else
+ ${csudo} rm -rf ${install_main_dir} || ${csudo} rm -rf ${install_main_2_dir} || :
+ ${csudo} mkdir -p ${install_main_dir} || ${csudo} mkdir -p ${install_main_2_dir}
+ ${csudo} mkdir -p ${install_main_dir}/cfg || ${csudo} mkdir -p ${install_main_2_dir}/cfg
+ ${csudo} mkdir -p ${install_main_dir}/bin || ${csudo} mkdir -p ${install_main_2_dir}/bin
+ ${csudo} mkdir -p ${install_main_dir}/connector || ${csudo} mkdir -p ${install_main_2_dir}/connector
+ ${csudo} mkdir -p ${install_main_dir}/driver || ${csudo} mkdir -p ${install_main_2_dir}/driver
+ ${csudo} mkdir -p ${install_main_dir}/examples || ${csudo} mkdir -p ${install_main_2_dir}/examples
+ ${csudo} mkdir -p ${install_main_dir}/include || ${csudo} mkdir -p ${install_main_2_dir}/include
fi
}
@@ -145,33 +156,34 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/perfMonitor || :
${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
- fi
-
- ${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin
- ${csudo} cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin
-
- if [ "$osType" != "Darwin" ]; then
+
+ ${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin
+ ${csudo} cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin
+
${csudo} cp -r ${script_dir}/remove.sh ${install_main_dir}/bin
${csudo} cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin
${csudo} cp -r ${script_dir}/startPre.sh ${install_main_dir}/bin
- else
- ${csudo} cp -r ${script_dir}/remove_client.sh ${install_main_dir}/bin
- fi
- ${csudo} chmod 0555 ${install_main_dir}/bin/*
-
- #Make link
- [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
- [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
- [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
- [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
-
- if [ "$osType" != "Darwin" ]; then
+
+ ${csudo} chmod 0555 ${install_main_dir}/bin/*
+ #Make link
+ [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
+ [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
+ [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
+ [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
[ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
- fi
+ [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
+ else
- if [ "$osType" != "Darwin" ]; then
- [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
+ ${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin || ${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_2_dir}/bin || :
+    ${csudo} cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin || ${csudo} cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_2_dir}/bin || :
+ ${csudo} cp -r ${script_dir}/remove_client.sh ${install_main_dir}/bin || ${csudo} cp -r ${script_dir}/remove_client.sh ${install_main_2_dir}/bin
+ ${csudo} chmod 0555 ${install_main_dir}/bin/* || ${csudo} chmod 0555 ${install_main_2_dir}/bin/*
+ #Make link
+    [ -x ${install_main_dir}/bin/taos ] || [ -x ${install_main_2_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || ${csudo} ln -s ${install_main_2_dir}/bin/taos ${bin_link_dir}/taos || :
+    [ -x ${install_main_dir}/bin/taosd ] || [ -x ${install_main_2_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || ${csudo} ln -s ${install_main_2_dir}/bin/taosd ${bin_link_dir}/taosd || :
+    [ -x ${install_main_dir}/bin/taosdump ] || [ -x ${install_main_2_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || ${csudo} ln -s ${install_main_2_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
+    [ -x ${install_main_dir}/bin/taosdemo ] || [ -x ${install_main_2_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || ${csudo} ln -s ${install_main_2_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
fi
}
@@ -243,12 +255,12 @@ function install_lib() {
${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so
fi
else
- ${csudo} cp -Rf ${binary_dir}/build/lib/libtaos.${verNumber}.dylib ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
-
- ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${install_main_dir}/driver/libtaos.1.dylib || :
- ${csudo} ln -sf ${install_main_dir}/driver/libtaos.1.dylib ${install_main_dir}/driver/libtaos.dylib || :
- ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib || :
- ${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib || :
+ ${csudo} cp -Rf ${binary_dir}/build/lib/libtaos.${verNumber}.dylib ${install_main_dir}/driver || ${csudo} cp -Rf ${binary_dir}/build/lib/libtaos.${verNumber}.dylib ${install_main_2_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* || ${csudo} chmod 777 ${install_main_2_dir}/driver/*
+
+ ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${install_main_dir}/driver/libtaos.1.dylib || ${csudo} ln -sf ${install_main_2_dir}/driver/libtaos.* ${install_main_2_dir}/driver/libtaos.1.dylib || :
+ ${csudo} ln -sf ${install_main_dir}/driver/libtaos.1.dylib ${install_main_dir}/driver/libtaos.dylib || ${csudo} ln -sf ${install_main_2_dir}/driver/libtaos.1.dylib ${install_main_2_dir}/driver/libtaos.dylib || :
+ ${csudo} ln -sf ${install_main_dir}/driver/libtaos.${verNumber}.dylib ${lib_link_dir}/libtaos.1.dylib || ${csudo} ln -sf ${install_main_2_dir}/driver/libtaos.${verNumber}.dylib ${lib_link_dir}/libtaos.1.dylib || :
+ ${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib || :
fi
install_jemalloc
@@ -261,12 +273,12 @@ function install_lib() {
function install_header() {
if [ "$osType" != "Darwin" ]; then
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
- fi
- ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
- if [ "$osType" != "Darwin" ]; then
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+ else
+ ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h ${install_main_dir}/include || ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h ${install_main_2_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* || ${csudo} chmod 644 ${install_main_2_dir}/include/*
fi
}
@@ -278,23 +290,30 @@ function install_config() {
[ -f ${script_dir}/../cfg/taos.cfg ] &&
${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/*
- fi
-
- ${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
-
- if [ "$osType" != "Darwin" ]; then ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+ ${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+ else
+ ${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org || ${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_2_dir}/cfg/taos.cfg.org
fi
}
function install_log() {
${csudo} rm -rf ${log_dir} || :
${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
- ${csudo} ln -s ${log_dir} ${install_main_dir}/log
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} ln -s ${log_dir} ${install_main_dir}/log
+ else
+ ${csudo} ln -s ${log_dir} ${install_main_dir}/log || ${csudo} ln -s ${log_dir} ${install_main_2_dir}/log
+ fi
}
function install_data() {
${csudo} mkdir -p ${data_dir}
- ${csudo} ln -s ${data_dir} ${install_main_dir}/data
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} ln -s ${data_dir} ${install_main_dir}/data
+ else
+ ${csudo} ln -s ${data_dir} ${install_main_dir}/data || ${csudo} ln -s ${data_dir} ${install_main_2_dir}/data
+ fi
}
function install_connector() {
@@ -308,12 +327,21 @@ function install_connector() {
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
- ${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
- ${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
+ ${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
+ else
+    ${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector || ${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_2_dir}/connector
+    ${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null || ${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_2_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || ${csudo} chmod 777 ${install_main_2_dir}/connector/*.jar || echo &> /dev/null
+ fi
}
function install_examples() {
- ${csudo} cp -rf ${source_dir}/tests/examples/* ${install_main_dir}/examples
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} cp -rf ${source_dir}/tests/examples/* ${install_main_dir}/examples
+ else
+ ${csudo} cp -rf ${source_dir}/tests/examples/* ${install_main_dir}/examples || ${csudo} cp -rf ${source_dir}/tests/examples/* ${install_main_2_dir}/examples
+ fi
}
function clean_service_on_sysvinit() {
@@ -532,8 +560,16 @@ function install_TDengine() {
## ==============================Main program starts from here============================
echo source directory: $1
echo binary directory: $2
-if [ -x ${bin_dir}/taos ]; then
- update_TDengine
+if [ "$osType" != "Darwin" ]; then
+ if [ -x ${bin_dir}/taos ]; then
+ update_TDengine
+ else
+ install_TDengine
+ fi
else
- install_TDengine
+ if [ -x ${bin_dir}/taos ] || [ -x ${bin_2_dir}/taos ]; then
+ update_TDengine
+ else
+ install_TDengine
+ fi
fi
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index c858bd5867c64da4c7397aed2035119ff414d112..64195b86a1acfb4e7c25a9d51152d02a597347d9 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -41,6 +41,15 @@ extern "C" {
#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_TEMP_TABLE))
+#define UTIL_GET_VGROUPMAP(pSql) \
+ (pSql->pTscObj->pClusterInfo->vgroupMap)
+
+#define UTIL_GET_TABLEMETA(pSql) \
+ (pSql->pTscObj->pClusterInfo->tableMetaMap)
+
+#define UTIL_GET_VGROUPLIST(pSql) \
+ (pSql->pTscObj->pClusterInfo->vgroupListBuf)
+
#pragma pack(push,1)
// this struct is transfered as binary, padding two bytes to avoid
// an 'uid' whose low bytes is 0xff being recoginized as NULL,
@@ -106,10 +115,11 @@ typedef struct SBlockKeyInfo {
SBlockKeyTuple* pKeyTuple;
} SBlockKeyInfo;
+
int32_t converToStr(char *str, int type, void *buf, int32_t bufSize, int32_t *len);
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
-void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta);
+void tscDestroyDataBlock(SSqlObj *pSql, STableDataBlocks* pDataBlock, bool removeMeta);
void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks* dataBuf);
int tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf, SBlockKeyInfo* pBlkKeyInfo);
int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows);
@@ -120,12 +130,12 @@ void doRetrieveSubqueryData(SSchedMsg *pMsg);
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, int16_t bytes,
uint32_t offset);
-void* tscDestroyBlockArrayList(SArray* pDataBlockList);
+void* tscDestroyBlockArrayList(SSqlObj* pSql, SArray* pDataBlockList);
void* tscDestroyUdfArrayList(SArray* pUdfList);
-void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta);
+void* tscDestroyBlockHashTable(SSqlObj* pSql, SHashObj* pBlockHashTable, bool removeMeta);
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
-int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBlockMap);
+int32_t tscMergeTableDataBlocks(SSqlObj *pSql, SInsertStatementParam *pInsertParam, bool freeBlockMap);
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, SName* pName, STableMeta* pTableMeta,
STableDataBlocks** dataBlocks, SArray* pBlockList);
@@ -139,6 +149,7 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
bool tscIsIrateQuery(SQueryInfo* pQueryInfo);
+bool tscQueryContainsFunction(SQueryInfo* pQueryInfo, int16_t functionId);
bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
@@ -155,7 +166,7 @@ bool isSimpleAggregateRv(SQueryInfo* pQueryInfo);
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
-bool tscIsDiffDerivQuery(SQueryInfo* pQueryInfo);
+bool tscIsDiffDerivLikeQuery(SQueryInfo* pQueryInfo);
bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscIsProjectionQuery(SQueryInfo* pQueryInfo);
@@ -353,7 +364,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
-int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, size_t *tableMetaCapacity, STableMeta **ppStable);
+int32_t tscCreateTableMetaFromSTableMeta(SSqlObj *pSql, STableMeta** ppChild, const char* name, size_t *tableMetaCapacity, STableMeta **ppStable);
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo);
@@ -374,6 +385,7 @@ void tscRemoveCachedTableMeta(STableMetaInfo* pTableMetaInfo, uint64_t id);
char* cloneCurrentDBName(SSqlObj* pSql);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index dd4ff7eb57f20cfc8d31328630fbb14b7acf7017..eebb471e305132fa5b3b403d149d449d36072ab2 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -48,6 +48,7 @@ struct SSqlInfo;
typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int32_t numOfRows);
+
typedef struct SNewVgroupInfo {
int32_t vgId;
int8_t inUse;
@@ -139,6 +140,13 @@ typedef enum {
ROW_COMPARE_NEED = 1,
} ERowCompareStat;
+typedef struct {
+ void *vgroupMap;
+ void *tableMetaMap;
+ void *vgroupListBuf;
+ int64_t ref;
+} SClusterInfo;
+
int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec);
int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols,
@@ -324,6 +332,7 @@ typedef struct STscObj {
char acctId[TSDB_ACCT_ID_LEN];
char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
char sversion[TSDB_VERSION_LEN];
+ char clusterId[TSDB_CLUSTER_ID_LEN];
char writeAuth : 1;
char superAuth : 1;
uint32_t connId;
@@ -332,9 +341,11 @@ typedef struct STscObj {
struct SSqlObj * sqlList;
struct SSqlStream *streamList;
SRpcObj *pRpcObj;
+ SClusterInfo *pClusterInfo;
SRpcCorEpSet *tscCorMgmtEpSet;
pthread_mutex_t mutex;
int32_t numOfObj; // number of sqlObj from this tscObj
+
SReqOrigin from;
} STscObj;
@@ -417,6 +428,9 @@ int tscAcquireRpc(const char *key, const char *user, const char *secret,void **
void tscReleaseRpc(void *param);
void tscInitMsgsFp();
+void *tscAcquireClusterInfo(const char *clusterId);
+void tscReleaseClusterInfo(const char *clusterId);
+
int tsParseSql(SSqlObj *pSql, bool initial);
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet);
diff --git a/src/client/src/tscGlobalmerge.c b/src/client/src/tscGlobalmerge.c
index 6acbfe3e8929c9a5a46ed0370f6cfb883988ef3e..14e426ee69f1b11fe09ef23d66190c75a2628e10 100644
--- a/src/client/src/tscGlobalmerge.c
+++ b/src/client/src/tscGlobalmerge.c
@@ -648,7 +648,8 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
for(int32_t j = 0; j < numOfExpr; ++j) {
pCtx[j].pOutput += (pCtx[j].outputBytes * numOfRows);
- if (pCtx[j].functionId == TSDB_FUNC_TOP || pCtx[j].functionId == TSDB_FUNC_BOTTOM) {
+ if (pCtx[j].functionId == TSDB_FUNC_TOP || pCtx[j].functionId == TSDB_FUNC_BOTTOM ||
+ pCtx[j].functionId == TSDB_FUNC_SAMPLE) {
if(j > 0) pCtx[j].ptsOutputBuf = pCtx[j - 1].pOutput;
}
}
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index 07db18b498873f4a023d8ea76aadd7e76a4cd8d2..df9857e97e7b8cc2f3ed8543939bc30d1fe5608b 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -924,8 +924,8 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE) {
pRes->code = tscProcessShowCreateDatabase(pSql);
} else if (pCmd->command == TSDB_SQL_RESET_CACHE) {
- taosHashClear(tscTableMetaMap);
- taosCacheEmpty(tscVgroupListBuf);
+ taosHashClear(UTIL_GET_TABLEMETA(pSql));
+ taosCacheEmpty(UTIL_GET_VGROUPLIST(pSql));
pRes->code = TSDB_CODE_SUCCESS;
} else if (pCmd->command == TSDB_SQL_SERV_VERSION) {
pRes->code = tscProcessServerVer(pSql);
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index 1bf27e6cad1d57fdfd4b786d1cdcea981bf3333b..28a88e75f82d40a9635c2e7add35b2ff7383ea19 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -771,6 +771,10 @@ void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks *dataBuf) {
TSKEY tj = *(TSKEY *)(pBlockData + dataBuf->rowSize * j);
if (ti == tj) {
+ if (dataBuf->pTableMeta && dataBuf->pTableMeta->tableInfo.update != TD_ROW_DISCARD_UPDATE) {
+ memmove(pBlockData + dataBuf->rowSize * i, pBlockData + dataBuf->rowSize * j, dataBuf->rowSize);
+ }
+
++j;
continue;
}
@@ -841,6 +845,10 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk
TSKEY tj = (pBlkKeyTuple + j)->skey;
if (ti == tj) {
+ if (dataBuf->pTableMeta && dataBuf->pTableMeta->tableInfo.update != TD_ROW_DISCARD_UPDATE) {
+ memmove(pBlkKeyTuple + i, pBlkKeyTuple + j, sizeof(SBlockKeyTuple));
+ }
+
++j;
continue;
}
@@ -1529,7 +1537,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
// merge according to vgId
if (!TSDB_QUERY_HAS_TYPE(pInsertParam->insertType, TSDB_QUERY_TYPE_STMT_INSERT) && taosHashGetSize(pInsertParam->pTableBlockHashList) > 0) {
- if ((code = tscMergeTableDataBlocks(pInsertParam, true)) != TSDB_CODE_SUCCESS) {
+ if ((code = tscMergeTableDataBlocks(pSql, pInsertParam, true)) != TSDB_CODE_SUCCESS) {
goto _clean;
}
}
@@ -1635,7 +1643,7 @@ static int doPackSendDataBlock(SSqlObj* pSql, SInsertStatementParam *pInsertPara
return tscInvalidOperationMsg(pInsertParam->msg, "too many rows in sql, total number of rows should be less than 32767", NULL);
}
- if ((code = tscMergeTableDataBlocks(pInsertParam, true)) != TSDB_CODE_SUCCESS) {
+ if ((code = tscMergeTableDataBlocks(pSql, pInsertParam, true)) != TSDB_CODE_SUCCESS) {
return code;
}
@@ -1696,7 +1704,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
SInsertStatementParam *pInsertParam = &pCmd->insertParam;
destroyTableNameList(pInsertParam);
- pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks);
+ pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pParentSql, pInsertParam->pDataBlocks);
if (pInsertParam->pTableBlockHashList == NULL) {
pInsertParam->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c
index 22392ba306faeed05af5d695ca0090057ac211cf..382198e8b3e8422651b0072818a73bbd7b5c6feb 100644
--- a/src/client/src/tscParseLineProtocol.c
+++ b/src/client/src/tscParseLineProtocol.c
@@ -574,10 +574,10 @@ static int32_t retrieveTableMeta(TAOS* taos, char* tableName, STableMeta** pTabl
char fullTableName[TSDB_TABLE_FNAME_LEN] = {0};
memset(fullTableName, 0, tListLen(fullTableName));
tNameExtractFullName(&sname, fullTableName);
- tscFreeRegisteredSqlObj(pSql);
size_t size = 0;
- taosHashGetCloneExt(tscTableMetaMap, fullTableName, strlen(fullTableName), NULL, (void**)&tableMeta, &size);
+ taosHashGetCloneExt(UTIL_GET_TABLEMETA(pSql), fullTableName, strlen(fullTableName), NULL, (void**)&tableMeta, &size);
+ tscFreeRegisteredSqlObj(pSql);
}
if (tableMeta != NULL) {
diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c
index d0ac0ccf4ee4bfa381a78090409a761717ceb4b0..f6a64cb5b1c04b461e38acce2ea1cae65dec71e6 100644
--- a/src/client/src/tscPrepare.c
+++ b/src/client/src/tscPrepare.c
@@ -1163,7 +1163,7 @@ static int insertStmtExecute(STscStmt* stmt) {
fillTablesColumnsNull(stmt->pSql);
- int code = tscMergeTableDataBlocks(&stmt->pSql->cmd.insertParam, false);
+ int code = tscMergeTableDataBlocks(stmt->pSql, &stmt->pSql->cmd.insertParam, false);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -1194,7 +1194,7 @@ static int insertStmtExecute(STscStmt* stmt) {
pCmd->insertParam.numOfTables = 0;
tfree(pCmd->insertParam.pTableNameList);
- pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks);
+ pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pSql, pCmd->insertParam.pDataBlocks);
return pSql->res.code;
}
@@ -1215,7 +1215,7 @@ static void insertBatchClean(STscStmt* pStmt) {
tfree(pCmd->insertParam.pTableNameList);
- pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks);
+ pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pSql, pCmd->insertParam.pDataBlocks);
pCmd->insertParam.numOfTables = 0;
taosHashClear(pCmd->insertParam.pTableBlockHashList);
@@ -1242,7 +1242,7 @@ static int insertBatchStmtExecute(STscStmt* pStmt) {
fillTablesColumnsNull(pStmt->pSql);
- if ((code = tscMergeTableDataBlocks(&pStmt->pSql->cmd.insertParam, false)) != TSDB_CODE_SUCCESS) {
+ if ((code = tscMergeTableDataBlocks(pStmt->pSql, &pStmt->pSql->cmd.insertParam, false)) != TSDB_CODE_SUCCESS) {
return code;
}
@@ -1773,8 +1773,8 @@ int taos_stmt_close(TAOS_STMT* stmt) {
if (pStmt->pSql && pStmt->pSql->res.code != 0) {
rmMeta = true;
}
- tscDestroyDataBlock(pStmt->mtb.lastBlock, rmMeta);
- pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, rmMeta);
+ tscDestroyDataBlock(pStmt->pSql, pStmt->mtb.lastBlock, rmMeta);
+ pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->pSql, pStmt->mtb.pTableBlockHashList, rmMeta);
if (pStmt->pSql){
taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList);
}
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index a62a8ac3efca0836faab778224aa4a831e84e580..2d95760d375738079a079c7cff5da941702cbc86 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -2499,6 +2499,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
case TSDB_FUNC_MAX:
case TSDB_FUNC_DIFF:
case TSDB_FUNC_DERIVATIVE:
+ case TSDB_FUNC_CSUM:
case TSDB_FUNC_CEIL:
case TSDB_FUNC_FLOOR:
case TSDB_FUNC_ROUND:
@@ -2551,7 +2552,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
// set the first column ts for diff query
- if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
+ if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE || functionId == TSDB_FUNC_CSUM) {
SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0};
SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP,
TSDB_KEYSIZE, getNewResColId(pCmd), TSDB_KEYSIZE, false);
@@ -2591,7 +2592,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO);
} else if (info.precision == TSDB_TIME_PRECISION_MICRO) {
tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI);
- }
+ }
if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
@@ -2747,6 +2748,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
case TSDB_FUNC_TOP:
case TSDB_FUNC_BOTTOM:
+ case TSDB_FUNC_MAVG:
+ case TSDB_FUNC_SAMPLE:
case TSDB_FUNC_PERCT:
case TSDB_FUNC_APERCT: {
// 1. valid the number of parameters
@@ -2778,7 +2781,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
// 2. valid the column type
- if (!IS_NUMERIC_TYPE(pSchema->type)) {
+ if (functionId != TSDB_FUNC_SAMPLE && !IS_NUMERIC_TYPE(pSchema->type)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -2817,11 +2820,38 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), interResult, false);
tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double));
+ } else if (functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_SAMPLE) {
+ tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true);
+
+ int64_t numRowsSelected = GET_INT32_VAL(val);
+ if (numRowsSelected <= 0 || numRowsSelected > 1000) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg12);
+ }
+
+ // todo REFACTOR
+      // set the first column ts for mavg/sample query (TS_DUMMY for mavg, TS for sample)
+ int32_t tsFuncId = (functionId == TSDB_FUNC_MAVG) ? TSDB_FUNC_TS_DUMMY : TSDB_FUNC_TS;
+ SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
+ pExpr = tscExprAppend(pQueryInfo, tsFuncId, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pCmd),
+ 0, false);
+ tstrncpy(pExpr->base.aliasName, aAggs[tsFuncId].name, sizeof(pExpr->base.aliasName));
+
+ const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX;
+ SColumnList ids = createColumnList(1, index.tableIndex, TS_COLUMN_INDEX);
+ insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP,
+ aAggs[tsFuncId].name, pExpr);
+
+ colIndex += 1; // the first column is ts
+
+ getResultDataInfo(pSchema->type, pSchema->bytes, functionId, (int32_t)numRowsSelected, &resultType, &resultSize, &interResult, 0, false,
+ pUdfInfo);
+ pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), interResult, false);
+ tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t));
} else {
tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true);
- int64_t nTop = GET_INT32_VAL(val);
- if (nTop <= 0 || nTop > 100) { // todo use macro
+ int64_t numRowsSelected = GET_INT32_VAL(val);
+ if (numRowsSelected <= 0 || numRowsSelected > 100) { // todo use macro
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg12);
}
@@ -3314,7 +3344,8 @@ int32_t tscTansformFuncForSTableQuery(SQueryInfo* pQueryInfo) {
if ((functionId >= TSDB_FUNC_SUM && functionId <= TSDB_FUNC_TWA) ||
(functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_STDDEV_DST) ||
- (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_IRATE)) {
+ (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_IRATE) ||
+ (functionId == TSDB_FUNC_SAMPLE)) {
if (getResultDataInfo(pSrcSchema->type, pSrcSchema->bytes, functionId, (int32_t)pExpr->base.param[0].i64, &type, &bytes,
&interBytes, 0, true, NULL) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
@@ -3369,8 +3400,8 @@ void tscRestoreFuncForSTableQuery(SQueryInfo* pQueryInfo) {
}
bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
- const char* msg1 = "TWA/Diff/Derivative/Irate are not allowed to apply to super table directly";
- const char* msg2 = "TWA/Diff/Derivative/Irate only support group by tbname for super table query";
+ const char* msg1 = "TWA/Diff/Derivative/Irate/CSUM/MAVG/SAMPLE are not allowed to apply to super table directly";
+ const char* msg2 = "TWA/Diff/Derivative/Irate/CSUM/MAVG/SAMPLE only support group by tbname for super table query";
const char* msg3 = "functions not support for super table query";
// filter sql function not supported by metric query yet.
@@ -3387,7 +3418,8 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo)
}
}
- if (tscIsTWAQuery(pQueryInfo) || tscIsDiffDerivQuery(pQueryInfo) || tscIsIrateQuery(pQueryInfo)) {
+ if (tscIsTWAQuery(pQueryInfo) || tscIsDiffDerivLikeQuery(pQueryInfo) || tscIsIrateQuery(pQueryInfo) ||
+ tscQueryContainsFunction(pQueryInfo, TSDB_FUNC_SAMPLE)) {
if (pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return true;
@@ -5446,7 +5478,7 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
const char* msg1 = "value is expected";
const char* msg2 = "invalid fill option";
- const char* msg3 = "top/bottom not support fill";
+ const char* msg3 = "top/bottom/sample not support fill";
const char* msg4 = "illegal value or data overflow";
const char* msg5 = "fill only available for interval query";
const char* msg6 = "not supported function now";
@@ -5554,7 +5586,8 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
for(int32_t i = 0; i < numOfExprs; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
- if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) {
+ if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM
+ || pExpr->base.functionId == TSDB_FUNC_SAMPLE) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
@@ -6284,7 +6317,9 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu
}
int32_t f = pExpr->base.functionId;
- if ((f == TSDB_FUNC_PRJ && pExpr->base.numOfParams == 0) || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ARITHM || f == TSDB_FUNC_DERIVATIVE ||
+ if ((f == TSDB_FUNC_PRJ && pExpr->base.numOfParams == 0) ||
+ f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ARITHM || f == TSDB_FUNC_DERIVATIVE ||
+ f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG ||
f == TSDB_FUNC_CEIL || f == TSDB_FUNC_FLOOR || f == TSDB_FUNC_ROUND)
{
isProjectionFunction = true;
@@ -7025,7 +7060,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
int32_t f = TSDB_FUNC_TAG;
- if (tscIsDiffDerivQuery(pQueryInfo)) {
+ if (tscIsDiffDerivLikeQuery(pQueryInfo)) {
f = TSDB_FUNC_TAGPRJ;
}
@@ -7170,6 +7205,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
}
if (IS_MULTIOUTPUT(aAggs[f].status) && f != TSDB_FUNC_TOP && f != TSDB_FUNC_BOTTOM && f != TSDB_FUNC_DIFF &&
+ f != TSDB_FUNC_MAVG && f != TSDB_FUNC_CSUM && f != TSDB_FUNC_SAMPLE &&
f != TSDB_FUNC_DERIVATIVE && f != TSDB_FUNC_TAGPRJ && f != TSDB_FUNC_PRJ) {
return invalidOperationMsg(msg, msg1);
}
@@ -7188,7 +7224,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
}
// projection query on super table does not compatible with "group by" syntax
- if (tscIsProjectionQuery(pQueryInfo) && !(tscIsDiffDerivQuery(pQueryInfo))) {
+ if (tscIsProjectionQuery(pQueryInfo) && !(tscIsDiffDerivLikeQuery(pQueryInfo))) {
return invalidOperationMsg(msg, msg3);
}
@@ -8280,7 +8316,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
size_t len = strlen(name);
- if (NULL == taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&pTableMeta, &tableMetaCapacity)) {
+ if (NULL == taosHashGetCloneExt(UTIL_GET_TABLEMETA(pSql), name, len, NULL, (void **)&pTableMeta, &tableMetaCapacity)) {
// not found
tfree(pTableMeta);
}
@@ -8291,7 +8327,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
// avoid mem leak, may should update pTableMeta
void* pVgroupIdList = NULL;
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
- code = tscCreateTableMetaFromSTableMeta((STableMeta **)(&pTableMeta), name, &tableMetaCapacity, (STableMeta **)(&pSTMeta));
+ code = tscCreateTableMetaFromSTableMeta(pSql, (STableMeta **)(&pTableMeta), name, &tableMetaCapacity, (STableMeta **)(&pSTMeta));
pSql->pBuf = (void *)pSTMeta;
// create the child table meta from super table failed, try load it from mnode
@@ -8303,7 +8339,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
} else if (pTableMeta->tableType == TSDB_SUPER_TABLE) {
// the vgroup list of super table is not kept in local buffer, so here need retrieve it from the mnode each time
tscDebug("0x%"PRIx64" try to acquire cached super table %s vgroup id list", pSql->self, name);
- void* pv = taosCacheAcquireByKey(tscVgroupListBuf, name, len);
+ void* pv = taosCacheAcquireByKey(UTIL_GET_VGROUPLIST(pSql), name, len);
if (pv == NULL) {
char* t = strdup(name);
taosArrayPush(pVgroupList, &t);
@@ -8316,7 +8352,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
taosArrayAddBatch(pVgroupIdList, pdata->data, (int32_t) pdata->num);
- taosCacheRelease(tscVgroupListBuf, &pv, false);
+ taosCacheRelease(UTIL_GET_VGROUPLIST(pSql), &pv, false);
}
}
@@ -8466,7 +8502,7 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod
// check if current buffer contains the vgroup info. If not, add it
SNewVgroupInfo existVgroupInfo = {.inUse = -1,};
- taosHashGetClone(tscVgroupMap, id, sizeof(*id), NULL, &existVgroupInfo);
+ taosHashGetClone(UTIL_GET_VGROUPMAP(pSql), id, sizeof(*id), NULL, &existVgroupInfo);
assert(existVgroupInfo.inUse >= 0);
SVgroupMsg *pVgroup = &pTableMetaInfo->vgroupList->vgroups[j];
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index dcfbc857d5d6792b3796a098ea61046439fc5d0f..b7661d51f910dd320af95acfaae857bb51056b20 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -158,7 +158,7 @@ static void tscUpdateVgroupInfo(SSqlObj *pSql, SRpcEpSet *pEpSet) {
assert(vgId > 0);
SNewVgroupInfo vgroupInfo = {.vgId = -1};
- taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo);
+ taosHashGetClone(UTIL_GET_VGROUPMAP(pSql), &vgId, sizeof(vgId), NULL, &vgroupInfo);
assert(vgroupInfo.numOfEps > 0 && vgroupInfo.vgId > 0);
tscDebug("before: Endpoint in use:%d, numOfEps:%d", vgroupInfo.inUse, vgroupInfo.numOfEps);
@@ -170,7 +170,7 @@ static void tscUpdateVgroupInfo(SSqlObj *pSql, SRpcEpSet *pEpSet) {
}
tscDebug("after: EndPoint in use:%d, numOfEps:%d", vgroupInfo.inUse, vgroupInfo.numOfEps);
- taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(SNewVgroupInfo));
+ taosHashPut(UTIL_GET_VGROUPMAP(pSql), &vgId, sizeof(vgId), &vgroupInfo, sizeof(SNewVgroupInfo));
// Update the local cached epSet info cached by SqlObj
int32_t inUse = pSql->epSet.inUse;
@@ -654,7 +654,7 @@ int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pSql->cmd.msgType = TSDB_MSG_TYPE_SUBMIT;
SNewVgroupInfo vgroupInfo = {0};
- taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo);
+ taosHashGetClone(UTIL_GET_VGROUPMAP(pSql), &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo);
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo);
tscDebug("0x%"PRIx64" submit msg built, numberOfEP:%d", pSql->self, pSql->epSet.numOfEps);
@@ -737,7 +737,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab
vgId = pTableMeta->vgId;
SNewVgroupInfo vgroupInfo = {0};
- taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo);
+ taosHashGetClone(UTIL_GET_VGROUPMAP(pSql), &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo);
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo);
}
@@ -1650,7 +1650,7 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
STableMeta *pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
SNewVgroupInfo vgroupInfo = {.vgId = -1};
- taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo);
+ taosHashGetClone(UTIL_GET_VGROUPMAP(pSql), &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo);
assert(vgroupInfo.vgId > 0);
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo);
@@ -2036,21 +2036,21 @@ static int32_t tableMetaMsgConvert(STableMetaMsg* pMetaMsg) {
}
// update the vgroupInfo if needed
-static void doUpdateVgroupInfo(int32_t vgId, SVgroupMsg *pVgroupMsg) {
+static void doUpdateVgroupInfo(SSqlObj *pSql, int32_t vgId, SVgroupMsg *pVgroupMsg) {
assert(vgId > 0);
SNewVgroupInfo vgroupInfo = {.inUse = -1};
- taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo);
+ taosHashGetClone(UTIL_GET_VGROUPMAP(pSql), &vgId, sizeof(vgId), NULL, &vgroupInfo);
// vgroup info exists, compare with it
if (((vgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&vgroupInfo, pVgroupMsg)) || (vgroupInfo.inUse < 0)) {
vgroupInfo = createNewVgroupInfo(pVgroupMsg);
- taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(vgroupInfo));
- tscDebug("add/update new VgroupInfo, vgId:%d, total cached:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap));
+ taosHashPut(UTIL_GET_VGROUPMAP(pSql), &vgId, sizeof(vgId), &vgroupInfo, sizeof(vgroupInfo));
+ tscDebug("add/update new VgroupInfo, vgId:%d, total cached:%d", vgId, (int32_t) taosHashGetSize(UTIL_GET_VGROUPMAP(pSql)));
}
}
-static void doAddTableMetaToLocalBuf(STableMeta* pTableMeta, STableMetaMsg* pMetaMsg, bool updateSTable) {
+static void doAddTableMetaToLocalBuf(SSqlObj *pSql, STableMeta* pTableMeta, STableMetaMsg* pMetaMsg, bool updateSTable) {
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
// add or update the corresponding super table meta data info
int32_t len = (int32_t) strnlen(pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
@@ -2059,18 +2059,18 @@ static void doAddTableMetaToLocalBuf(STableMeta* pTableMeta, STableMetaMsg* pMet
if (updateSTable) {
STableMeta* pSupTableMeta = createSuperTableMeta(pMetaMsg);
uint32_t size = tscGetTableMetaSize(pSupTableMeta);
- int32_t code = taosHashPut(tscTableMetaMap, pTableMeta->sTableName, len, pSupTableMeta, size);
+ int32_t code = taosHashPut(UTIL_GET_TABLEMETA(pSql), pTableMeta->sTableName, len, pSupTableMeta, size);
assert(code == TSDB_CODE_SUCCESS);
tfree(pSupTableMeta);
}
CChildTableMeta* cMeta = tscCreateChildMeta(pTableMeta);
- taosHashPut(tscTableMetaMap, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), cMeta, sizeof(CChildTableMeta));
+ taosHashPut(UTIL_GET_TABLEMETA(pSql), pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), cMeta, sizeof(CChildTableMeta));
tfree(cMeta);
} else {
uint32_t s = tscGetTableMetaSize(pTableMeta);
- taosHashPut(tscTableMetaMap, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), pTableMeta, s);
+ taosHashPut(UTIL_GET_TABLEMETA(pSql), pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), pTableMeta, s);
}
}
@@ -2098,9 +2098,9 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
tNameExtractFullName(&pTableMetaInfo->name, name);
assert(strncmp(pMetaMsg->tableFname, name, tListLen(pMetaMsg->tableFname)) == 0);
- doAddTableMetaToLocalBuf(pTableMeta, pMetaMsg, true);
+ doAddTableMetaToLocalBuf(pSql, pTableMeta, pMetaMsg, true);
if (pTableMeta->tableType != TSDB_SUPER_TABLE) {
- doUpdateVgroupInfo(pTableMeta->vgId, &pMetaMsg->vgroup);
+ doUpdateVgroupInfo(pSql, pTableMeta->vgId, &pMetaMsg->vgroup);
}
tscDebug("0x%"PRIx64" recv table meta, uid:%" PRIu64 ", tid:%d, name:%s, numOfCols:%d, numOfTags:%d", pSql->self,
@@ -2111,7 +2111,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
-static SArray* createVgroupIdListFromMsg(char* pMsg, SHashObj* pSet, char* name, int32_t* size, uint64_t id) {
+static SArray* createVgroupIdListFromMsg(SSqlObj *pSql, char* pMsg, SHashObj* pSet, char* name, int32_t* size, uint64_t id) {
SVgroupsMsg *pVgroupMsg = (SVgroupsMsg *)pMsg;
pVgroupMsg->numOfVgroups = htonl(pVgroupMsg->numOfVgroups);
@@ -2134,7 +2134,7 @@ static SArray* createVgroupIdListFromMsg(char* pMsg, SHashObj* pSet, char* name,
if (taosHashGet(pSet, &vmsg->vgId, sizeof(vmsg->vgId)) == NULL) {
taosHashPut(pSet, &vmsg->vgId, sizeof(vmsg->vgId), "", 0);
- doUpdateVgroupInfo(vmsg->vgId, vmsg);
+ doUpdateVgroupInfo(pSql, vmsg->vgId, vmsg);
}
}
}
@@ -2142,7 +2142,7 @@ static SArray* createVgroupIdListFromMsg(char* pMsg, SHashObj* pSet, char* name,
return vgroupIdList;
}
-static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t id) {
+static SVgroupsInfo* createVgroupInfoFromMsg(SSqlObj *pSql, char* pMsg, int32_t* size, uint64_t id) {
SVgroupsMsg *pVgroupMsg = (SVgroupsMsg *)pMsg;
pVgroupMsg->numOfVgroups = htonl(pVgroupMsg->numOfVgroups);
@@ -2174,7 +2174,7 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t
// pVgroup->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN);
}
- doUpdateVgroupInfo(pVgroup->vgId, vmsg);
+ doUpdateVgroupInfo(pSql, pVgroup->vgId, vmsg);
}
}
@@ -2309,12 +2309,12 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
}
// create the tableMeta and add it into the TableMeta map
- doAddTableMetaToLocalBuf(pTableMeta, pMetaMsg, updateStableMeta);
+ doAddTableMetaToLocalBuf(pParentSql, pTableMeta, pMetaMsg, updateStableMeta);
// for each vgroup, only update the information once.
int64_t vgId = pMetaMsg->vgroup.vgId;
if (pTableMeta->tableType != TSDB_SUPER_TABLE && taosHashGet(pSet, &vgId, sizeof(vgId)) == NULL) {
- doUpdateVgroupInfo((int32_t) vgId, &pMetaMsg->vgroup);
+ doUpdateVgroupInfo(pParentSql, (int32_t) vgId, &pMetaMsg->vgroup);
taosHashPut(pSet, &vgId, sizeof(vgId), "", 0);
}
@@ -2339,7 +2339,7 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
taosArrayDestroy(p->vgroupIdList);
}
- p->vgroupIdList = createVgroupIdListFromMsg(pMsg, pSet, fname, &size, pSql->self);
+ p->vgroupIdList = createVgroupIdListFromMsg(pParentSql, pMsg, pSet, fname, &size, pSql->self);
int32_t numOfVgId = (int32_t) taosArrayGetSize(p->vgroupIdList);
int32_t s = sizeof(tFilePage) + numOfVgId * sizeof(int32_t);
@@ -2348,8 +2348,8 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
idList->num = numOfVgId;
memcpy(idList->data, TARRAY_GET_START(p->vgroupIdList), numOfVgId * sizeof(int32_t));
- void* idListInst = taosCachePut(tscVgroupListBuf, fname, len, idList, s, 5000);
- taosCacheRelease(tscVgroupListBuf, (void*) &idListInst, false);
+ void* idListInst = taosCachePut(UTIL_GET_VGROUPLIST(pParentSql), fname, len, idList, s, 5000);
+ taosCacheRelease(UTIL_GET_VGROUPLIST(pParentSql), (void*) &idListInst, false);
tfree(idList);
pMsg += size;
@@ -2439,7 +2439,7 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
continue;
}
int32_t size = 0;
- pInfo->vgroupList = createVgroupInfoFromMsg(pMsg, &size, pSql->self);
+ pInfo->vgroupList = createVgroupInfoFromMsg(parent, pMsg, &size, pSql->self);
pMsg += size;
}
@@ -2570,7 +2570,8 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
pObj->writeAuth = pConnect->writeAuth;
pObj->superAuth = pConnect->superAuth;
pObj->connId = htonl(pConnect->connId);
-
+ tstrncpy(pObj->clusterId, pConnect->clusterId, sizeof(pObj->clusterId));
+
createHbObj(pObj);
//launch a timer to send heartbeat to maintain the connection and send status to mnode
@@ -2595,9 +2596,9 @@ int tscProcessDropDbRsp(SSqlObj *pSql) {
//TODO LOCK DB WHEN MODIFY IT
//pSql->pTscObj->db[0] = 0;
- taosHashClear(tscTableMetaMap);
- taosHashClear(tscVgroupMap);
- taosCacheEmpty(tscVgroupListBuf);
+ taosHashClear(UTIL_GET_TABLEMETA(pSql));
+ taosHashClear(UTIL_GET_VGROUPMAP(pSql));
+ taosCacheEmpty(UTIL_GET_VGROUPLIST(pSql));
return 0;
}
@@ -2617,7 +2618,7 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
tscDebug("0x%"PRIx64" remove tableMeta in hashMap after alter-table: %s", pSql->self, name);
bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
- taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ taosHashRemove(UTIL_GET_TABLEMETA(pSql), name, strnlen(name, TSDB_TABLE_FNAME_LEN));
tfree(pTableMetaInfo->pTableMeta);
if (isSuperTable) { // if it is a super table, iterate the hashTable and remove all the childTableMeta
@@ -2932,7 +2933,7 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool
memset(pTableMetaInfo->pTableMeta, 0, pTableMetaInfo->tableMetaCapacity);
}
- if (NULL == taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&(pTableMetaInfo->pTableMeta), &pTableMetaInfo->tableMetaCapacity)) {
+ if (NULL == taosHashGetCloneExt(UTIL_GET_TABLEMETA(pSql), name, len, NULL, (void **)&(pTableMetaInfo->pTableMeta), &pTableMetaInfo->tableMetaCapacity)) {
tfree(pTableMetaInfo->pTableMeta);
}
@@ -2942,7 +2943,7 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool
if (pMeta && pMeta->id.uid > 0) {
// in case of child table, here only get the
if (pMeta->tableType == TSDB_CHILD_TABLE) {
- int32_t code = tscCreateTableMetaFromSTableMeta(&pTableMetaInfo->pTableMeta, name, &pTableMetaInfo->tableMetaCapacity, (STableMeta **)(&pSTMeta));
+ int32_t code = tscCreateTableMetaFromSTableMeta(pSql, &pTableMetaInfo->pTableMeta, name, &pTableMetaInfo->tableMetaCapacity, (STableMeta **)(&pSTMeta));
pSql->pBuf = (void *)(pSTMeta);
if (code != TSDB_CODE_SUCCESS) {
return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate);
diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c
index 5fdaad0d667c19548f699a9a8cfed7c9f017ad1b..2c8ef9b764a0aa2bbf8dd9304776175a8a599a27 100644
--- a/src/client/src/tscSql.c
+++ b/src/client/src/tscSql.c
@@ -194,6 +194,7 @@ TAOS *taos_connect_internal(const char *ip, const char *user, const char *pass,
tscBuildAndSendRequest(pSql, NULL);
tsem_wait(&pSql->rspSem);
+ pSql->pTscObj->pClusterInfo = (SClusterInfo *)tscAcquireClusterInfo(pSql->pTscObj->clusterId);
if (pSql->res.code != TSDB_CODE_SUCCESS) {
terrno = pSql->res.code;
if (terrno ==TSDB_CODE_RPC_FQDN_ERROR) {
@@ -256,6 +257,7 @@ static void asyncConnCallback(void *param, TAOS_RES *tres, int code) {
SSqlObj *pSql = (SSqlObj *) tres;
assert(pSql != NULL);
+ pSql->pTscObj->pClusterInfo = (SClusterInfo *)tscAcquireClusterInfo(pSql->pTscObj->clusterId);
pSql->fetchFp(pSql->param, tres, code);
}
@@ -268,7 +270,6 @@ TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port,
}
if (taos) *taos = pObj;
-
pSql->fetchFp = fp;
pSql->res.code = tscBuildAndSendRequest(pSql, NULL);
tscDebug("%p DB async connection is opening", taos);
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 99a2a79dc60c89530eb9c2c7f6b5645ca0133ba1..58fb6c979b6d46cd71727b87c2e887d20abcdc15 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -3213,7 +3213,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
for(int32_t i = 0; i < pParentObj->cmd.insertParam.numOfTables; ++i) {
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(pParentObj->cmd.insertParam.pTableNameList[i], name);
- taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ taosHashRemove(UTIL_GET_TABLEMETA(pParentObj), name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
pParentObj->res.code = TSDB_CODE_SUCCESS;
@@ -3358,7 +3358,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
goto _error;
}
- pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks);
+ pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pSql, pCmd->insertParam.pDataBlocks);
// use the local variable
for (int32_t j = 0; j < numOfSub; ++j) {
diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c
index b3b83db80a70c19f79d1cd6a732d729817436dd3..edb8169f761e2b5aaba1ddfd7cda8a9008298948 100644
--- a/src/client/src/tscSystem.c
+++ b/src/client/src/tscSystem.c
@@ -33,9 +33,11 @@
int32_t sentinel = TSC_VAR_NOT_RELEASE;
-SHashObj *tscVgroupMap; // hash map to keep the vgroup info from mnode
-SHashObj *tscTableMetaMap; // table meta info buffer
-SCacheObj *tscVgroupListBuf; // super table vgroup list information, only survives 5 seconds for each super table vgroup list
+//SHashObj *tscVgroupMap; // hash map to keep the vgroup info from mnode
+//SHashObj *tscTableMetaMap; // table meta info buffer
+//SCacheObj *tscVgroupListBuf; // super table vgroup list information, only survives 5 seconds for each super table vgroup list
+SHashObj *tscClusterMap = NULL; // cluster obj
+static pthread_mutex_t clusterMutex; // mutex to protect open the cluster obj
int32_t tscObjRef = -1;
void *tscTmr;
@@ -121,6 +123,57 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry
return 0;
}
+void tscClusterInfoDestroy(SClusterInfo *pObj) {
+ if (pObj == NULL) { return; }
+ taosHashCleanup(pObj->vgroupMap);
+ taosHashCleanup(pObj->tableMetaMap);
+ taosCacheCleanup(pObj->vgroupListBuf);
+ tfree(pObj);
+}
+
+void *tscAcquireClusterInfo(const char *clusterId) {
+ pthread_mutex_lock(&clusterMutex);
+
+ size_t len = strlen(clusterId);
+ SClusterInfo *pObj = NULL;
+ SClusterInfo **ppObj = taosHashGet(tscClusterMap, clusterId, len);
+ if (ppObj == NULL || *ppObj == NULL) {
+ pObj = calloc(1, sizeof(SClusterInfo));
+ if (pObj) {
+ pObj->vgroupMap = taosHashInit(256, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
+ pObj->tableMetaMap = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); //
+ pObj->vgroupListBuf = taosCacheInit(TSDB_DATA_TYPE_BINARY, 5, false, NULL, "stable-vgroup-list");
+ if (pObj->vgroupMap == NULL || pObj->tableMetaMap == NULL || pObj->vgroupListBuf == NULL) {
+ tscClusterInfoDestroy(pObj);
+ pObj = NULL;
+ } else {
+ taosHashPut(tscClusterMap, clusterId, len, &pObj, POINTER_BYTES);
+ }
+ }
+ } else {
+ pObj = *ppObj;
+ }
+
+ if (pObj) { pObj->ref += 1; }
+
+ pthread_mutex_unlock(&clusterMutex);
+ return pObj;
+}
+void tscReleaseClusterInfo(const char *clusterId) {
+ pthread_mutex_lock(&clusterMutex);
+
+ size_t len = strlen(clusterId);
+ SClusterInfo *pObj = NULL;
+ SClusterInfo **ppObj = taosHashGet(tscClusterMap, clusterId, len);
+ if (ppObj != NULL && *ppObj != NULL) {
+ pObj = *ppObj;
+ }
+ if (pObj && --pObj->ref == 0) {
+ taosHashRemove(tscClusterMap, clusterId, len);
+ tscClusterInfoDestroy(pObj);
+ }
+ pthread_mutex_unlock(&clusterMutex);
+}
void taos_init_imp(void) {
char temp[128] = {0};
@@ -188,12 +241,16 @@ void taos_init_imp(void) {
taosTmrReset(tscCheckDiskUsage, 20 * 1000, NULL, tscTmr, &tscCheckDiskUsageTmr);
}
- if (tscTableMetaMap == NULL) {
+ if (tscClusterMap == NULL) {
tscObjRef = taosOpenRef(40960, tscFreeRegisteredSqlObj);
- tscVgroupMap = taosHashInit(256, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
- tscTableMetaMap = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
- tscVgroupListBuf = taosCacheInit(TSDB_DATA_TYPE_BINARY, 5, false, NULL, "stable-vgroup-list");
- tscDebug("TableMeta:%p, vgroup:%p is initialized", tscTableMetaMap, tscVgroupMap);
+
+ tscClusterMap = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
+ pthread_mutex_init(&clusterMutex, NULL);
+ //tscVgroupMap = taosHashInit(256, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
+ //tscTableMetaMap = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
+ //tscVgroupListBuf = taosCacheInit(TSDB_DATA_TYPE_BINARY, 5, false, NULL, "stable-vgroup-list");
+ //tscDebug("TableMeta:%p, vgroup:%p is initialized", tscTableMetaMap, tscVgroupMap);
+
}
int refreshTime = 5;
@@ -222,12 +279,6 @@ void taos_cleanup(void) {
scriptEnvPoolCleanup();
}
- taosHashCleanup(tscTableMetaMap);
- tscTableMetaMap = NULL;
-
- taosHashCleanup(tscVgroupMap);
- tscVgroupMap = NULL;
-
int32_t id = tscObjRef;
tscObjRef = -1;
taosCloseRef(id);
@@ -251,14 +302,16 @@ void taos_cleanup(void) {
}
pthread_mutex_destroy(&setConfMutex);
- taosCacheCleanup(tscVgroupListBuf);
- tscVgroupListBuf = NULL;
if (tscEmbedded == 0) {
rpcCleanup();
taosCloseLog();
};
+ taosHashCleanup(tscClusterMap);
+ tscClusterMap = NULL;
+ pthread_mutex_destroy(&clusterMutex);
+
p = tscTmr;
tscTmr = NULL;
taosTmrCleanUp(p);
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 04d1472bb0101e903aa697ccc443d60e386c4ba2..5e587d75ae78c8c2615d335cc0b3d899672bb4f4 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -271,6 +271,8 @@ bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) {
functionId != TSDB_FUNC_TS_COMP &&
functionId != TSDB_FUNC_DIFF &&
functionId != TSDB_FUNC_DERIVATIVE &&
+ functionId != TSDB_FUNC_MAVG &&
+ functionId != TSDB_FUNC_CSUM &&
functionId != TSDB_FUNC_TS_DUMMY &&
functionId != TSDB_FUNC_TID_TAG &&
functionId != TSDB_FUNC_CEIL &&
@@ -321,7 +323,9 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) {
return true;
}
-bool tscIsDiffDerivQuery(SQueryInfo* pQueryInfo) {
+// these functions diff/derivative/csum/mavg will return the result computed on current row and history row/rows
+// as the result for current row
+bool tscIsDiffDerivLikeQuery(SQueryInfo* pQueryInfo) {
size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
@@ -330,7 +334,8 @@ bool tscIsDiffDerivQuery(SQueryInfo* pQueryInfo) {
continue;
}
- if (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE) {
+ if (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE ||
+ f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG) {
return true;
}
}
@@ -551,6 +556,22 @@ bool tscIsIrateQuery(SQueryInfo* pQueryInfo) {
return false;
}
+bool tscQueryContainsFunction(SQueryInfo* pQueryInfo, int16_t functionId) {
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
+ for (int32_t i = 0; i < numOfExprs; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr == NULL) {
+ continue;
+ }
+
+ if (pExpr->base.functionId == functionId) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo) {
return pQueryInfo->sessionWindow.gap > 0;
}
@@ -589,7 +610,7 @@ bool isSimpleAggregateRv(SQueryInfo* pQueryInfo) {
return false;
}
- if (tscIsDiffDerivQuery(pQueryInfo)) {
+ if (tscIsDiffDerivLikeQuery(pQueryInfo)) {
return false;
}
@@ -615,7 +636,9 @@ bool isSimpleAggregateRv(SQueryInfo* pQueryInfo) {
}
if ((!IS_MULTIOUTPUT(aAggs[functionId].status)) ||
- (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_TS_COMP)) {
+ (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM ||
+ functionId == TSDB_FUNC_TS_COMP ||
+ functionId == TSDB_FUNC_SAMPLE)) {
return true;
}
}
@@ -1421,6 +1444,7 @@ void destroyTableNameList(SInsertStatementParam* pInsertParam) {
}
void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta, uint64_t id) {
+ SSqlObj *pSql = (SSqlObj*)taosAcquireRef(tscObjRef, id);
pCmd->command = 0;
pCmd->numOfCols = 0;
pCmd->count = 0;
@@ -1429,13 +1453,14 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta, uint64_t id) {
pCmd->insertParam.sql = NULL;
destroyTableNameList(&pCmd->insertParam);
- pCmd->insertParam.pTableBlockHashList = tscDestroyBlockHashTable(pCmd->insertParam.pTableBlockHashList, clearCachedMeta);
- pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks);
+ pCmd->insertParam.pTableBlockHashList = tscDestroyBlockHashTable(pSql, pCmd->insertParam.pTableBlockHashList, clearCachedMeta);
+ pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pSql, pCmd->insertParam.pDataBlocks);
tfree(pCmd->insertParam.tagData.data);
pCmd->insertParam.tagData.dataLen = 0;
tscFreeQueryInfo(pCmd, clearCachedMeta, id);
pCmd->pTableMetaMap = tscCleanupTableMetaMap(pCmd->pTableMetaMap);
+ taosReleaseRef(tscObjRef, id);
}
void* tscCleanupTableMetaMap(SHashObj* pTableMetaMap) {
@@ -1571,7 +1596,7 @@ void tscDestroyBoundColumnInfo(SParsedDataColInfo* pColInfo) {
tfree(pColInfo->colIdxInfo);
}
-void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
+void tscDestroyDataBlock(SSqlObj *pSql, STableDataBlocks* pDataBlock, bool removeMeta) {
if (pDataBlock == NULL) {
return;
}
@@ -1582,7 +1607,7 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pDataBlock->tableName, name);
- taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ taosHashRemove(UTIL_GET_TABLEMETA(pSql), name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
if (!pDataBlock->cloned) {
@@ -1623,7 +1648,7 @@ SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint
return param;
}
-void* tscDestroyBlockArrayList(SArray* pDataBlockList) {
+void* tscDestroyBlockArrayList(SSqlObj *pSql, SArray* pDataBlockList) {
if (pDataBlockList == NULL) {
return NULL;
}
@@ -1631,7 +1656,7 @@ void* tscDestroyBlockArrayList(SArray* pDataBlockList) {
size_t size = taosArrayGetSize(pDataBlockList);
for (int32_t i = 0; i < size; i++) {
void* d = taosArrayGetP(pDataBlockList, i);
- tscDestroyDataBlock(d, false);
+ tscDestroyDataBlock(pSql, d, false);
}
taosArrayDestroy(pDataBlockList);
@@ -1679,14 +1704,14 @@ void* tscDestroyUdfArrayList(SArray* pUdfList) {
-void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta) {
+void* tscDestroyBlockHashTable(SSqlObj *pSql, SHashObj* pBlockHashTable, bool removeMeta) {
if (pBlockHashTable == NULL) {
return NULL;
}
STableDataBlocks** p = taosHashIterate(pBlockHashTable, NULL);
while(p) {
- tscDestroyDataBlock(*p, removeMeta);
+ tscDestroyDataBlock(pSql, *p, removeMeta);
p = taosHashIterate(pBlockHashTable, p);
}
@@ -1927,7 +1952,7 @@ static int32_t getRowExpandSize(STableMeta* pTableMeta) {
return result;
}
-static void extractTableNameList(SInsertStatementParam *pInsertParam, bool freeBlockMap) {
+static void extractTableNameList(SSqlObj *pSql, SInsertStatementParam *pInsertParam, bool freeBlockMap) {
pInsertParam->numOfTables = (int32_t) taosHashGetSize(pInsertParam->pTableBlockHashList);
if (pInsertParam->pTableNameList == NULL) {
pInsertParam->pTableNameList = malloc(pInsertParam->numOfTables * POINTER_BYTES);
@@ -1944,11 +1969,11 @@ static void extractTableNameList(SInsertStatementParam *pInsertParam, bool freeB
}
if (freeBlockMap) {
- pInsertParam->pTableBlockHashList = tscDestroyBlockHashTable(pInsertParam->pTableBlockHashList, false);
+ pInsertParam->pTableBlockHashList = tscDestroyBlockHashTable(pSql, pInsertParam->pTableBlockHashList, false);
}
}
-int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBlockMap) {
+int32_t tscMergeTableDataBlocks(SSqlObj *pSql, SInsertStatementParam *pInsertParam, bool freeBlockMap) {
const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
int code = 0;
bool isRawPayload = IS_RAW_PAYLOAD(pInsertParam->payloadType);
@@ -1973,7 +1998,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
if (ret != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pInsertParam->objectId, ret);
taosHashCleanup(pVnodeDataBlockHashList);
- tscDestroyBlockArrayList(pVnodeDataBlockList);
+ tscDestroyBlockArrayList(pSql, pVnodeDataBlockList);
tfree(blkKeyInfo.pKeyTuple);
return ret;
}
@@ -1992,7 +2017,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pInsertParam->objectId, dataBuf->nAllocSize);
taosHashCleanup(pVnodeDataBlockHashList);
- tscDestroyBlockArrayList(pVnodeDataBlockList);
+ tscDestroyBlockArrayList(pSql, pVnodeDataBlockList);
tfree(dataBuf->pData);
tfree(blkKeyInfo.pKeyTuple);
@@ -2010,7 +2035,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
} else {
if ((code = tscSortRemoveDataBlockDupRows(pOneTableBlock, &blkKeyInfo)) != 0) {
taosHashCleanup(pVnodeDataBlockHashList);
- tscDestroyBlockArrayList(pVnodeDataBlockList);
+ tscDestroyBlockArrayList(pSql, pVnodeDataBlockList);
tfree(dataBuf->pData);
tfree(blkKeyInfo.pKeyTuple);
return code;
@@ -2057,7 +2082,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
pOneTableBlock = *p;
}
- extractTableNameList(pInsertParam, freeBlockMap);
+ extractTableNameList(pSql, pInsertParam, freeBlockMap);
// free the table data blocks;
pInsertParam->pDataBlocks = pVnodeDataBlockList;
@@ -2077,6 +2102,7 @@ void tscCloseTscObj(void *param) {
tfree(pObj->tscCorMgmtEpSet);
tscReleaseRpc(pObj->pRpcObj);
pthread_mutex_destroy(&pObj->mutex);
+ tscReleaseClusterInfo(pObj->clusterId);
tfree(pObj);
}
@@ -4509,6 +4535,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild) {
pTableMeta->tableInfo.numOfTags = pChild->numOfTags;
pTableMeta->tableInfo.numOfColumns = pChild->numOfColumns;
pTableMeta->tableInfo.precision = pChild->precision;
+ pTableMeta->tableInfo.update = pChild->update;
pTableMeta->id.tid = 0;
pTableMeta->id.uid = pChild->suid;
@@ -4550,7 +4577,7 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) {
return cMeta;
}
-int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, size_t *tableMetaCapacity, STableMeta**ppSTable) {
+int32_t tscCreateTableMetaFromSTableMeta(SSqlObj *pSql, STableMeta** ppChild, const char* name, size_t *tableMetaCapacity, STableMeta**ppSTable) {
assert(*ppChild != NULL);
STableMeta* p = *ppSTable;
STableMeta* pChild = *ppChild;
@@ -4560,11 +4587,10 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
memset((char *)p, 0, sz);
}
- if (NULL == taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz)) {
+ if (NULL == taosHashGetCloneExt(UTIL_GET_TABLEMETA(pSql), pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz)) {
tfree(p);
- } else {
- *ppSTable = p;
- }
+ }
+ *ppSTable = p;
// tableMeta exists, build child table meta according to the super table meta
// the uid need to be checked in addition to the general name of the super table.
@@ -4586,7 +4612,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
*ppChild = pChild;
return TSDB_CODE_SUCCESS;
} else { // super table has been removed, current tableMeta is also expired. remove it here
- taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ taosHashRemove(UTIL_GET_TABLEMETA(pSql), name, strnlen(name, TSDB_TABLE_FNAME_LEN));
return -1;
}
}
@@ -4834,7 +4860,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
pQueryAttr->hasTagResults = hasTagValOutput(pQueryInfo);
pQueryAttr->stabledev = isStabledev(pQueryInfo);
pQueryAttr->tsCompQuery = isTsCompQuery(pQueryInfo);
- pQueryAttr->diffQuery = tscIsDiffDerivQuery(pQueryInfo);
+ pQueryAttr->diffQuery = tscIsDiffDerivLikeQuery(pQueryInfo);
pQueryAttr->simpleAgg = isSimpleAggregateRv(pQueryInfo);
pQueryAttr->needReverseScan = tscNeedReverseScan(pQueryInfo);
pQueryAttr->stableQuery = QUERY_IS_STABLE_QUERY(pQueryInfo->type);
@@ -5105,18 +5131,20 @@ SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) {
void tscRemoveCachedTableMeta(STableMetaInfo* pTableMetaInfo, uint64_t id) {
char fname[TSDB_TABLE_FNAME_LEN] = {0};
- tNameExtractFullName(&pTableMetaInfo->name, fname);
+ SSqlObj *p = (SSqlObj *)taosAcquireRef(tscObjRef, id);
+ tNameExtractFullName(&pTableMetaInfo->name, fname);
int32_t len = (int32_t) strnlen(fname, TSDB_TABLE_FNAME_LEN);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- void* pv = taosCacheAcquireByKey(tscVgroupListBuf, fname, len);
+ void* pv = taosCacheAcquireByKey(UTIL_GET_VGROUPLIST(p), fname, len);
if (pv != NULL) {
- taosCacheRelease(tscVgroupListBuf, &pv, true);
+ taosCacheRelease(UTIL_GET_VGROUPLIST(p), &pv, true);
}
}
- taosHashRemove(tscTableMetaMap, fname, len);
- tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(tscTableMetaMap));
+ taosHashRemove(UTIL_GET_TABLEMETA(p), fname, len);
+ tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(UTIL_GET_TABLEMETA(p)));
+ taosReleaseRef(tscObjRef, id);
}
char* cloneCurrentDBName(SSqlObj* pSql) {
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index c3c8625fec3290e2d39d64f53a173b43cf21d7e3..2c357676060b2851759cf123470498ed4329b468 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -780,6 +780,7 @@ typedef struct STableMetaMsg {
char tableFname[TSDB_TABLE_FNAME_LEN]; // table id
uint8_t numOfTags;
uint8_t precision;
+ uint8_t update;
uint8_t tableType;
int16_t numOfColumns;
int16_t sversion;
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index a6158906a7cc77b57244594fe51881e5df0b68c8..6cbb50cf3ff90cd25ba28d8fa4ffb0cc9c7baff5 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -1792,6 +1792,7 @@ static int32_t mnodeDoGetSuperTableMeta(SMnodeMsg *pMsg, STableMetaMsg* pMeta) {
pMeta->sversion = htons(pTable->sversion);
pMeta->tversion = htons(pTable->tversion);
pMeta->precision = pMsg->pDb->cfg.precision;
+ pMeta->update = pMsg->pDb->cfg.update;
pMeta->numOfTags = (uint8_t)pTable->numOfTags;
pMeta->numOfColumns = htons((int16_t)pTable->numOfColumns);
pMeta->tableType = pTable->info.type;
@@ -2509,6 +2510,7 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) {
pMeta->uid = htobe64(pTable->uid);
pMeta->tid = htonl(pTable->tid);
pMeta->precision = pDb->cfg.precision;
+ pMeta->update = pDb->cfg.update;
pMeta->tableType = pTable->info.type;
tstrncpy(pMeta->tableFname, pTable->info.tableId, TSDB_TABLE_FNAME_LEN);
diff --git a/src/os/src/detail/osTime.c b/src/os/src/detail/osTime.c
index 5b2a01edc3e04ae5b9bb8e9df9c222368aac5c1b..85670e5d412c5b3ee76805431940fe573ebfe2d2 100644
--- a/src/os/src/detail/osTime.c
+++ b/src/os/src/detail/osTime.c
@@ -380,10 +380,40 @@ int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrec
assert(toPrecision == TSDB_TIME_PRECISION_MILLI ||
toPrecision == TSDB_TIME_PRECISION_MICRO ||
toPrecision == TSDB_TIME_PRECISION_NANO);
- static double factors[3][3] = { {1., 1000., 1000000.},
- {1.0 / 1000, 1., 1000.},
- {1.0 / 1000000, 1.0 / 1000, 1.} };
- return (int64_t)((double)time * factors[fromPrecision][toPrecision]);
+ switch(fromPrecision) {
+ case TSDB_TIME_PRECISION_MILLI: {
+ switch (toPrecision) {
+ case TSDB_TIME_PRECISION_MILLI:
+ return time;
+ case TSDB_TIME_PRECISION_MICRO:
+ return time * 1000;
+ case TSDB_TIME_PRECISION_NANO:
+ return time * 1000000;
+ }
+ } // end from milli
+ case TSDB_TIME_PRECISION_MICRO: {
+ switch (toPrecision) {
+ case TSDB_TIME_PRECISION_MILLI:
+ return time / 1000;
+ case TSDB_TIME_PRECISION_MICRO:
+ return time;
+ case TSDB_TIME_PRECISION_NANO:
+ return time * 1000;
+ }
+ } //end from micro
+ case TSDB_TIME_PRECISION_NANO: {
+ switch (toPrecision) {
+ case TSDB_TIME_PRECISION_MILLI:
+ return time / 1000000;
+ case TSDB_TIME_PRECISION_MICRO:
+ return time / 1000;
+ case TSDB_TIME_PRECISION_NANO:
+ return time;
+ }
+ } //end from nano
+ default:
+ assert(0);
+ } //end switch fromPrecision
}
static int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision) {
diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h
index 4f7821708c3e9b3c3d0eb975125e1ad12c5f82a4..e3e5ccbce9f5c49871a7f0f5378fc56845e1b410 100644
--- a/src/query/inc/qAggMain.h
+++ b/src/query/inc/qAggMain.h
@@ -68,18 +68,23 @@ extern "C" {
#define TSDB_FUNC_IRATE 30
#define TSDB_FUNC_TID_TAG 31
#define TSDB_FUNC_DERIVATIVE 32
-#define TSDB_FUNC_BLKINFO 33
-
-#define TSDB_FUNC_CEIL 34
-#define TSDB_FUNC_FLOOR 35
-#define TSDB_FUNC_ROUND 36
-
-#define TSDB_FUNC_HISTOGRAM 37
-#define TSDB_FUNC_HLL 38
-#define TSDB_FUNC_MODE 39
-#define TSDB_FUNC_SAMPLE 40
-#define TSDB_FUNC_MAVG 41
-#define TSDB_FUNC_CSUM 42
+
+#define TSDB_FUNC_CEIL 33
+#define TSDB_FUNC_FLOOR 34
+#define TSDB_FUNC_ROUND 35
+
+#define TSDB_FUNC_CSUM 36
+#define TSDB_FUNC_MAVG 37
+#define TSDB_FUNC_SAMPLE 38
+
+#define TSDB_FUNC_BLKINFO 39
+
+///////////////////////////////////////////
+// the following functions are not implemented.
+// after implementation, move them before TSDB_FUNC_BLKINFO. also make TSDB_FUNC_BLKINFO the maximum function index
+// #define TSDB_FUNC_HISTOGRAM 40
+// #define TSDB_FUNC_HLL 41
+// #define TSDB_FUNC_MODE 42
#define TSDB_FUNCSTATE_SO 0x1u // single output
#define TSDB_FUNCSTATE_MO 0x2u // dynamic number of output, not multinumber of output e.g., TOP/BOTTOM
diff --git a/src/query/inc/qTableMeta.h b/src/query/inc/qTableMeta.h
index 948a1ae91e01331c4f566ac5089485f717fc5632..3fb489f17ed6dd76a6c18c1cdce288c39d0594a7 100644
--- a/src/query/inc/qTableMeta.h
+++ b/src/query/inc/qTableMeta.h
@@ -56,6 +56,7 @@ typedef struct SGroupbyExpr {
typedef struct STableComInfo {
uint8_t numOfTags;
uint8_t precision;
+ uint8_t update;
int16_t numOfColumns;
int32_t rowSize;
} STableComInfo;
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index 1fd682aebd6ac7899ca0a88f6a4744cd4ebbb006..5a3aa6934a8d15a84de2d2770db0aac59f48fa63 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -169,6 +169,27 @@ typedef struct SDerivInfo {
bool valueSet; // the value has been set already
} SDerivInfo;
+typedef struct {
+ double cumSum;
+} SCumSumInfo;
+
+typedef struct {
+ int32_t pos;
+ double sum;
+ int32_t numPointsK;
+ double* points;
+ bool kPointsMeet;
+} SMovingAvgInfo;
+
+typedef struct {
+ int32_t totalPoints;
+ int32_t numSampled;
+ int16_t colBytes;
+ char *values;
+ int64_t *timeStamps;
+ char *taglists;
+} SSampleFuncInfo;
+
int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type,
int16_t *bytes, int32_t *interBytes, int16_t extLength, bool isSuperTable, SUdfInfo* pUdfInfo) {
if (!isValidDataType(dataType)) {
@@ -237,6 +258,27 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
return TSDB_CODE_SUCCESS;
}
+ if (functionId == TSDB_FUNC_CSUM) {
+ if (IS_SIGNED_NUMERIC_TYPE(dataType)) {
+ *type = TSDB_DATA_TYPE_BIGINT;
+ } else if (IS_UNSIGNED_NUMERIC_TYPE(dataType)) {
+ *type = TSDB_DATA_TYPE_UBIGINT;
+ } else {
+ *type = TSDB_DATA_TYPE_DOUBLE;
+ }
+
+ *bytes = sizeof(int64_t);
+ *interBytes = sizeof(SCumSumInfo);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (functionId == TSDB_FUNC_MAVG) {
+ *type = TSDB_DATA_TYPE_DOUBLE;
+ *bytes = sizeof(double);
+ *interBytes = sizeof(SMovingAvgInfo) + sizeof(double) * param;
+ return TSDB_CODE_SUCCESS;
+ }
+
if (isSuperTable) {
if (functionId < 0) {
if (pUdfInfo->bufSize > 0) {
@@ -280,6 +322,12 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
*bytes = (int16_t)(sizeof(STopBotInfo) + (sizeof(tValuePair) + POINTER_BYTES + extLength) * param);
*interBytes = *bytes;
+ return TSDB_CODE_SUCCESS;
+ } else if (functionId == TSDB_FUNC_SAMPLE) {
+ *type = TSDB_DATA_TYPE_BINARY;
+ *bytes = (int16_t)(sizeof(SSampleFuncInfo) + dataBytes*param + sizeof(int64_t)*param + extLength*param);
+ *interBytes = *bytes;
+
return TSDB_CODE_SUCCESS;
} else if (functionId == TSDB_FUNC_SPREAD) {
*type = TSDB_DATA_TYPE_BINARY;
@@ -389,6 +437,11 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
// the output column may be larger than sizeof(STopBotInfo)
*interBytes = (int32_t)size;
+ } else if (functionId == TSDB_FUNC_SAMPLE) {
+ *type = (int16_t)dataType;
+ *bytes = (int16_t)dataBytes;
+ size_t size = sizeof(SSampleFuncInfo) + dataBytes*param + sizeof(int64_t)*param + extLength*param;
+ *interBytes = (int32_t)size;
} else if (functionId == TSDB_FUNC_LAST_ROW) {
*type = (int16_t)dataType;
*bytes = (int16_t)dataBytes;
@@ -407,7 +460,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
// TODO use hash table
int32_t isValidFunction(const char* name, int32_t len) {
- for(int32_t i = 0; i <= TSDB_FUNC_ROUND; ++i) {
+ for(int32_t i = 0; i <= TSDB_FUNC_BLKINFO; ++i) {
int32_t nameLen = (int32_t) strlen(aAggs[i].name);
if (len != nameLen) {
continue;
@@ -4085,6 +4138,8 @@ static void irate_function(SQLFunctionCtx *pCtx) {
}
}
+/////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
void blockInfo_func(SQLFunctionCtx* pCtx) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
STableBlockDist* pDist = (STableBlockDist*) GET_ROWCELL_INTERBUF(pResInfo);
@@ -4258,6 +4313,8 @@ void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) {
doFinalizer(pCtx);
}
+/////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
#define CFR_SET_VAL(type, data, pCtx, func, i, step, notNullElems) \
do { \
type *pData = (type *) data; \
@@ -4483,6 +4540,313 @@ static void round_function(SQLFunctionCtx *pCtx) {
#undef CFR_SET_VAL
#undef CFR_SET_VAL_DOUBLE
+//////////////////////////////////////////////////////////////////////////////////
+//cumulative_sum function
+
+static bool csum_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) {
+ if (!function_setup(pCtx, pResInfo)) {
+ return false;
+ }
+
+ SCumSumInfo* pCumSumInfo = GET_ROWCELL_INTERBUF(pResInfo);
+ pCumSumInfo->cumSum = 0;
+ return true;
+}
+
+static void csum_function(SQLFunctionCtx *pCtx) {
+ SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
+ SCumSumInfo* pCumSumInfo = GET_ROWCELL_INTERBUF(pResInfo);
+
+ int32_t notNullElems = 0;
+ int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
+ int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 0 : pCtx->size -1;
+
+ TSKEY* pTimestamp = pCtx->ptsOutputBuf;
+ TSKEY* tsList = GET_TS_LIST(pCtx);
+
+ qDebug("%p csum_function() size:%d, hasNull:%d", pCtx, pCtx->size, pCtx->hasNull);
+
+ for (; i < pCtx->size && i >= 0; i += step) {
+ char* pData = GET_INPUT_DATA(pCtx, i);
+ if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
+ qDebug("%p csum_function() index of null data:%d", pCtx, i);
+ continue;
+ }
+
+ double v = 0;
+ GET_TYPED_DATA(v, double, pCtx->inputType, pData);
+ pCumSumInfo->cumSum += v;
+
+ *pTimestamp = (tsList != NULL) ? tsList[i] : 0;
+ if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) {
+ int64_t *retVal = (int64_t *)pCtx->pOutput;
+ *retVal = (int64_t)(pCumSumInfo->cumSum);
+ } else if (IS_UNSIGNED_NUMERIC_TYPE(pCtx->inputType)) {
+ uint64_t *retVal = (uint64_t *)pCtx->pOutput;
+ *retVal = (uint64_t)(pCumSumInfo->cumSum);
+ } else if (IS_FLOAT_TYPE(pCtx->inputType)) {
+ double *retVal = (double*) pCtx->pOutput;
+ SET_DOUBLE_VAL(retVal, pCumSumInfo->cumSum);
+ }
+
+ ++notNullElems;
+ pCtx->pOutput += pCtx->outputBytes;
+ pTimestamp++;
+ }
+
+ if (notNullElems == 0) {
+ assert(pCtx->hasNull);
+ } else {
+ GET_RES_INFO(pCtx)->numOfRes += notNullElems;
+ GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////
+// Simple Moving_average function
+
+static bool mavg_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) {
+ if (!function_setup(pCtx, pResInfo)) {
+ return false;
+ }
+
+ SMovingAvgInfo* mavgInfo = GET_ROWCELL_INTERBUF(pResInfo);
+ mavgInfo->pos = 0;
+ mavgInfo->kPointsMeet = false;
+ mavgInfo->sum = 0;
+ mavgInfo->numPointsK = (int32_t)pCtx->param[0].i64;
+ mavgInfo->points = (double*)((char*)mavgInfo + sizeof(SMovingAvgInfo));
+ return true;
+}
+
+static void mavg_function(SQLFunctionCtx *pCtx) {
+ SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
+ SMovingAvgInfo* mavgInfo = GET_ROWCELL_INTERBUF(pResInfo);
+
+ int32_t notNullElems = 0;
+ int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
+ int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 0 : pCtx->size -1;
+
+ TSKEY* pTimestamp = pCtx->ptsOutputBuf;
+ char* pOutput = pCtx->pOutput;
+ TSKEY* tsList = GET_TS_LIST(pCtx);
+
+ for (; i < pCtx->size && i >= 0; i += step) {
+ char* pData = GET_INPUT_DATA(pCtx, i);
+ if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
+ qDebug("%p mavg_function() index of null data:%d", pCtx, i);
+ continue;
+ }
+
+ double v = 0;
+ GET_TYPED_DATA(v, double, pCtx->inputType, pData);
+
+ if (!mavgInfo->kPointsMeet && mavgInfo->pos < mavgInfo->numPointsK - 1) {
+ mavgInfo->points[mavgInfo->pos] = v;
+ mavgInfo->sum += v;
+ } else {
+ if (!mavgInfo->kPointsMeet && mavgInfo->pos == mavgInfo->numPointsK - 1){
+ mavgInfo->sum += v;
+ mavgInfo->kPointsMeet = true;
+ } else {
+ mavgInfo->sum = mavgInfo->sum + v - mavgInfo->points[mavgInfo->pos];
+ }
+ mavgInfo->points[mavgInfo->pos] = v;
+
+ *pTimestamp = (tsList != NULL) ? tsList[i] : 0;
+ SET_DOUBLE_VAL(pOutput, mavgInfo->sum / mavgInfo->numPointsK)
+
+ ++notNullElems;
+ pOutput += pCtx->outputBytes;
+ pTimestamp++;
+ }
+
+ ++mavgInfo->pos;
+ if (mavgInfo->pos == mavgInfo->numPointsK) {
+ mavgInfo->pos = 0;
+ }
+ }
+
+ if (notNullElems <= 0) {
+ assert(pCtx->hasNull);
+ } else {
+ GET_RES_INFO(pCtx)->numOfRes += notNullElems;
+ GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////
+// Sample function with reservoir sampling algorithm
+
+static SSampleFuncInfo* getSampleFuncOutputInfo(SQLFunctionCtx *pCtx) {
+ SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
+
+ // only during the first stage of a super table query is data written directly into the final output buffer
+ if (pCtx->stableQuery && pCtx->currentStage != MERGE_STAGE) {
+ return (SSampleFuncInfo *) pCtx->pOutput;
+ } else { // for a normal table query, or a super table query at the merge (secondary) stage, the result is written to the intermediate buffer
+ return GET_ROWCELL_INTERBUF(pResInfo);
+ }
+}
+
+static void assignResultSample(SQLFunctionCtx *pCtx, SSampleFuncInfo *pInfo, int32_t index, int64_t ts, void *pData, uint16_t type, int16_t bytes, char *inputTags) {
+ assignVal(pInfo->values + index*bytes, pData, bytes, type);
+ *(pInfo->timeStamps + index) = ts;
+
+ SExtTagsInfo* pTagInfo = &pCtx->tagInfo;
+ int32_t posTag = 0;
+ char* tags = pInfo->taglists + index*pTagInfo->tagsLen;
+ if (pCtx->currentStage == MERGE_STAGE) {
+ assert(inputTags != NULL);
+ memcpy(tags, inputTags, (size_t)pTagInfo->tagsLen);
+ } else {
+ assert(inputTags == NULL);
+ for (int32_t i = 0; i < pTagInfo->numOfTagCols; ++i) {
+ SQLFunctionCtx* ctx = pTagInfo->pTagCtxList[i];
+ if (ctx->functionId == TSDB_FUNC_TS_DUMMY) {
+ ctx->tag.nType = TSDB_DATA_TYPE_BIGINT;
+ ctx->tag.i64 = ts;
+ }
+
+ tVariantDump(&ctx->tag, tags + posTag, ctx->tag.nType, true);
+ posTag += pTagInfo->pTagCtxList[i]->outputBytes;
+ }
+ }
+}
+
+static void do_reservoir_sample(SQLFunctionCtx *pCtx, SSampleFuncInfo *pInfo, int32_t samplesK, int64_t ts, void *pData, uint16_t type, int16_t bytes) {
+ pInfo->totalPoints++;
+ if (pInfo->numSampled < samplesK) {
+ assignResultSample(pCtx, pInfo, pInfo->numSampled, ts, pData, type, bytes, NULL);
+ pInfo->numSampled++;
+ } else {
+ int32_t j = rand() % (pInfo->totalPoints);
+ if (j < samplesK) {
+ assignResultSample(pCtx, pInfo, j, ts, pData, type, bytes, NULL);
+ }
+ }
+}
+
+static void copySampleFuncRes(SQLFunctionCtx *pCtx, int32_t type) {
+ SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
+ SSampleFuncInfo *pRes = GET_ROWCELL_INTERBUF(pResInfo);
+
+ TSKEY* pTimestamp = pCtx->ptsOutputBuf;
+ char* pOutput = pCtx->pOutput;
+ for (int32_t i = 0; i < pRes->numSampled; ++i) {
+ assignVal(pOutput, pRes->values + i*pRes->colBytes, pRes->colBytes, type);
+ *pTimestamp = *(pRes->timeStamps + i);
+ pOutput += pCtx->outputBytes;
+ pTimestamp++;
+ }
+
+ char **tagOutputs = calloc(pCtx->tagInfo.numOfTagCols, POINTER_BYTES);
+ for (int32_t i = 0; i < pCtx->tagInfo.numOfTagCols; ++i) {
+ tagOutputs[i] = pCtx->tagInfo.pTagCtxList[i]->pOutput;
+ }
+
+ for (int32_t i = 0; i < pRes->numSampled; ++i) {
+ int16_t tagOffset = 0;
+ for (int32_t j = 0; j < pCtx->tagInfo.numOfTagCols; ++j) {
+ memcpy(tagOutputs[j], pRes->taglists + i*pCtx->tagInfo.tagsLen + tagOffset, (size_t)pCtx->tagInfo.pTagCtxList[j]->outputBytes);
+ tagOffset += pCtx->tagInfo.pTagCtxList[j]->outputBytes;
+ tagOutputs[j] += pCtx->tagInfo.pTagCtxList[j]->outputBytes;
+ }
+ }
+
+ tfree(tagOutputs);
+}
+
+static bool sample_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) {
+ if (!function_setup(pCtx, pResInfo)) {
+ return false;
+ }
+
+ srand(taosSafeRand());
+
+ SSampleFuncInfo *pRes = getSampleFuncOutputInfo(pCtx);
+ pRes->totalPoints = 0;
+ pRes->numSampled = 0;
+ pRes->values = ((char*)pRes + sizeof(SSampleFuncInfo));
+ pRes->colBytes = (pCtx->currentStage != MERGE_STAGE) ? pCtx->inputBytes : pCtx->outputBytes;
+ pRes->timeStamps = (int64_t *)((char *)pRes->values + pRes->colBytes * pCtx->param[0].i64);
+ pRes->taglists = (char*)pRes->timeStamps + sizeof(int64_t) * pCtx->param[0].i64;
+ return true;
+}
+
+static void sample_function(SQLFunctionCtx *pCtx) {
+ int32_t notNullElems = 0;
+
+ SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
+ SSampleFuncInfo *pRes = getSampleFuncOutputInfo(pCtx);
+
+ if (pRes->values != ((char*)pRes + sizeof(SSampleFuncInfo))) {
+ pRes->values = ((char*)pRes + sizeof(SSampleFuncInfo));
+ pRes->timeStamps = (int64_t*)((char*)pRes->values + pRes->colBytes * pCtx->param[0].i64);
+ pRes->taglists = (char*)pRes->timeStamps + sizeof(int64_t) * pCtx->param[0].i64;
+ }
+
+ for (int32_t i = 0; i < pCtx->size; ++i) {
+ char *data = GET_INPUT_DATA(pCtx, i);
+ if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
+ continue;
+ }
+
+ notNullElems++;
+
+ TSKEY ts = (pCtx->ptsList != NULL)? GET_TS_DATA(pCtx, i):0;
+ do_reservoir_sample(pCtx, pRes, (int32_t)pCtx->param[0].i64, ts, data, pCtx->inputType, pRes->colBytes);
+ }
+
+ if (!pCtx->hasNull) {
+ assert(pCtx->size == notNullElems);
+ }
+
+ // treat the result as only one result
+ SET_VAL(pCtx, notNullElems, 1);
+
+ if (notNullElems > 0) {
+ pResInfo->hasResult = DATA_SET_FLAG;
+ }
+}
+
+static void sample_func_merge(SQLFunctionCtx *pCtx) {
+ SSampleFuncInfo* pInput = (SSampleFuncInfo*)GET_INPUT_DATA_LIST(pCtx);
+ pInput->values = ((char*)pInput + sizeof(SSampleFuncInfo));
+ pInput->timeStamps = (int64_t*)((char*)pInput->values + pInput->colBytes * pCtx->param[0].i64);
+ pInput->taglists = (char*)pInput->timeStamps + sizeof(int64_t)*pCtx->param[0].i64;
+
+ SSampleFuncInfo *pOutput = getSampleFuncOutputInfo(pCtx);
+ pOutput->totalPoints = pInput->totalPoints;
+ pOutput->numSampled = pInput->numSampled;
+ for (int32_t i = 0; i < pInput->numSampled; ++i) {
+ assignResultSample(pCtx, pOutput, i, pInput->timeStamps[i],
+ pInput->values + i * pInput->colBytes, pCtx->outputType, pInput->colBytes,
+ pInput->taglists + i*pCtx->tagInfo.tagsLen);
+ }
+
+ SET_VAL(pCtx, pInput->numSampled, pOutput->numSampled);
+ if (pOutput->numSampled > 0) {
+ SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
+ pResInfo->hasResult = DATA_SET_FLAG;
+ }
+}
+
+static void sample_func_finalizer(SQLFunctionCtx *pCtx) {
+ SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
+ SSampleFuncInfo *pRes = GET_ROWCELL_INTERBUF(pResInfo);
+
+ if (pRes->numSampled == 0) { // no result
+ assert(pResInfo->hasResult != DATA_SET_FLAG);
+ }
+
+ pResInfo->numOfRes = pRes->numSampled;
+ GET_TRUE_DATA_TYPE();
+ copySampleFuncRes(pCtx, type);
+
+ doFinalizer(pCtx);
+}
+
/////////////////////////////////////////////////////////////////////////////////////////////
/*
* function compatible list.
@@ -4496,13 +4860,15 @@ static void round_function(SQLFunctionCtx *pCtx) {
*/
int32_t functionCompatList[] = {
// count, sum, avg, min, max, stddev, percentile, apercentile, first, last
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- // last_row,top, bottom, spread, twa, leastsqr, ts, ts_dummy, tag_dummy, ts_comp
- 4, -1, -1, 1, 1, 1, 1, 1, 1, -1,
- // tag, colprj, tagprj, arithmetic, diff, first_dist, last_dist, stddev_dst, interp rate irate
- 1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1,
- // tid_tag, derivative, blk_info,ceil, floor, round
- 6, 8, 7, 1, 1, 1
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ // last_row,top, bottom, spread, twa, leastsqr, ts, ts_dummy, tag_dummy, ts_comp
+ 4, -1, -1, 1, 1, 1, 1, 1, 1, -1,
+ // tag, colprj, tagprj, arithm, diff, first_dist, last_dist, stddev_dst, interp rate, irate
+ 1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1,
+ // tid_tag, deriv, ceil, floor, round, csum, mavg, sample,
+ 6, 8, 1, 1, 1, -1, -1, -1,
+ // block_info
+ 7
};
SAggFunctionInfo aAggs[] = {{
@@ -4904,51 +5270,85 @@ SAggFunctionInfo aAggs[] = {{
noop1,
dataBlockRequired,
},
- {
- // 33
- "_block_dist", // return table id and the corresponding tags for join match and subscribe
- TSDB_FUNC_BLKINFO,
- TSDB_FUNC_BLKINFO,
- TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE,
- function_setup,
- blockInfo_func,
- blockinfo_func_finalizer,
- block_func_merge,
- dataBlockRequired,
+ {// 33
+ "ceil",
+ TSDB_FUNC_CEIL,
+ TSDB_FUNC_CEIL,
+ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SCALAR,
+ function_setup,
+ ceil_function,
+ doFinalizer,
+ noop1,
+ dataBlockRequired
+ },
+ {// 34
+ "floor",
+ TSDB_FUNC_FLOOR,
+ TSDB_FUNC_FLOOR,
+ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SCALAR,
+ function_setup,
+ floor_function,
+ doFinalizer,
+ noop1,
+ dataBlockRequired
+ },
+ {// 35
+ "round",
+ TSDB_FUNC_ROUND,
+ TSDB_FUNC_ROUND,
+ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SCALAR,
+ function_setup,
+ round_function,
+ doFinalizer,
+ noop1,
+ dataBlockRequired
},
{
- // 34
- "ceil",
- TSDB_FUNC_CEIL,
- TSDB_FUNC_CEIL,
- TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SCALAR,
- function_setup,
- ceil_function,
+ // 36
+ "csum",
+ TSDB_FUNC_CSUM,
+ TSDB_FUNC_INVALID_ID,
+ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY,
+ csum_function_setup,
+ csum_function,
doFinalizer,
noop1,
- dataBlockRequired
+ dataBlockRequired,
},
{
- // 35
- "floor",
- TSDB_FUNC_FLOOR,
- TSDB_FUNC_FLOOR,
- TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SCALAR,
- function_setup,
- floor_function,
+ // 37
+ "mavg",
+ TSDB_FUNC_MAVG,
+ TSDB_FUNC_INVALID_ID,
+ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY,
+ mavg_function_setup,
+ mavg_function,
doFinalizer,
noop1,
- dataBlockRequired
+ dataBlockRequired,
},
{
- // 36
- "round",
- TSDB_FUNC_ROUND,
- TSDB_FUNC_ROUND,
- TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SCALAR,
+ // 38
+ "sample",
+ TSDB_FUNC_SAMPLE,
+ TSDB_FUNC_SAMPLE,
+ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY,
+ sample_function_setup,
+ sample_function,
+ sample_func_finalizer,
+ sample_func_merge,
+ dataBlockRequired,
+ },
+ {
+ // 39
+ "_block_dist",
+ TSDB_FUNC_BLKINFO,
+ TSDB_FUNC_BLKINFO,
+ TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE,
function_setup,
- round_function,
- doFinalizer,
- noop1,
- dataBlockRequired
- }};
+ blockInfo_func,
+ blockinfo_func_finalizer,
+ block_func_merge,
+ dataBlockRequired,
+ },
+};
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 982996d70d6e8c05c45425e737b57a08daf331c9..bac81c98980f2d48d8436c1175baa697932aa126 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -2060,7 +2060,7 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr
int32_t functionId = pCtx->functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
- int32_t f = pExpr[0].base.functionId;
+ int32_t f = pExpr[i-1].base.functionId;
assert(f == TSDB_FUNC_TS || f == TSDB_FUNC_TS_DUMMY);
pCtx->param[2].i64 = pQueryAttr->order.order;
@@ -3653,7 +3653,8 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i
// set the timestamp output buffer for top/bottom/diff query
int32_t fid = pCtx[i].functionId;
- if (fid == TSDB_FUNC_TOP || fid == TSDB_FUNC_BOTTOM || fid == TSDB_FUNC_DIFF || fid == TSDB_FUNC_DERIVATIVE) {
+ if (fid == TSDB_FUNC_TOP || fid == TSDB_FUNC_BOTTOM || fid == TSDB_FUNC_DIFF || fid == TSDB_FUNC_DERIVATIVE ||
+ fid == TSDB_FUNC_SAMPLE || fid == TSDB_FUNC_MAVG || fid == TSDB_FUNC_CSUM) {
if (i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput;
}
}
@@ -3690,7 +3691,10 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf
// set the correct pointer after the memory buffer reallocated.
int32_t functionId = pBInfo->pCtx[i].functionId;
- if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
+ if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM ||
+ functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE ||
+ functionId == TSDB_FUNC_CSUM || functionId == TSDB_FUNC_MAVG ||
+ functionId == TSDB_FUNC_SAMPLE ) {
if (i > 0) pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput;
}
}
@@ -3702,7 +3706,9 @@ void copyTsColoum(SSDataBlock* pRes, SQLFunctionCtx* pCtx, int32_t numOfOutput)
char *src = NULL;
for (int32_t i = 0; i < numOfOutput; i++) {
int32_t functionId = pCtx[i].functionId;
- if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
+ if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE ||
+ functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_CSUM ||
+ functionId == TSDB_FUNC_SAMPLE) {
needCopyTs = true;
if (i > 0 && pCtx[i-1].functionId == TSDB_FUNC_TS_DUMMY){
SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i - 1); // find ts data
@@ -3918,7 +3924,8 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe
continue;
}
- if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
+ if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF ||
+ functionId == TSDB_FUNC_CSUM || functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_SAMPLE) {
if(i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput;
}
@@ -3979,7 +3986,9 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF
offset += pCtx[i].outputBytes;
int32_t functionId = pCtx[i].functionId;
- if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
+ if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM ||
+ functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE ||
+ functionId == TSDB_FUNC_SAMPLE || functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_CSUM) {
if(i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput;
}
@@ -7922,7 +7931,7 @@ static int32_t updateOutputBufForTopBotQuery(SQueriedTableInfo* pTableInfo, SCol
for (int32_t i = 0; i < numOfOutput; ++i) {
int16_t functId = pExprs[i].base.functionId;
- if (functId == TSDB_FUNC_TOP || functId == TSDB_FUNC_BOTTOM) {
+ if (functId == TSDB_FUNC_TOP || functId == TSDB_FUNC_BOTTOM || functId == TSDB_FUNC_SAMPLE) {
int32_t j = getColumnIndexInSource(pTableInfo, &pExprs[i].base, pTagCols);
if (j < 0 || j >= pTableInfo->numOfCols) {
return TSDB_CODE_QRY_INVALID_MSG;
@@ -8914,6 +8923,7 @@ static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type
if (IS_VAR_DATA_TYPE(type)) {
// Binary data overflows for sort of unknown reasons. Let trim the overflow data
+ // overflow one reason is client tag length is less than server tag length
if (varDataTLen(val) > bytes) {
int32_t maxLen = bytes - VARSTR_HEADER_SIZE;
int32_t len = (varDataLen(val) > maxLen)? maxLen:varDataLen(val);
diff --git a/src/query/src/qTableMeta.c b/src/query/src/qTableMeta.c
index f687b8aa1ffc530d0c4a71c553809dd3bfb83932..f786f4438c2915299fa320818d7a36811eef40dd 100644
--- a/src/query/src/qTableMeta.c
+++ b/src/query/src/qTableMeta.c
@@ -84,6 +84,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) {
pTableMeta->tableInfo = (STableComInfo) {
.numOfTags = pTableMetaMsg->numOfTags,
.precision = pTableMetaMsg->precision,
+ .update = pTableMetaMsg->update,
.numOfColumns = pTableMetaMsg->numOfColumns,
};
diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c
index bc27e094db3dcb85ffa73810e922d73cd42ab3a0..8babdbf1c39c8da9139c691e24f0a87ab57fb854 100644
--- a/src/query/src/qUtil.c
+++ b/src/query/src/qUtil.c
@@ -33,7 +33,9 @@ typedef struct SCompSupporter {
int32_t getRowNumForMultioutput(SQueryAttr* pQueryAttr, bool topBottomQuery, bool stable) {
if (pQueryAttr && (!stable)) {
for (int16_t i = 0; i < pQueryAttr->numOfOutput; ++i) {
- if (pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_TOP || pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_BOTTOM) {
+ if (pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_TOP ||
+ pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_BOTTOM ||
+ pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_SAMPLE) {
return (int32_t)pQueryAttr->pExpr1[i].base.param[0].i64;
}
}
diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c
index a311868de6f7254d776f08a4f4a247293609aef5..72c86018e83399b7368130dc5b2e5af386caa041 100644
--- a/src/tsdb/src/tsdbMeta.c
+++ b/src/tsdb/src/tsdbMeta.c
@@ -210,7 +210,7 @@ void *tsdbGetTableTagVal(const void* pTable, int32_t colId, int16_t type, int16_
}
char *val = tdGetKVRowValOfCol(((STable*)pTable)->tagVal, colId);
- assert(type == pCol->type && bytes >= pCol->bytes);
+ assert(type == pCol->type);
// if (val != NULL && IS_VAR_DATA_TYPE(type)) {
// assert(varDataLen(val) < pCol->bytes);
diff --git a/tests/examples/c/connect_two_cluster.c b/tests/examples/c/connect_two_cluster.c
new file mode 100644
index 0000000000000000000000000000000000000000..fa54dd437036f12915d62a60f96b90e6a7adc45f
--- /dev/null
+++ b/tests/examples/c/connect_two_cluster.c
@@ -0,0 +1,162 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+#include "taos.h"
+int numOfThreads = 1;
+
+void* connectClusterAndDeal(void *arg) {
+ int port = *(int *)arg;
+ const char *host = "127.0.0.1";
+ const char *user = "root";
+ const char *passwd = "taosdata";
+ TAOS* taos1 = taos_connect(host, user, passwd, "", port);
+ TAOS* taos2 = taos_connect(host, user, passwd, "", port + 1000);
+ if (NULL == taos1 || NULL == taos2) {
+ printf("connect to (%d/%d) failed \n", port, port + 1000);
+ return NULL;
+ }
+ TAOS_RES *result = NULL;
+ result = taos_query(taos1, "drop database if exists db");
+ if (0 != taos_errno(result)) {
+ printf("failed %s\n", taos_errstr(result));
+ }
+ taos_free_result(result);
+
+ result = taos_query(taos2, "drop database if exists db");
+ if (0 != taos_errno(result)) {
+ printf("failed %s\n", taos_errstr(result));
+ }
+
+ taos_free_result(result);
+ // ========= build database
+ {
+ result = taos_query(taos1, "create database db");
+ if (0 != taos_errno(result)) {
+ printf("failed %s\n", taos_errstr(result));
+ }
+
+ taos_free_result(result);
+ }
+ {
+ result = taos_query(taos2, "create database db");
+ if (0 != taos_errno(result)) {
+ printf("failed %s\n", taos_errstr(result));
+ }
+ taos_free_result(result);
+ }
+
+ //======== create table
+ {
+ result = taos_query(taos1, "create stable db.stest (ts timestamp, port int) tags(tport int)");
+ if (0 != taos_errno(result)) {
+ printf("failed %s\n", taos_errstr(result));
+ }
+ taos_free_result(result);
+ }
+ {
+ result = taos_query(taos2, "create stable db.stest (ts timestamp, port int) tags(tport int)");
+ if (0 != taos_errno(result)) {
+ printf("failed %s\n", taos_errstr(result));
+ }
+ taos_free_result(result);
+
+ }
+ //======== create table
+ {
+ result = taos_query(taos1, "use db");
+ if (0 != taos_errno(result)) {
+ printf("failed %s\n", taos_errstr(result));
+ }
+ taos_free_result(result);
+ }
+ {
+ result = taos_query(taos2, "use db");
+ if (0 != taos_errno(result)) {
+ printf("failed %s\n", taos_errstr(result));
+ }
+ taos_free_result(result);
+
+ }
+ {
+ char buf[1024] = {0};
+ sprintf(buf, "insert into db.t1 using stest tags(%d) values(now, %d)", port, port);
+ for (int i = 0; i < 100000; i++) {
+ //printf("error here\t");
+ result = taos_query(taos1, buf);
+ if (0 != taos_errno(result)) {
+ printf("failed %s\n", taos_errstr(result));
+ }
+ taos_free_result(result);
+ //sleep(1);
+ }
+ }
+
+ {
+ char buf[1024] = {0};
+ sprintf(buf, "insert into db.t1 using stest tags(%d) values(now, %d)", port + 1000, port + 1000);
+ for (int i = 0; i < 100000; i++) {
+ result = taos_query(taos2, buf);
+ if (0 != taos_errno(result)) {
+ printf("failed %s\n", taos_errstr(result));
+ }
+ taos_free_result(result);
+ //sleep(1);
+ }
+ }
+ // query result
+ {
+ result = taos_query(taos1, "select * from stest");
+ if (result == NULL || taos_errno(result) != 0) {
+ printf("query failed %s\n", taos_errstr(result));
+ taos_free_result(result); return NULL;
+ }
+ TAOS_ROW row;
+ int rows = 0;
+ int num_fields = taos_field_count(result);
+ TAOS_FIELD *fields = taos_fetch_fields(result);
+ while ((row = taos_fetch_row(result))) {
+ char temp[1024] = {0};
+ rows++;
+ taos_print_row(temp, row, fields , num_fields);
+ printf("%s\n", temp);
+ }
+ taos_free_result(result);
+ }
+
+ // query result
+ {
+ result = taos_query(taos2, "select * from stest");
+ if (result == NULL || taos_errno(result) != 0) {
+ printf("query failed %s\n", taos_errstr(result));
+ taos_free_result(result); return NULL;
+ }
+ TAOS_ROW row;
+ int rows = 0;
+ int num_fields = taos_field_count(result);
+ TAOS_FIELD *fields = taos_fetch_fields(result);
+ while ((row = taos_fetch_row(result))) {
+ char temp[1024] = {0};
+ rows++;
+ taos_print_row(temp, row, fields , num_fields);
+ printf("%s\n", temp);
+ }
+ taos_free_result(result);
+ }
+ taos_close(taos1);
+ taos_close(taos2);
+ return NULL;
+}
+int main(int argc, char* argv[]) {
+ pthread_t *pthreads = malloc(sizeof(pthread_t) * numOfThreads);
+
+ int *port = malloc(sizeof(int) * numOfThreads);
+ port[0] = 6030;
+ for (int i = 0; i < numOfThreads; i++) {
+ pthread_create(&pthreads[i], NULL, connectClusterAndDeal, (void *)&port[i]);
+ }
+ for (int i = 0; i < numOfThreads; i++) {
+ pthread_join(pthreads[i], NULL);
+ }
+ free(port);
+}
diff --git a/tests/pytest/client/one_client_connect_two_server.py b/tests/pytest/client/one_client_connect_two_server.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d5b127405ffbdaa533a9f628b4bb2323b168d71
--- /dev/null
+++ b/tests/pytest/client/one_client_connect_two_server.py
@@ -0,0 +1,342 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import json
+import taos
+import time
+import random
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+'''
+
+Before test start,Two TDengine services have been set up on different servers
+
+'''
+
+host1 = '192.168.1.101'
+host2 = '192.168.1.102'
+user = 'root'
+password = 'taosdata'
+cfgdir = '/home/cp/taos/TDengine/sim/dnode1/cfg'
+
+conn1 = taos.connect(host=host1, user=user, password=password, config=cfgdir)
+conn2 = taos.connect(host=host2, user=user, password=password, config=cfgdir)
+cursor1 = conn1.cursor()
+cursor2 = conn2.cursor()
+tdSql1 = TDSql()
+tdSql2 = TDSql()
+tdSql1.init(cursor1)
+tdSql2.init(cursor2)
+
+dbname11 = 'db11'
+dbname12 = 'db12'
+dbname21 = 'db21'
+stbname11 = 'stb11'
+stbname12 = 'stb12'
+stbname21 = 'stb21'
+tbnum = 100
+data_row = 100
+db1_stb1_column = 'ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) '
+db1_stb1_tag = 'st1 int, st2 float, st3 timestamp, st4 binary(16), st5 double, st6 bool, st7 bigint, st8 smallint, st9 tinyint, st10 nchar(16)'
+
+def dbsql(dbname):
+ return f"create database {dbname} keep 3650"
+
+def stbsql(stbname, columntype, tagtype):
+ return f'create stable {stbname} ({columntype}) tags ({tagtype}) '
+
+def tbsql(tbname, stbname, tags):
+ return f'create table {tbname} using {stbname} tags ({tags})'
+
+def datasql(tbname, data):
+ return f'insert into {tbname} values ({data})'
+
+def testquery():
+ ti = random.randint(0,tbnum-1)
+
+ tdSql1.query(f"select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10 from tm{ti}")
+ tdSql1.checkData(0, 0, ti)
+ tdSql1.checkData(0, 1, ti)
+ tdSql1.checkData(0, 2, ti)
+ tdSql1.checkData(0, 3, f'binary_{ti}')
+ tdSql1.checkData(0, 4, ti)
+ tdSql1.checkData(0, 5, ti%2)
+ tdSql1.checkData(0, 6, ti)
+ tdSql1.checkData(0, 7, ti%32768)
+ tdSql1.checkData(0, 8, ti%128)
+ tdSql1.checkData(0, 9, f'nchar_{ti}')
+ tdSql2.query(f"select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10 from tn{ti}")
+ tdSql2.checkData(0, 0, ti+10000)
+ tdSql2.checkData(0, 1, ti+10000)
+ tdSql2.checkData(0, 2, ti+10000)
+ tdSql2.checkData(0, 3, f'binary_{ti+10000}')
+ tdSql2.checkData(0, 4, ti+10000)
+ tdSql2.checkData(0, 5, (ti+10000)%2)
+ tdSql2.checkData(0, 6, ti+10000)
+ tdSql2.checkData(0, 7, (ti+10000)%32768)
+ tdSql2.checkData(0, 8, (ti+10000)%128)
+ tdSql2.checkData(0, 9, f'nchar_{ti+10000}')
+
+ tdSql1.query(f"select last(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10) from {stbname11}")
+ tdSql1.checkData(0, 0, data_row-1)
+ tdSql1.checkData(0, 1, data_row-1)
+ tdSql1.checkData(0, 2, data_row-1)
+ tdSql1.checkData(0, 3, f'binary_{data_row-1}')
+ tdSql1.checkData(0, 4, data_row-1)
+ tdSql1.checkData(0, 5, (data_row-1)%2)
+ tdSql1.checkData(0, 6, data_row-1)
+ tdSql1.checkData(0, 7, (data_row-1)%32768)
+ tdSql1.checkData(0, 8, (data_row-1)%128)
+ tdSql1.checkData(0, 9, f'nchar_{data_row-1}')
+
+ tdSql1.query(f"select first(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10) from {stbname11}")
+ tdSql1.checkData(0, 0, 0)
+ tdSql1.checkData(0, 1, 0)
+ tdSql1.checkData(0, 2, 0)
+ tdSql1.checkData(0, 3, f'binary_0')
+ tdSql1.checkData(0, 4, 0)
+ tdSql1.checkData(0, 5, 0)
+ tdSql1.checkData(0, 6, 0)
+ tdSql1.checkData(0, 7, 0)
+ tdSql1.checkData(0, 8, 0)
+ tdSql1.checkData(0, 9, f'nchar_0')
+
+ tdSql1.error("select * from")
+
+ tdSql1.query(f"select last(*) from tm1")
+ tdSql1.checkData(0, 1, 1)
+ tdSql1.checkData(0, 4, "binary_1")
+
+
+ tdSql1.query(f"select min(c1),max(c2) from {stbname11}")
+ tdSql1.checkData(0, 0, 0)
+ tdSql1.checkData(0, 1, data_row-1)
+
+ tdSql2.query(f"select count(*), count(c1) from {stbname21}")
+ tdSql2.checkData(0, 0, data_row)
+ tdSql2.checkData(0, 1, data_row)
+
+ tdSql2.query(f"select first(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10) from {stbname21}")
+ tdSql2.checkData(0, 0, 10000)
+ tdSql2.checkData(0, 1, 10000)
+ tdSql2.checkData(0, 2, 10000)
+ tdSql2.checkData(0, 3, f'binary_10000')
+ tdSql2.checkData(0, 4, 10000)
+ tdSql2.checkData(0, 5, 10000%2)
+ tdSql2.checkData(0, 6, 10000)
+ tdSql2.checkData(0, 7, 10000%32768)
+ tdSql2.checkData(0, 8, 10000%128)
+ tdSql2.checkData(0, 9, f'nchar_10000')
+
+ tdSql2.query(f"select last(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10) from {stbname21}")
+ tdSql2.checkData(0, 0, data_row+9999)
+ tdSql2.checkData(0, 1, data_row+9999)
+ tdSql2.checkData(0, 2, data_row+9999)
+ tdSql2.checkData(0, 3, f'binary_{data_row+9999}')
+ tdSql2.checkData(0, 4, data_row+9999)
+ tdSql2.checkData(0, 5, (data_row+9999)%2)
+ tdSql2.checkData(0, 6, data_row+9999)
+ tdSql2.checkData(0, 7, (data_row+9999)%32768)
+ tdSql2.checkData(0, 8, (data_row+9999)%128)
+ tdSql2.checkData(0, 9, f'nchar_{data_row+9999}')
+
+ tdSql1.query(f"select max(c1) from (select top(c1,10) c1 from {stbname11})")
+ tdSql1.checkData(0, 0, data_row-1)
+ tdSql2.query(f"select max(c1) from (select top(c1,10) c1 from {stbname21})")
+ tdSql2.checkData(0, 0, data_row+9999)
+
+ tdSql1.query(f"select avg(c1) from {stbname11}")
+ tdSql1.checkData(0, 0, sum(range(data_row))/data_row)
+ tdSql2.query(f"select avg(c1) from {stbname21}")
+ tdSql2.checkData(0, 0, sum(range(data_row))/data_row+10000)
+
+ tdSql1.query(f"select spread(c1) from {stbname11}")
+ tdSql1.checkData(0, 0, data_row-1)
+ tdSql2.query(f"select spread(c1) from {stbname21}")
+ tdSql2.checkData(0, 0, data_row-1)
+
+ tdSql1.query(f"select max(c1)*2 from {stbname11}")
+ tdSql1.checkData(0, 0, (data_row-1)*2)
+ tdSql2.query(f"select max(c1)*2 from {stbname21}")
+ tdSql2.checkData(0, 0, (data_row+9999)*2)
+
+ tdSql1.query(f"select avg(c1) from {stbname11} where c1 <= 10")
+ tdSql1.checkData(0, 0, 5)
+ tdSql2.query(f"select avg(c1) from {stbname21} where c1 <= 10010")
+ tdSql2.checkData(0, 0, 10005)
+
+ tdSql1.query(f"select * from {stbname11} where tbname like 'tn%'")
+ tdSql1.checkRows(0)
+ tdSql2.query(f"select * from {stbname21} where tbname like 'tm%'")
+ tdSql2.checkRows(0)
+
+ tdSql1.query(f"select max(c1) from {stbname11} group by tbname")
+ tdSql1.checkRows(tbnum)
+ tdSql2.query(f"select max(c1) from {stbname21} group by tbname")
+ tdSql2.checkRows(tbnum)
+
+ tdSql1.error(f"select * from {stbname11}, {stbname21} where {stbname11}.ts = {stbname21}.ts and {stbname11}.st1 = {stbname21}.st1")
+ tdSql2.error(f"select * from {stbname11}, {stbname21} where {stbname11}.ts = {stbname21}.ts and {stbname11}.st1 = {stbname21}.st1")
+
+if __name__ == '__main__':
+
+ tdSql1.execute('reset query cache')
+ tdSql2.execute('reset query cache')
+ tdSql1.execute(f'drop database if exists {dbname11}')
+ tdSql1.execute(f'drop database if exists {dbname12}')
+ tdSql1.execute(f'drop database if exists {dbname21}')
+ tdSql2.execute(f'drop database if exists {dbname21}')
+ tdSql2.execute(f'drop database if exists {dbname11}')
+ tdSql2.execute(f'drop database if exists {dbname12}')
+
+ tdSql1.execute(dbsql(dbname11))
+ tdSql1.query('show databases')
+ tdSql1.checkRows(1)
+ tdSql2.query('show databases')
+ tdSql2.checkRows(0)
+
+ tdSql2.execute(dbsql(dbname21))
+
+ tdSql1.query(f'show databases')
+ tdSql1.checkData(0, 0, dbname11)
+ tdSql2.query(f'show databases')
+ tdSql2.checkData(0, 0, dbname21)
+
+ tdSql1.execute(f'use {dbname11}')
+ tdSql1.query("show stables")
+ tdSql1.checkRows(0)
+ tdSql2.error("show stables")
+
+
+ ### conn1 create stable
+ tdSql1.execute(stbsql(stbname11, db1_stb1_column, db1_stb1_tag))
+ tdSql1.query(f"show stables like '{stbname11}' ")
+ tdSql1.checkRows(1)
+ tdSql2.error("show stables")
+
+ # 'st1 int, st2 float, st3 timestamp, st4 binary(16), st5 double, st6 bool, st7 bigint, st8 smallint, st9 tinyint, st10 nchar(16)'
+ for i in range(100):
+ t1name = f"t{i}"
+ stname = stbname11
+ tags = f'{i}, {i}, {i}, "binary_{i}", {i}, {i%2}, {i}, {i%32768}, {i%128}, "nchar_{i}"'
+ tdSql1.execute(tbsql(t1name, stname, tags))
+
+ tdSql2.error(f'select * from t{random.randint(0, 99)}')
+
+ tdSql1.query("show tables")
+ tdSql1.checkRows(100)
+ tdSql2.error("show tables")
+
+ tdSql1.query(f'select * from {stbname11}')
+ # tdSql1.query(f'select * from t1')
+ tdSql1.checkRows(0)
+ tdSql2.error(f'select * from {stname}')
+
+ # conn1 insert data
+ # 'ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) '
+ nowtime = int(round(time.time() * 1000))
+ for i in range(100):
+ data = f'{nowtime+i*10}, {i}, {i}, {i}, "binary_{i}", {i}, {i%2}, {i}, {i%32768}, {i%128}, "nchar_{i}"'
+ tdSql1.execute(datasql(f"t{i}", data))
+ # tdSql2.error(datasql(f't{i}', data))
+ ti = random.randint(0,99)
+ tdSql1.query(f"select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10 from t{ti}")
+ tdSql1.checkData(0, 0, ti)
+ tdSql1.checkData(0, 1, ti)
+ tdSql1.checkData(0, 2, ti)
+ tdSql1.checkData(0, 3, f'binary_{ti}')
+ tdSql1.checkData(0, 4, ti)
+ tdSql1.checkData(0, 5, ti%2)
+ tdSql1.checkData(0, 6, ti)
+ tdSql1.checkData(0, 7, ti%32768)
+ tdSql1.checkData(0, 8, ti%128)
+ tdSql1.checkData(0, 9, f'nchar_{ti}')
+ tdSql2.error(f"select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10 from t{ti}")
+
+ # delete conn1.database and reinsert the data to conn1.db and conn2.db
+ tdSql1.execute(f"drop database if exists {dbname11}")
+ tdSql1.query("show databases")
+ tdSql1.checkRows(0)
+ tdSql2.query(f"show databases")
+ tdSql2.checkData(0, 0, dbname21)
+
+ tdSql1.execute(dbsql(dbname11))
+ tdSql1.query(f"show databases")
+ tdSql1.checkData(0, 0, dbname11)
+ tdSql2.query(f"show databases ")
+ tdSql2.checkData(0, 0, dbname21)
+
+ tdSql1.execute(dbsql(dbname12))
+ tdSql1.query("show databases")
+ tdSql1.checkData(0, 0, dbname11)
+ tdSql1.checkData(1, 0, dbname12)
+ tdSql2.query("show databases")
+ tdSql2.checkData(0, 0, dbname21)
+
+ tdSql1.execute(f"use {dbname11}")
+ tdSql1.query("show stables")
+ tdSql1.checkRows(0)
+ tdSql2.error("show stables")
+
+ tdSql2.execute(f"use {dbname21}")
+ tdSql2.query("show stables")
+ tdSql2.checkRows(0)
+ tdSql2.error(f"use {dbname12}")
+
+ tdSql1.execute(stbsql(stbname11, db1_stb1_column, db1_stb1_tag))
+ tdSql1.query("show stables")
+ tdSql1.checkRows(1)
+ tdSql2.query("show stables")
+ tdSql2.checkRows(0)
+
+ tdSql2.execute(stbsql(stbname21, db1_stb1_column, db1_stb1_tag))
+ tdSql1.query("show stables ")
+ tdSql1.checkRows(1)
+ tdSql1.query(f"show stables like '{stbname11}' ")
+ tdSql1.checkRows(1)
+ tdSql2.query("show stables ")
+ tdSql2.checkRows(1)
+ tdSql2.query(f"show stables like '{stbname21}' ")
+ tdSql2.checkRows(1)
+
+ for i in range(tbnum):
+ t1name = f"tm{i}"
+ t2name = f"tn{i}"
+ s1tname = stbname11
+ s2tname = stbname21
+ tags = f'{i}, {i}, {i}, "binary_{i}", {i}, {i % 2}, {i}, {i % 32768}, {i % 128}, "nchar_{i}"'
+ tdSql1.execute(tbsql(t1name, s1tname, tags))
+ # tdSql2.error(f'select * from {t1name}')
+ tdSql2.execute(tbsql(t2name, s2tname, tags))
+ # tdSql2.query(f'select * from {t2name}')
+ # tdSql1.error(f'select * from {t2name}')
+
+
+ tdSql1.query("show tables like 'tm%' ")
+ tdSql1.checkRows(tbnum)
+ tdSql2.query("show tables like 'tn%' ")
+ tdSql2.checkRows(tbnum)
+
+ for i in range(data_row):
+ data1 = f'{nowtime + i * 10}, {i}, {i}, {i}, "binary_{i}", {i}, {i % 2}, {i}, {i % 32768}, {i % 128}, "nchar_{i}"'
+ data2 = f'{nowtime+i*10}, {i+10000}, {i+10000}, {i+10000}, "binary_{i+10000}", {i+10000}, {(i+10000)%2}, {i+10000}, {(i+10000)%32768}, {(i+10000)%128}, "nchar_{i+10000}" '
+ tdSql1.execute(datasql(f"tm{i}", data1))
+ tdSql2.execute(datasql(f'tn{i}', data2))
+
+ testquery()
\ No newline at end of file
diff --git a/tests/pytest/fulltest.bat b/tests/pytest/fulltest.bat
new file mode 100644
index 0000000000000000000000000000000000000000..fd74f2ad029c982a3a3dd98ae0c8df264bab9c66
--- /dev/null
+++ b/tests/pytest/fulltest.bat
@@ -0,0 +1,22 @@
+
+python .\test.py -f insert\basic.py
+python .\test.py -f insert\int.py
+python .\test.py -f insert\float.py
+python .\test.py -f insert\bigint.py
+python .\test.py -f insert\bool.py
+python .\test.py -f insert\double.py
+python .\test.py -f insert\smallint.py
+python .\test.py -f insert\tinyint.py
+python .\test.py -f insert\date.py
+python .\test.py -f insert\binary.py
+python .\test.py -f insert\nchar.py
+
+python .\test.py -f query\filter.py
+python .\test.py -f query\filterCombo.py
+python .\test.py -f query\queryNormal.py
+python .\test.py -f query\queryError.py
+python .\test.py -f query\filterAllIntTypes.py
+python .\test.py -f query\filterFloatAndDouble.py
+python .\test.py -f query\filterOtherTypes.py
+python .\test.py -f query\querySort.py
+python .\test.py -f query\queryJoin.py
\ No newline at end of file
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index 32551042a69f63a5fbf9eb84e26a1c4a8c6ce4b7..b6b6597d23d1225b056a9d78f65e9b9d760f27a6 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -48,7 +48,7 @@ python3 ./test.py -f table/del_stable.py
#stable
python3 ./test.py -f stable/insert.py
-python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py
+# python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py
# tag
python3 ./test.py -f tag_lite/filter.py
@@ -217,7 +217,7 @@ python3 ./test.py -f perfbenchmark/bug3433.py
python3 ./test.py -f perfbenchmark/taosdemoInsert.py
#taosdemo
-python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
+# python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
#query
@@ -345,6 +345,7 @@ python3 ./test.py -f functions/function_spread.py -r 1
python3 ./test.py -f functions/function_stddev.py -r 1
python3 ./test.py -f functions/function_sum.py -r 1
python3 ./test.py -f functions/function_top.py -r 1
+python3 ./test.py -f functions/function_sample.py -r 1
python3 ./test.py -f functions/function_twa.py -r 1
python3 ./test.py -f functions/function_twa_test2.py
python3 ./test.py -f functions/function_stddev_td2555.py
@@ -403,7 +404,7 @@ python3 ./test.py -f query/queryDiffColsOr.py
python3 ./test.py -f client/nettest.py
python3 ./test.py -f query/queryRegex.py
-
+python3 ./test.py -f tools/taosdemoTestdatatype.py
#======================p4-end===============
diff --git a/tests/pytest/functions/function_sample.py b/tests/pytest/functions/function_sample.py
new file mode 100644
index 0000000000000000000000000000000000000000..f86805082bd9ffe52e192e823c5abebaff6c9c4e
--- /dev/null
+++ b/tests/pytest/functions/function_sample.py
@@ -0,0 +1,69 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys, inspect
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+import collections
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.sample_times = 10000
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ % (self.ts + i, i, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+
+ print("begin sampling. sql: select sample(col1, 2) from test1")
+ freqDict = collections.defaultdict(int)
+ for i in range(self.sample_times):
+ tdSql.query('select sample(col1, 2) from test1')
+ res1 = tdSql.getData(0, 1);
+ res2 = tdSql.getData(1, 1);
+ freqDict[res1] = freqDict[res1] + 1
+ freqDict[res2] = freqDict[res2] + 1
+ print("end sampling.")
+
+ lower_bound = self.sample_times/5 - self.sample_times/50;
+ upper_bound = self.sample_times/5 + self.sample_times/50;
+ for i in range(self.rowNum):
+ print("{} are sampled in {} times".format(i, freqDict[i]))
+
+ if not (freqDict[i]>=lower_bound and freqDict[i]<=upper_bound):
+ print("run it again. if it keeps appearing, sample function bug")
+ caller = inspect.getframeinfo(inspect.stack()[0][0])
+ args = (caller.filename, caller.lineno-2)
+ tdLog.exit("{}({}) failed. sample function failure".format(args[0], args[1]))
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_sample_restart.py b/tests/pytest/functions/function_sample_restart.py
new file mode 100644
index 0000000000000000000000000000000000000000..f86805082bd9ffe52e192e823c5abebaff6c9c4e
--- /dev/null
+++ b/tests/pytest/functions/function_sample_restart.py
@@ -0,0 +1,69 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys, inspect
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+import collections
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ self.rowNum = 10
+ self.sample_times = 10000
+ self.ts = 1537146000000
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
+ tdSql.execute("create table test1 using test tags('beijing')")
+ for i in range(self.rowNum):
+ tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ % (self.ts + i, i, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+
+
+ print("begin sampling. sql: select sample(col1, 2) from test1")
+ freqDict = collections.defaultdict(int)
+ for i in range(self.sample_times):
+ tdSql.query('select sample(col1, 2) from test1')
+ res1 = tdSql.getData(0, 1);
+ res2 = tdSql.getData(1, 1);
+ freqDict[res1] = freqDict[res1] + 1
+ freqDict[res2] = freqDict[res2] + 1
+ print("end sampling.")
+
+ lower_bound = self.sample_times/5 - self.sample_times/50;
+ upper_bound = self.sample_times/5 + self.sample_times/50;
+ for i in range(self.rowNum):
+ print("{} are sampled in {} times".format(i, freqDict[i]))
+
+ if not (freqDict[i]>=lower_bound and freqDict[i]<=upper_bound):
+ print("run it again. if it keeps appearing, sample function bug")
+ caller = inspect.getframeinfo(inspect.stack()[0][0])
+ args = (caller.filename, caller.lineno-2)
+ tdLog.exit("{}({}) failed. sample function failure".format(args[0], args[1]))
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/insert/binary.py b/tests/pytest/insert/binary.py
index 0cbb7876c6194041a160f8fee7271f0c76d3b90c..e91a20e65cd04dd64a88af88259e8e25eebf595c 100644
--- a/tests/pytest/insert/binary.py
+++ b/tests/pytest/insert/binary.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
+import platform
import sys
from util.log import *
from util.cases import *
@@ -53,9 +54,10 @@ class TDTestCase:
tdLog.info("tdSql.checkData(0, 0, '34567')")
tdSql.checkData(0, 0, '34567')
tdLog.info("insert into tb values (now+4a, \"'';\")")
- config_dir = subprocess.check_output(str("ps -ef |grep dnode1|grep -v grep |awk '{print $NF}'"), stderr=subprocess.STDOUT, shell=True).decode('utf-8').replace('\n', '')
- result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(config_dir)).readlines())
- if "Query OK" not in result: tdLog.exit("err:insert '';")
+ if platform.system() == "Linux":
+ config_dir = subprocess.check_output(str("ps -ef |grep dnode1|grep -v grep |awk '{print $NF}'"), stderr=subprocess.STDOUT, shell=True).decode('utf-8').replace('\n', '')
+ result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(config_dir)).readlines())
+ if "Query OK" not in result: tdLog.exit("err:insert '';")
tdLog.info('drop database db')
tdSql.execute('drop database db')
tdLog.info('show databases')
diff --git a/tests/pytest/insert/nchar.py b/tests/pytest/insert/nchar.py
index 5ad52b96a1555b3ccd622fd4bf88c7a0b26051b5..023da5b014864a2d010e6ec6acc16a33ccb20424 100644
--- a/tests/pytest/insert/nchar.py
+++ b/tests/pytest/insert/nchar.py
@@ -15,6 +15,7 @@ import sys
from util.log import *
from util.cases import *
from util.sql import *
+import platform
class TDTestCase:
@@ -37,7 +38,7 @@ class TDTestCase:
tdSql.error("insert into tb values (now, 'taosdata001')")
- tdSql.error("insert into tb(now, 😀)")
+ if platform.system() == "Linux" : tdSql.error("insert into tb(now, 😀)")
tdSql.query("select * from tb")
tdSql.checkRows(2)
diff --git a/tests/pytest/query/queryNormal.py b/tests/pytest/query/queryNormal.py
index 52e49a57c6883f6fe57df887756bbf2d27199806..a1789c8909f542ba3dcae83042ab50cde9e58e32 100644
--- a/tests/pytest/query/queryNormal.py
+++ b/tests/pytest/query/queryNormal.py
@@ -17,6 +17,7 @@ from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
+import platform
class TDTestCase:
def init(self, conn, logSql):
@@ -137,8 +138,9 @@ class TDTestCase:
tdSql.checkData(1, 1, 421)
tdSql.checkData(1, 2, "tm1")
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ if platform.system() == "Linux":
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
tdSql.query("select last(*) from m1 group by tbname")
tdSql.checkData(0, 0, "2020-03-01 01:01:01")
diff --git a/tests/pytest/query/queryRegex.py b/tests/pytest/query/queryRegex.py
index 25afa6395f62faeeac51bab73ed742626e4fba90..c955920bfd553f9b9d48b2e8f0730a361afdc8df 100644
--- a/tests/pytest/query/queryRegex.py
+++ b/tests/pytest/query/queryRegex.py
@@ -29,37 +29,75 @@ class TDTestCase:
print("==============step1")
##2021-09-17 For jira: https://jira.taosdata.com:18080/browse/TD-6585
tdSql.execute(
- "create stable if not exists stb_test(ts timestamp,c0 binary(32)) tags(t0 binary(32))"
+ "create stable if not exists stb_test(ts timestamp,c0 binary(32),c1 int) tags(t0 binary(32))"
)
tdSql.execute(
'create table if not exists stb_1 using stb_test tags("abcdefgasdfg12346")'
)
- tdLog.info('insert into stb_1 values("2021-09-13 10:00:00.001","abcefdasdqwerxasdazx12345"')
- tdSql.execute('insert into stb_1 values("2021-09-13 10:00:00.001","abcefdasdqwerxasdazx12345")')
+ tdLog.info('insert into stb_1 values("2021-09-13 10:00:00.001","abcefdasdqwerxasdazx12345",15')
+ tdSql.execute('insert into stb_1 values("2021-09-13 10:00:00.002","abcefdasdqwerxasdazx12345",15)')
+ tdSql.execute('insert into stb_1 values("2021-09-13 10:00:00.003","aaaaafffwwqqxzz",16)')
+ tdSql.execute('insert into stb_1 values("2021-09-13 10:00:00.004","fffwwqqxzz",17)')
+ tdSql.execute('insert into stb_1 values("2020-10-13 10:00:00.001","abcd\\\efgh",100)')
+
tdSql.query('select * from stb_test where tbname match "asd"')
tdSql.checkRows(0)
-
tdSql.query('select * from stb_test where tbname nmatch "asd"')
- tdSql.checkRows(1)
+ tdSql.checkRows(4)
tdSql.query('select * from stb_test where c0 match "abc"')
+ tdSql.checkRows(2)
+ tdSql.query('select * from stb_test where c0 nmatch "abc"')
+ tdSql.checkRows(2)
+
+ tdSql.query('select * from stb_test where c0 match "^a"')
+ tdSql.checkRows(3)
+ tdSql.query('select * from stb_test where c0 nmatch "^a"')
tdSql.checkRows(1)
+
+ tdSql.query('select * from stb_test where c0 match "5$"')
tdSql.checkData(0,1,"abcefdasdqwerxasdazx12345")
+ tdSql.query('select * from stb_test where c0 nmatch "5$"')
+ tdSql.checkRows(3)
+
+
+ tdSql.query('select * from stb_test where c0 match "a*"')
+ tdSql.checkRows(4)
+ tdSql.query('select * from stb_test where c0 nmatch "a*"')
+ tdSql.checkRows(0)
- tdSql.query('select * from stb_test where c0 nmatch "abc"')
+
+ tdSql.query('select * from stb_test where c0 match "a+"')
+ tdSql.checkRows(3)
+ tdSql.query('select * from stb_test where c0 nmatch "a+"')
+ tdSql.checkRows(1)
+
+ tdSql.query('select * from stb_test where c0 match "a?"')
+ tdSql.checkRows(4)
+ tdSql.query('select * from stb_test where c0 nmatch "a?"')
tdSql.checkRows(0)
+
+
+ tdSql.query('select last(c1) from stb_test where c0 match "a"')
+ tdSql.checkData(0,0,16)
+
+
+ tdSql.query('select count(c1) from stb_test where t0 match "a"')
+ tdSql.checkData(0,0,4)
tdSql.error('select * from stb_test where c0 match abc')
tdSql.error('select * from stb_test where c0 nmatch abc')
-
- tdSql.execute('insert into stb_1 values("2020-10-13 10:00:00.001","abcd\\\efgh")')
+
tdSql.query("select * from stb_1 where c0 match '\\\\'")
tdSql.checkRows(1)
+ tdSql.query("select * from stb_1 where c0 nmatch '\\\\'")
+ tdSql.checkRows(3)
+
diff --git a/tests/pytest/test-all.bat b/tests/pytest/test-all.bat
new file mode 100644
index 0000000000000000000000000000000000000000..1f1e2c1727527e91f7632213992607d6221eac85
--- /dev/null
+++ b/tests/pytest/test-all.bat
@@ -0,0 +1,15 @@
+@echo off
+SETLOCAL EnableDelayedExpansion
+for /F "tokens=1,2 delims=#" %%a in ('"prompt #$H#$E# & echo on & for %%b in (1) do rem"') do ( set "DEL=%%a")
+for /F "usebackq tokens=*" %%i in (fulltest.bat) do (
+ echo Processing %%i
+ call %%i ARG1 -w 1 -m %1 > result.txt 2>error.txt
+ if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. )
+)
+exit
+
+:colorEcho
+echo off
+ "%~2"
+findstr /v /a:%1 /R "^$" "%~2" nul
+del "%~2" > nul 2>&1i
\ No newline at end of file
diff --git a/tests/pytest/test.py b/tests/pytest/test.py
index 97dca6be1811ee87a31661e018616f469d5fd4ca..a96ac21496431b811f26fa82091c92f6ae8ecb9a 100644
--- a/tests/pytest/test.py
+++ b/tests/pytest/test.py
@@ -18,6 +18,7 @@ import getopt
import subprocess
import time
from distutils.log import warn as printf
+from fabric2 import Connection
from util.log import *
from util.dnodes import *
@@ -35,8 +36,9 @@ if __name__ == "__main__":
logSql = True
stop = 0
restart = False
- opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghr', [
- 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help'])
+ windows = 0
+ opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrw', [
+ 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'windows'])
for key, value in opts:
if key in ['-h', '--help']:
tdLog.printNoPrefix(
@@ -49,6 +51,7 @@ if __name__ == "__main__":
tdLog.printNoPrefix('-c Test Cluster Flag')
tdLog.printNoPrefix('-g valgrind Test Flag')
tdLog.printNoPrefix('-r taosd restart test')
+ tdLog.printNoPrefix('-w taos on windows')
sys.exit(0)
if key in ['-r', '--restart']:
@@ -81,6 +84,9 @@ if __name__ == "__main__":
if key in ['-s', '--stop']:
stop = 1
+ if key in ['-w', '--windows']:
+ windows = 1
+
if (stop != 0):
if (valgrind == 0):
toBeKilled = "taosd"
@@ -111,66 +117,81 @@ if __name__ == "__main__":
tdLog.info('stop All dnodes')
- tdDnodes.init(deployPath)
- tdDnodes.setTestCluster(testCluster)
- tdDnodes.setValgrind(valgrind)
- tdDnodes.stopAll()
- is_test_framework = 0
- key_word = 'tdCases.addLinux'
- try:
- if key_word in open(fileName).read():
- is_test_framework = 1
- except:
- pass
- if is_test_framework:
- moduleName = fileName.replace(".py", "").replace("/", ".")
- uModule = importlib.import_module(moduleName)
- try:
- ucase = uModule.TDTestCase()
- tdDnodes.deploy(1,ucase.updatecfgDict)
- except :
- tdDnodes.deploy(1,{})
- else:
- tdDnodes.deploy(1,{})
- tdDnodes.start(1)
-
if masterIp == "":
host = '127.0.0.1'
else:
host = masterIp
tdLog.info("Procedures for tdengine deployed in %s" % (host))
-
- tdCases.logSql(logSql)
-
- if testCluster:
- tdLog.info("Procedures for testing cluster")
- if fileName == "all":
- tdCases.runAllCluster()
- else:
- tdCases.runOneCluster(fileName)
- else:
+ if windows:
+ tdCases.logSql(logSql)
tdLog.info("Procedures for testing self-deployment")
+ td_clinet = TDSimClient("C:\\TDengine")
+ td_clinet.deploy()
+ remote_conn = Connection("root@%s"%host)
+ with remote_conn.cd('/var/lib/jenkins/workspace/TDinternal/community/tests/pytest'):
+ remote_conn.run("python3 ./test.py")
conn = taos.connect(
- host,
- config=tdDnodes.getSimCfgPath())
- if fileName == "all":
- tdCases.runAllLinux(conn)
+ host="%s"%(host),
+ config=td_clinet.cfgDir)
+ tdCases.runOneWindows(conn, fileName)
+ else:
+ tdDnodes.init(deployPath)
+ tdDnodes.setTestCluster(testCluster)
+ tdDnodes.setValgrind(valgrind)
+ tdDnodes.stopAll()
+ is_test_framework = 0
+ key_word = 'tdCases.addLinux'
+ try:
+ if key_word in open(fileName).read():
+ is_test_framework = 1
+ except:
+ pass
+ if is_test_framework:
+ moduleName = fileName.replace(".py", "").replace("/", ".")
+ uModule = importlib.import_module(moduleName)
+ try:
+ ucase = uModule.TDTestCase()
+ tdDnodes.deploy(1,ucase.updatecfgDict)
+ except :
+ tdDnodes.deploy(1,{})
+ else:
+ pass
+ tdDnodes.deploy(1,{})
+ tdDnodes.start(1)
+
+
+
+ tdCases.logSql(logSql)
+
+ if testCluster:
+ tdLog.info("Procedures for testing cluster")
+ if fileName == "all":
+ tdCases.runAllCluster()
+ else:
+ tdCases.runOneCluster(fileName)
else:
- tdCases.runOneLinux(conn, fileName)
- if restart:
- if fileName == "all":
- tdLog.info("not need to query ")
- else:
- sp = fileName.rsplit(".", 1)
- if len(sp) == 2 and sp[1] == "py":
- tdDnodes.stopAll()
- tdDnodes.start(1)
- time.sleep(1)
- conn = taos.connect( host, config=tdDnodes.getSimCfgPath())
- tdLog.info("Procedures for tdengine deployed in %s" % (host))
- tdLog.info("query test after taosd restart")
- tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py")
+ tdLog.info("Procedures for testing self-deployment")
+ conn = taos.connect(
+ host,
+ config=tdDnodes.getSimCfgPath())
+ if fileName == "all":
+ tdCases.runAllLinux(conn)
else:
- tdLog.info("not need to query")
+ tdCases.runOneWindows(conn, fileName)
+ if restart:
+ if fileName == "all":
+ tdLog.info("not need to query ")
+ else:
+ sp = fileName.rsplit(".", 1)
+ if len(sp) == 2 and sp[1] == "py":
+ tdDnodes.stopAll()
+ tdDnodes.start(1)
+ time.sleep(1)
+ conn = taos.connect( host, config=tdDnodes.getSimCfgPath())
+ tdLog.info("Procedures for tdengine deployed in %s" % (host))
+ tdLog.info("query test after taosd restart")
+ tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py")
+ else:
+ tdLog.info("not need to query")
conn.close()
diff --git a/tests/pytest/tools/taosdemoTestdatatype.py b/tests/pytest/tools/taosdemoTestdatatype.py
new file mode 100644
index 0000000000000000000000000000000000000000..e32d895571da7d2a101dc32201ebba4754ec4740
--- /dev/null
+++ b/tests/pytest/tools/taosdemoTestdatatype.py
@@ -0,0 +1,94 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.numberOfTables = 10
+ self.numberOfRecords = 10
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosdemo not found!")
+ else:
+ tdLog.info("taosdemo found in %s" % buildPath)
+ binPath = buildPath + "/build/bin/"
+
+ os.system("%staosdemo -d test002 -y -t %d -n %d -b INT,nchar\\(8\\),binary\\(16\\),binary,nchar -w 8" %
+ (binPath, self.numberOfTables, self.numberOfRecords))
+
+ tdSql.execute('use test002')
+ tdSql.query("select count(*) from meters")
+ tdSql.checkData(0, 0, self.numberOfTables * self.numberOfRecords)
+
+ tdSql.query("select * from meters")
+ tdSql.checkRows(self.numberOfTables * self.numberOfRecords)
+
+ tdLog.info('insert into d1 values(now,100,"abcd1234","abcdefgh12345678","abcdefgh","abcdefgh")')
+ tdSql.execute('insert into d1 values(now,100,"abcd1234","abcdefgh12345678","abcdefgh","abcdefgh")')
+ tdSql.query("select * from meters")
+ tdSql.checkRows(101)
+
+ tdSql.error('insert into d1 values(now,100,"abcd","abcd"')
+ tdSql.error('insert into d1 values(now,100,100,100)')
+
+ os.system("%staosdemo -d test002 -y -t %d -n %d --data-type INT,nchar\\(8\\),binary\\(16\\),binary,nchar -w 8" %
+ (binPath, self.numberOfTables, self.numberOfRecords))
+
+ tdSql.execute('use test002')
+ tdSql.query("select count(*) from meters")
+ tdSql.checkData(0, 0, self.numberOfTables * self.numberOfRecords)
+
+
+ os.system("%staosdemo -d test002 -y -t %d -n %d -bINT,nchar\\(8\\),binary\\(16\\),binary,nchar -w 8" %
+ (binPath, self.numberOfTables, self.numberOfRecords))
+
+ tdSql.execute('use test002')
+ tdSql.query("select count(*) from meters")
+ tdSql.checkData(0, 0, self.numberOfTables * self.numberOfRecords)
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/update/merge_commit_data2_update0.py b/tests/pytest/update/merge_commit_data2_update0.py
index def50e04661b1752668202359eec7dd89df9b6f0..7e3c65a0a2f2e3c0b01977b0b28cb0ec8a2530ea 100644
--- a/tests/pytest/update/merge_commit_data2_update0.py
+++ b/tests/pytest/update/merge_commit_data2_update0.py
@@ -27,7 +27,7 @@ class TDTestCase:
def restart_taosd(self,db):
tdDnodes.stop(1)
- tdDnodes.startWithoutSleep(1)
+ tdDnodes.start(1)
tdSql.execute("use %s;" % db)
def date_to_timestamp_microseconds(self, date):
diff --git a/tests/pytest/util/cases.py b/tests/pytest/util/cases.py
index 2fc1ac8515e47f9354483ebb590897eea96dcc57..fd3926a6f1bc79fee81c7d438dceb8eedcb7803d 100644
--- a/tests/pytest/util/cases.py
+++ b/tests/pytest/util/cases.py
@@ -34,7 +34,7 @@ class TDCases:
self.clusterCases = []
def __dynamicLoadModule(self, fileName):
- moduleName = fileName.replace(".py", "").replace("/", ".")
+ moduleName = fileName.replace(".py", "").replace(os.sep, ".")
return importlib.import_module(moduleName, package='..')
def logSql(self, logSql):
@@ -80,7 +80,7 @@ class TDCases:
runNum += 1
continue
- def runAllWindows(self, conn):
+ def runAllWindows(self, conn, fileName):
# TODO: load all Windows cases here
runNum = 0
for tmp in self.windowsCases:
@@ -101,12 +101,17 @@ class TDCases:
for tmp in self.windowsCases:
if tmp.name.find(fileName) != -1:
case = testModule.TDTestCase()
- case.init(conn)
- case.run()
+ case.init(conn, self._logSql)
+ try:
+ case.run()
+ except Exception as e:
+ tdLog.notice(repr(e))
+ tdLog.exit("%s failed" % (fileName))
case.stop()
runNum += 1
continue
tdLog.notice("total %d Windows case(s) executed" % (runNum))
+
def runAllCluster(self):
# TODO: load all cluster case module here
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index 0208f884b691a20e4b4456fe8165797969305674..ff3c271cd8ab1ea2480f3d122513badab09016fc 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -15,6 +15,8 @@ import sys
import os
import os.path
import platform
+import pathlib
+import shutil
import subprocess
from time import sleep
from util.log import *
@@ -62,32 +64,45 @@ class TDSimClient:
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
-
+ def os_string(self,path):
+ os_path = path.replace("/",os.sep)
+ return os_path
def deploy(self):
- self.logDir = "%s/sim/psim/log" % (self.path)
- self.cfgDir = "%s/sim/psim/cfg" % (self.path)
- self.cfgPath = "%s/sim/psim/cfg/taos.cfg" % (self.path)
-
- cmd = "rm -rf " + self.logDir
- if os.system(cmd) != 0:
- tdLog.exit(cmd)
-
- cmd = "mkdir -p " + self.logDir
- if os.system(cmd) != 0:
- tdLog.exit(cmd)
-
- cmd = "rm -rf " + self.cfgDir
- if os.system(cmd) != 0:
- tdLog.exit(cmd)
-
- cmd = "mkdir -p " + self.cfgDir
- if os.system(cmd) != 0:
- tdLog.exit(cmd)
-
- cmd = "touch " + self.cfgPath
- if os.system(cmd) != 0:
- tdLog.exit(cmd)
-
+ self.logDir = self.os_string("%s/sim/psim/log" % (self.path))
+ self.cfgDir = self.os_string("%s/sim/psim/cfg" % (self.path))
+ self.cfgPath = self.os_string("%s/sim/psim/cfg/taos.cfg" % (self.path))
+
+ # cmd = "rm -rf " + self.logDir
+ # if os.system(cmd) != 0:
+ # tdLog.exit(cmd)
+ if os.path.exists(self.logDir):
+ try:
+ shutil.rmtree(self.logDir)
+ except:
+ tdLog.exit("del %s failed"%self.logDir)
+ # cmd = "mkdir -p " + self.logDir
+ # if os.system(cmd) != 0:
+ # tdLog.exit(cmd)
+ os.makedirs(self.logDir)
+ # cmd = "rm -rf " + self.cfgDir
+ # if os.system(cmd) != 0:
+ # tdLog.exit(cmd)
+ if os.path.exists(self.cfgDir):
+ try:
+ shutil.rmtree(self.cfgDir)
+ except:
+ tdLog.exit("del %s failed"%self.cfgDir)
+ # cmd = "mkdir -p " + self.cfgDir
+ # if os.system(cmd) != 0:
+ # tdLog.exit(cmd)
+ os.makedirs(self.cfgDir)
+ # cmd = "touch " + self.cfgPath
+ # if os.system(cmd) != 0:
+ # tdLog.exit(cmd)
+ try:
+ pathlib.Path(self.cfgPath).touch()
+ except:
+ tdLog.exit("create %s failed"%self.cfgPath)
if self.testCluster:
self.cfg("masterIp", "192.168.0.1")
self.cfg("secondIp", "192.168.0.2")
diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim
index 188ce1405541cbbb230ceb186c44cfd4230925fc..2f4025830d73713d7d618aa8219a1d09c0dad502 100644
--- a/tests/script/fullGeneralSuite.sim
+++ b/tests/script/fullGeneralSuite.sim
@@ -21,6 +21,10 @@ run general/compute/bottom.sim
run general/compute/count.sim
run general/compute/diff.sim
run general/compute/diff2.sim
+run general/compute/mavg.sim
+run general/compute/mavg2.sim
+run general/compute/csum.sim
+run general/compute/csum2.sim
run general/compute/first.sim
run general/compute/interval.sim
run general/compute/last.sim
diff --git a/tests/script/general/compute/csum.sim b/tests/script/general/compute/csum.sim
new file mode 100644
index 0000000000000000000000000000000000000000..1f291d784fa848e8da9abe502884cdbad122973d
--- /dev/null
+++ b/tests/script/general/compute/csum.sim
@@ -0,0 +1,98 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
+system sh/exec.sh -n dnode1 -s start
+sleep 200
+sql connect
+
+$dbPrefix = m_di_db
+$tbPrefix = m_di_tb
+$mtPrefix = m_di_mt
+$tbNum = 10
+$rowNum = 20
+$totalNum = 200
+
+print =============== step1
+$i = 0
+$db = $dbPrefix . $i
+$mt = $mtPrefix . $i
+
+sql drop database $db -x step1
+step1:
+sql create database $db
+sql use $db
+sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int)
+
+$i = 0
+while $i < $tbNum
+ $tb = $tbPrefix . $i
+ sql create table $tb using $mt tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $cc = $x * 60000
+ $ms = 1601481600000 + $cc
+ sql insert into $tb values ($ms , $x )
+ $x = $x + 1
+ endw
+
+ $i = $i + 1
+endw
+
+sleep 100
+
+print =============== step2
+$i = 1
+$tb = $tbPrefix . $i
+
+sql select csum(tbcol) from $tb
+print ===> $data11
+if $data11 != 1 then
+ return -1
+endi
+
+print =============== step3
+$cc = 4 * 60000
+$ms = 1601481600000 + $cc
+sql select csum(tbcol) from $tb where ts > $ms
+print ===> $data11
+if $data11 != 11 then
+ return -1
+endi
+
+$cc = 4 * 60000
+$ms = 1601481600000 + $cc
+sql select csum(tbcol) from $tb where ts <= $ms
+print ===> $data11
+if $data11 != 1 then
+ return -1
+endi
+
+print =============== step4
+sql select csum(tbcol) as b from $tb
+print ===> $data11
+if $data11 != 1 then
+ return -1
+endi
+
+print =============== step5
+sql select csum(tbcol) as b from $tb interval(1m) -x step5
+ return -1
+step5:
+
+print =============== step6
+$cc = 4 * 60000
+$ms = 1601481600000 + $cc
+sql select csum(tbcol) as b from $tb where ts <= $ms interval(1m) -x step6
+ return -1
+step6:
+
+print =============== clear
+sql drop database $db
+sql show databases
+if $rows != 0 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/compute/csum2.sim b/tests/script/general/compute/csum2.sim
new file mode 100644
index 0000000000000000000000000000000000000000..506070ae369ccb4c1d2bc28d149c7126079a2b54
--- /dev/null
+++ b/tests/script/general/compute/csum2.sim
@@ -0,0 +1,163 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
+system sh/exec.sh -n dnode1 -s start
+sleep 200
+sql connect
+
+$dbPrefix = m_di_db
+$tbPrefix = m_di_tb
+$mtPrefix = m_di_mt
+$tbNum = 2
+$rowNum = 1000
+$totalNum = 2000
+
+print =============== step1
+$i = 0
+$db = $dbPrefix . $i
+$mt = $mtPrefix . $i
+
+sql drop database $db -x step1
+step1:
+sql create database $db
+sql use $db
+sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 nchar(5), c9 binary(10)) TAGS(tgcol int)
+
+$i = 0
+while $i < $tbNum
+ $tb = $tbPrefix . $i
+ sql create table $tb using $mt tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $cc = $x * 60000
+ $ms = 1601481600000 + $cc
+
+ $tinyint = $x / 128
+ sql insert into $tb values ($ms , $x , $x , $x , $x , $tinyint , $x , $x , $x , $x )
+ $x = $x + 1
+ endw
+
+ $i = $i + 1
+endw
+
+sleep 100
+
+print =============== step2
+$i = 1
+$tb = $tbPrefix . $i
+
+sql select csum(c1) from $tb
+print ===> $data11
+if $data11 != 1 then
+ return -1
+endi
+sql select csum(c2) from $tb
+print ===> $data11
+if $data11 != 1.000000000 then
+ return -1
+endi
+sql select csum(c3) from $tb
+print ===> $data11
+if $data11 != 1 then
+ return -1
+endi
+sql select csum(c4) from $tb
+print ===> $data11
+if $data11 != 1 then
+ return -1
+endi
+sql select csum(c5) from $tb
+print ===> $data11
+if $data11 != 0 then
+ return -1
+endi
+sql select csum(c6) from $tb
+print ===> $data11
+if $data11 != 1.000000000 then
+ return -1
+endi
+sql_error select csum(c7) from $tb
+sql_error select csum(c8) from $tb
+sql_error select csum(c9) from $tb
+sql_error select csum(ts) from $tb
+sql_error select csum(c1), csum(c2) from $tb
+#sql_error select 2+csum(c1) from $tb
+sql_error select csum(c1+2) from $tb
+sql_error select csum(c1) from $tb where ts > 0 and ts < now + 100m interval(10m)
+sql_error select csum(c1) from $mt
+sql_error select csum(csum(c1)) from $tb
+sql_error select csum(c1) from m_di_tb1 where c2 like '2%'
+
+
+print =============== step3
+sql select csum(c1) from $tb where c1 > 5
+print ===> $data11
+if $data11 != 13 then
+ return -1
+endi
+sql select csum(c2) from $tb where c2 > 5
+print ===> $data11
+if $data11 != 13.000000000 then
+ return -1
+endi
+sql select csum(c3) from $tb where c3 > 5
+print ===> $data11
+if $data11 != 13 then
+ return -1
+endi
+sql select csum(c4) from $tb where c4 > 5
+print ===> $data11
+if $data11 != 13 then
+ return -1
+endi
+sql select csum(c5) from $tb where c5 > 5
+print ===> $data11
+if $data11 != 12 then
+ return -1
+endi
+sql select csum(c6) from $tb where c6 > 5
+print ===> $data11
+if $data11 != 13.000000000 then
+ return -1
+endi
+
+print =============== step4
+sql select csum(c1) from $tb where c1 > 5 and c2 < $rowNum
+print ===> $data11
+if $data11 != 13 then
+ return -1
+endi
+
+sql select csum(c1) from $tb where c9 like '%9' and c1 <= 20
+print ===> $rows
+if $rows != 2 then
+ return -1
+endi
+print ===>$data01, $data11
+if $data01 != 9 then
+ return -1
+endi
+if $data11 != 28 then
+ return -1
+endi
+
+print =============== step5
+sql select csum(c1) as b from $tb interval(1m) -x step5
+ return -1
+step5:
+
+print =============== step6
+sql select csum(c1) as b from $tb where ts < now + 4m interval(1m) -x step6
+ return -1
+step6:
+
+print =============== clear
+#sql drop database $db
+#sql show databases
+#if $rows != 0 then
+# return -1
+#endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/compute/mavg.sim b/tests/script/general/compute/mavg.sim
new file mode 100644
index 0000000000000000000000000000000000000000..d33b620842cef880d17662e82831a082f8ce1cf9
--- /dev/null
+++ b/tests/script/general/compute/mavg.sim
@@ -0,0 +1,98 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
+system sh/exec.sh -n dnode1 -s start
+sleep 200
+sql connect
+
+$dbPrefix = m_di_db
+$tbPrefix = m_di_tb
+$mtPrefix = m_di_mt
+$tbNum = 10
+$rowNum = 20
+$totalNum = 200
+
+print =============== step1
+$i = 0
+$db = $dbPrefix . $i
+$mt = $mtPrefix . $i
+
+sql drop database $db -x step1
+step1:
+sql create database $db
+sql use $db
+sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int)
+
+$i = 0
+while $i < $tbNum
+ $tb = $tbPrefix . $i
+ sql create table $tb using $mt tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $cc = $x * 60000
+ $ms = 1601481600000 + $cc
+ sql insert into $tb values ($ms , $x )
+ $x = $x + 1
+ endw
+
+ $i = $i + 1
+endw
+
+sleep 100
+
+print =============== step2
+$i = 1
+$tb = $tbPrefix . $i
+
+sql select mavg(tbcol,2) from $tb
+print ===> $data11
+if $data11 != 1.500000000 then
+ return -1
+endi
+
+print =============== step3
+$cc = 4 * 60000
+$ms = 1601481600000 + $cc
+sql select mavg(tbcol,2) from $tb where ts > $ms
+print ===> $data11
+if $data11 != 6.500000000 then
+ return -1
+endi
+
+$cc = 4 * 60000
+$ms = 1601481600000 + $cc
+sql select mavg(tbcol,2) from $tb where ts <= $ms
+print ===> $data11
+if $data11 != 1.500000000 then
+ return -1
+endi
+
+print =============== step4
+sql select mavg(tbcol,2) as b from $tb
+print ===> $data11
+if $data11 != 1.500000000 then
+ return -1
+endi
+
+print =============== step5
+sql select mavg(tbcol, 2) as b from $tb interval(1m) -x step5
+ return -1
+step5:
+
+print =============== step6
+$cc = 4 * 60000
+$ms = 1601481600000 + $cc
+sql select mavg(tbcol, 2) as b from $tb where ts <= $ms interval(1m) -x step6
+ return -1
+step6:
+
+print =============== clear
+sql drop database $db
+sql show databases
+if $rows != 0 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/compute/mavg2.sim b/tests/script/general/compute/mavg2.sim
new file mode 100644
index 0000000000000000000000000000000000000000..60b170e270505b7c3e8d2ee174a4e3b8a4ad223d
--- /dev/null
+++ b/tests/script/general/compute/mavg2.sim
@@ -0,0 +1,159 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
+system sh/exec.sh -n dnode1 -s start
+sleep 200
+sql connect
+
+$dbPrefix = m_di_db
+$tbPrefix = m_di_tb
+$mtPrefix = m_di_mt
+$tbNum = 2
+$rowNum = 10000
+$totalNum = 20000
+
+print =============== step1
+$i = 0
+$db = $dbPrefix . $i
+$mt = $mtPrefix . $i
+
+sql drop database $db -x step1
+step1:
+sql create database $db
+sql use $db
+sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 nchar(5), c9 binary(10)) TAGS(tgcol int)
+
+$i = 0
+while $i < $tbNum
+ $tb = $tbPrefix . $i
+ sql create table $tb using $mt tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $cc = $x * 60000
+ $ms = 1601481600000 + $cc
+
+ $tinyint = $x / 128
+ sql insert into $tb values ($ms , $x , $x , $x , $x , $tinyint , $x , $x , $x , $x )
+ $x = $x + 1
+ endw
+
+ $i = $i + 1
+endw
+
+sleep 100
+
+print =============== step2
+$i = 1
+$tb = $tbPrefix . $i
+
+sql select mavg(c1, 2) from $tb
+print ===> $data11
+if $data11 != 1.500000000 then
+ return -1
+endi
+sql select mavg(c2, 2) from $tb
+print ===> $data11
+if $data11 != 1.500000000 then
+ return -1
+endi
+sql select mavg(c3, 2) from $tb
+print ===> $data11
+if $data11 != 1.500000000 then
+ return -1
+endi
+sql select mavg(c4, 2) from $tb
+print ===> $data11
+if $data11 != 1.500000000 then
+ return -1
+endi
+sql select mavg(c5, 2) from $tb
+print ===> $data11
+if $data11 != 0.000000000 then
+ return -1
+endi
+sql select mavg(c6, 2) from $tb
+print ===> $data11
+if $data11 != 1.500000000 then
+ return -1
+endi
+sql_error select mavg(c7,2) from $tb
+sql_error select mavg(c8,2) from $tb
+sql_error select mavg(c9,2) from $tb
+sql_error select mavg(ts,2) from $tb
+sql_error select mavg(c1,2), mavg(c2,2) from $tb
+#sql_error select 2+mavg(c1,2) from $tb
+sql_error select mavg(c1+2) from $tb
+sql_error select mavg(c1,2) from $tb where ts > 0 and ts < now + 100m interval(10m)
+sql_error select mavg(c1,2) from $mt
+sql_error select mavg(mavg(c1,2)) from $tb
+sql_error select mavg(c1,2) from m_di_tb1 where c2 like '2%'
+
+
+print =============== step3
+sql select mavg(c1,2) from $tb where c1 > 5
+print ===> $data11
+if $data11 != 7.500000000 then
+ return -1
+endi
+sql select mavg(c2,2) from $tb where c2 > 5
+print ===> $data11
+if $data11 != 7.500000000 then
+ return -1
+endi
+sql select mavg(c3,2) from $tb where c3 > 5
+print ===> $data11
+if $data11 != 7.500000000 then
+ return -1
+endi
+sql select mavg(c4,2) from $tb where c4 > 5
+print ===> $data11
+if $data11 != 7.500000000 then
+ return -1
+endi
+sql select mavg(c5,2) from $tb where c5 > 5
+print ===> $data11
+if $data11 != 6.000000000 then
+ return -1
+endi
+sql select mavg(c6,2) from $tb where c6 > 5
+print ===> $data11
+if $data11 != 7.500000000 then
+ return -1
+endi
+
+print =============== step4
+sql select mavg(c1,2) from $tb where c1 > 5 and c2 < $rowNum
+print ===> $data11
+if $data11 != 7.500000000 then
+ return -1
+endi
+
+sql select mavg(c1,2) from $tb where c9 like '%9' and c1 <= 20
+if $rows != 1 then
+ return -1
+endi
+print ===> $data01
+if $data01 != 14.000000000 then
+ return -1
+endi
+
+print =============== step5
+sql select mavg(c1,2) as b from $tb interval(1m) -x step5
+ return -1
+step5:
+
+print =============== step6
+sql select mavg(c1,2) as b from $tb where ts < now + 4m interval(1m) -x step6
+ return -1
+step6:
+
+print =============== clear
+#sql drop database $db
+#sql show databases
+#if $rows != 0 then
+# return -1
+#endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/compute/sample.sim b/tests/script/general/compute/sample.sim
new file mode 100644
index 0000000000000000000000000000000000000000..0559d8c7253cfaa9b60e514408ed390562812538
--- /dev/null
+++ b/tests/script/general/compute/sample.sim
@@ -0,0 +1,165 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
+system sh/exec.sh -n dnode1 -s start
+sleep 200
+sql connect
+
+$dbPrefix = m_db
+$tbPrefix = m_tb
+$mtPrefix = m_mt
+$tbNum = 10
+$rowNum = 20
+$totalNum = 200
+
+print =============== step1
+$i = 0
+$db = $dbPrefix . $i
+$mt = $mtPrefix . $i
+
+sql drop database $db -x step1
+step1:
+sql create database $db
+sql use $db
+sql create table $mt (ts timestamp, tbcol int, bin binary(43), nch nchar(43)) TAGS(tgcol int)
+
+$i = 0
+while $i < $tbNum
+ $tb = $tbPrefix . $i
+ sql create table $tb using $mt tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $cc = $x * 60000
+ $ms = 1601481600000 + $cc
+
+ sql insert into $tb values ($ms , $x , 'binary' , 'nchar' )
+ $x = $x + 1
+ endw
+
+ $i = $i + 1
+endw
+
+sleep 100
+
+print =============== step2
+$i = 1
+$tb = $tbPrefix . $i
+
+sql select sample(tbcol, 1) from $tb
+if $rows != 1 then
+ return -1
+endi
+if $data01 > 19 then
+ return -1
+endi
+sql select sample(bin, 1) from $tb
+if $rows != 1 then
+ return -1
+endi
+if $data01 != @binary@ then
+ return -1
+endi
+sql select sample(nch, 1) from $tb
+if $rows != 1 then
+ return -1
+endi
+if $data01 != @nchar@ then
+ return -1
+endi
+
+print =============== step3
+$cc = 4 * 60000
+$ms = 1601481600000 + $cc
+
+sql select sample(tbcol, 1) from $tb where ts <= $ms
+if $data01 > 4 then
+ return -1
+endi
+sql select sample(bin, 1) from $tb where ts <= $ms
+if $data01 != @binary@ then
+ return -1
+endi
+sql select sample(nch, 1) from $tb where ts <= $ms
+if $data01 != @nchar@ then
+ return -1
+endi
+
+print =============== step4
+sql select sample(tbcol, 1) as b from $tb
+if $data01 > 19 then
+ return -1
+endi
+
+sql select sample(bin, 1) as b from $tb
+
+print =============== step5
+sql select sample(tbcol, 2) as b from $tb
+if $rows != 2 then
+ return -1
+endi
+if $data01 > 19 then
+ return -1
+endi
+if $data11 > 19 then
+ return -1
+endi
+sql_error select sample(nchar, 2) as b from $tb
+sql select sample(nch, 2) as b from $tb
+if $rows != 2 then
+ return -1
+endi
+print =====> $data01 , $data11
+if $data01 != @nchar@ then
+ return -1
+endi
+if $data11 != @nchar@ then
+ return -1
+endi
+sql select sample(bin, 2) as b from $tb
+if $rows != 2 then
+ return -1
+endi
+if $data01 != @binary@ then
+ return -1
+endi
+if $data11 != @binary@ then
+ return -1
+endi
+
+print =============== step6
+$cc = 4 * 60000
+$ms = 1601481600000 + $cc
+
+sql select sample(tbcol, 2) as b from $tb where ts <= $ms
+if $rows != 2 then
+ return -1
+endi
+if $data01 > 4 then
+ return -1
+endi
+if $data11 > 4 then
+ return -1
+endi
+sql select sample(bin, 2) as b from $tb where ts <= $ms
+if $rows != 2 then
+ return -1
+endi
+sql select sample(nch, 2) as b from $tb where ts <= $ms
+if $rows != 2 then
+ return -1
+endi
+
+sql select sample(tbcol, 1001) as b from $tb -x step6
+ return -1
+step6:
+
+print =============== clear
+sql drop database $db
+sql show databases
+if $rows != 0 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/compute/testSuite.sim b/tests/script/general/compute/testSuite.sim
index 91bf4bf0cda54d300f4d284c9e057616d4d54abe..25c93ed29339c326628b885c34ed8766299460aa 100644
--- a/tests/script/general/compute/testSuite.sim
+++ b/tests/script/general/compute/testSuite.sim
@@ -3,6 +3,11 @@ run general/compute/bottom.sim
run general/compute/count.sim
run general/compute/diff.sim
run general/compute/diff2.sim
+run general/compute/csum.sim
+run general/compute/csum2.sim
+run general/compute/mavg.sim
+run general/compute/mavg2.sim
+run general/compute/sample.sim
run general/compute/first.sim
run general/compute/interval.sim
run general/compute/last.sim
diff --git a/tests/script/general/parser/col_arithmetic_operation.sim b/tests/script/general/parser/col_arithmetic_operation.sim
index 8bb692e3bbe8af3ec9ed179ad29d40b4712d257b..0989f977462015e98ac6f0b625137973474c03d1 100644
--- a/tests/script/general/parser/col_arithmetic_operation.sim
+++ b/tests/script/general/parser/col_arithmetic_operation.sim
@@ -124,8 +124,11 @@ sql select spread(ts )/(1000*3600*24) from $stb interval(1y)
sql_error select first(c1, c2) - last(c1, c2) from $stb interval(1y)
sql_error select first(ts) - last(ts) from $stb interval(1y)
sql_error select top(c1, 2) - last(c1) from $stb;
+sql_error select sample(c1, 2) - last(c1) from $stb;
sql_error select stddev(c1) - last(c1) from $stb;
sql_error select diff(c1) - last(c1) from $stb;
+sql_error select mavg(c1, 2) - last(c1) from $stb;
+sql_error select csum(c1) - last(c1) from $stb;
sql_error select first(c7) - last(c7) from $stb;
sql_error select first(c8) - last(c8) from $stb;
sql_error select first(c9) - last(c9) from $stb;
@@ -151,4 +154,4 @@ if $data02 != 225000 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/col_arithmetic_query.sim b/tests/script/general/parser/col_arithmetic_query.sim
index 17ae6cfd6b8b5636101e67e8d99f6999e50a06a5..502de9583e9727d2dbee4a5601f974d6a46173ba 100644
--- a/tests/script/general/parser/col_arithmetic_query.sim
+++ b/tests/script/general/parser/col_arithmetic_query.sim
@@ -174,6 +174,9 @@ endi
sql_error select top(c1, 1) - bottom(c1, 1) from $tb
sql_error select top(c1, 99) - bottom(c1, 99) from $tb
sql_error select top(c1,1) - 88 from $tb
+sql_error select sample(c1, 1) - bottom(c1, 1) from $tb
+sql_error select sample(c1, 99) - bottom(c1, 99) from $tb
+sql_error select sample(c1,1) - 88 from $tb
# all data types [d.6] ================================================================
sql select c2-c1*1.1, c3/c2, c4*c3, c5%c4, (c6+c4)%22, c2-c2 from $tb
@@ -475,11 +478,16 @@ endi
sql_error select first(c1, c2) - last(c1, c2) from $stb
sql_error select top(c1, 5) - bottom(c1, 5) from $stb
sql_error select first(*) - 99 from $stb
+sql_error select sample(c1, 5) - bottom(c1, 5) from $stb
+
# multi row result aggregation [d.4]
sql_error select top(c1, 1) - bottom(c1, 1) from $stb
sql_error select top(c1, 99) - bottom(c1, 99) from $stb
+sql_error select sample(c1, 1) - top(c1, 1) from $stb
+sql_error select sample(c1, 99) - top(c1, 99) from $stb
+
# query on super table [d.5]=============================================================
# all cases in this part are query on super table
diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim
index 556292b21b218f4df2aaa034d8babe35903a23b8..578234b2984a7d9440e4ea4390d8cb3a4580ab8d 100644
--- a/tests/script/general/parser/function.sim
+++ b/tests/script/general/parser/function.sim
@@ -1087,6 +1087,14 @@ sql select diff(val) from (select derivative(k, 1s, 0) val from t1);
if $rows != 0 then
return -1
endi
+sql select mavg(val,2) from (select derivative(k, 1s, 0) val from t1);
+if $rows != 0 then
+ return -1
+endi
+sql select csum(val) from (select derivative(k, 1s, 0) val from t1);
+if $rows != 0 then
+ return -1
+endi
sql insert into t1 values('2020-1-1 1:1:4', 20);
sql insert into t1 values('2020-1-1 1:1:6', 200);
diff --git a/tests/script/general/parser/having.sim b/tests/script/general/parser/having.sim
index e063333853e04faf1a7f4988b6dd1f11207aee5d..cf3452d179a57eaade2492924513a425aed5870e 100644
--- a/tests/script/general/parser/having.sim
+++ b/tests/script/general/parser/having.sim
@@ -121,6 +121,7 @@ if $data31 != 4 then
return -1
endi
+sql_error select sample(f1,2) from st2 group by f1 having count(f2) > 0;
sql_error select top(f1,2) from st2 group by f1 having count(f2) > 0;
sql select last(f1) from st2 group by f1 having count(f2) > 0;
@@ -140,9 +141,12 @@ if $data30 != 4 then
return -1
endi
-sql_error select top(f1,2) from st2 group by f1 having count(f2) > 0;
-sql_error select top(f1,2) from st2 group by f1 having count(f2) > 0;
-sql_error select top(f1,2) from st2 group by f1 having avg(f1) > 0;
+sql_error select sample(f1,2) from st2 group by f1 having count(f2) > 0;
+sql_error select sample(f1,2) from st2 group by f1 having count(f2) > 0;
+sql_error select sample(f1,2) from st2 group by f1 having avg(f1) > 0;
+sql_error select sample(f1,2) from st2 group by f1 having count(f2) > 0;
+sql_error select sample(f1,2) from st2 group by f1 having count(f2) > 0;
+sql_error select sample(f1,2) from st2 group by f1 having avg(f1) > 0;
sql select avg(f1),count(f1) from st2 group by f1 having avg(f1) > 2;
if $rows != 2 then
@@ -1059,6 +1063,13 @@ if $data26 != 4 then
endi
+sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from st2 group by f1 having sample(f1,1);
+
+sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from st2 group by f1 having sample(f1,1) > 1;
+
+sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1),sample(f1,1) from st2 group by f1 having sum(f1) > 1;
+
+sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1),sample(f1,1),bottom(f1,1) from st2 group by f1 having bottom(f1,1) > 1;
sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from st2 group by f1 having top(f1,1);
@@ -1149,6 +1160,18 @@ sql_error select avg(f1),diff(f1) from st2 group by f1 having avg(f1) > 0;
sql_error select avg(f1),diff(f1) from st2 group by f1 having spread(f2) > 0;
+sql_error select avg(f1) from st2 group by f1 having mavg(f1, 2) > 0;
+
+sql_error select avg(f1),mavg(f1, 3) from st2 group by f1 having avg(f1) > 0;
+
+sql_error select avg(f1),mavg(f1, 4) from st2 group by f1 having spread(f2) > 0;
+
+sql_error select avg(f1) from st2 group by f1 having csum(f1) > 0;
+
+sql_error select avg(f1),csum(f1) from st2 group by f1 having avg(f1) > 0;
+
+sql_error select avg(f1),csum(f1) from st2 group by f1 having spread(f2) > 0;
+
sql select avg(f1) from st2 group by f1 having spread(f2) > 0;
if $rows != 0 then
return -1
@@ -1834,6 +1857,7 @@ if $data04 != 1 then
return -1
endi
+sql_error select sample(f1,2) from tb1 group by f1 having count(f1) > 0;
sql_error select top(f1,2) from tb1 group by f1 having count(f1) > 0;
sql_error select count(*) from tb1 group by f1 having last(*) > 0;
diff --git a/tests/script/general/parser/having_child.sim b/tests/script/general/parser/having_child.sim
index 0fe5448869a5720a62550a88981114e737e4965b..ff7b786638006fb862ab0e22b2c8e6c6fb65902e 100644
--- a/tests/script/general/parser/having_child.sim
+++ b/tests/script/general/parser/having_child.sim
@@ -120,6 +120,7 @@ if $data31 != 4 then
endi
sql_error select top(f1,2) from tb1 group by f1 having count(f2) > 0;
+sql_error select sample(f1,2) from tb1 group by f1 having count(f2) > 0;
sql select last(f1) from tb1 group by f1 having count(f2) > 0;
if $rows != 4 then
@@ -144,6 +145,12 @@ sql_error select top(f1,2) from tb1 group by f1 having count(f2) > 0;
sql_error select top(f1,2) from tb1 group by f1 having avg(f1) > 0;
+sql_error select sample(f1,2) from tb1 group by f1 having count(f2) > 0;
+
+sql_error select sample(f1,2) from tb1 group by f1 having count(f2) > 0;
+
+sql_error select sample(f1,2) from tb1 group by f1 having avg(f1) > 0;
+
sql select avg(f1),count(f1) from tb1 group by f1 having avg(f1) > 2;
if $rows != 2 then
return -1
@@ -1067,7 +1074,13 @@ if $data26 != 4 then
return -1
endi
+sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from tb1 group by f1 having sample(f1,1);
+
+sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from tb1 group by f1 having sample(f1,1) > 1;
+sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1),sample(f1,1),bottom(f1,1) from tb1 group by f1 having bottom(f1,1) > 1;
+
+sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1),sample(f1,1),bottom(f1,1) from tb1 group by f1 having sum(f1) > 1;
sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from tb1 group by f1 having top(f1,1);
@@ -1164,6 +1177,20 @@ sql_error select avg(f1),diff(f1) from tb1 group by f1 having avg(f1) > 0;
sql_error select avg(f1),diff(f1) from tb1 group by f1 having spread(f2) > 0;
+
+sql_error select avg(f1) from tb1 group by f1 having mavg(f1,4) > 0;
+
+sql_error select avg(f1),mavg(f1,5) from tb1 group by f1 having avg(f1) > 0;
+
+sql_error select avg(f1),mavg(f1,6) from tb1 group by f1 having spread(f2) > 0;
+
+
+sql_error select avg(f1) from tb1 group by f1 having csum(f1) > 0;
+
+sql_error select avg(f1),csum(f1) from tb1 group by f1 having avg(f1) > 0;
+
+sql_error select avg(f1),csum(f1) from tb1 group by f1 having spread(f2) > 0;
+
sql select avg(f1) from tb1 group by f1 having spread(f2) > 0;
if $rows != 0 then
return -1
@@ -1857,4 +1884,6 @@ endi
sql_error select top(f1,2) from tb1 group by f1 having count(f1) > 0;
+sql_error select sample(f1,2) from tb1 group by f1 having count(f1) > 0;
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/limit.sim b/tests/script/general/parser/limit.sim
index 3af2cb301854b27bc1b9c33bf8b06cbd17e87fd3..00ebc7601386e1a19cd43253794f891441e87fe3 100644
--- a/tests/script/general/parser/limit.sim
+++ b/tests/script/general/parser/limit.sim
@@ -80,4 +80,7 @@ sql use $db
sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 order by ts desc limit 3 offset 1)
sql select * from (select ts, top(c1, 5) from $stb where ts >= $ts0 order by ts desc limit 3 offset 1)
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+sql select * from (select ts, sample(c1, 5) from $tb where ts >= $ts0 order by ts desc limit 3 offset 1)
+sql_error select * from (select ts, sample(c1, 5) from $stb where ts >= $ts0 order by ts desc limit 3 offset 1)
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/limit1_tb.sim b/tests/script/general/parser/limit1_tb.sim
index 300af7ac7b669088094c0ba72288f42d34ca374d..9c96897da89e5e2b4c3f66f30f53d5ebf674c660 100644
--- a/tests/script/general/parser/limit1_tb.sim
+++ b/tests/script/general/parser/limit1_tb.sim
@@ -471,6 +471,92 @@ if $data81 != -9 then
return -1
endi
+sql select mavg(c1,2) from $tb
+$res = $rowNum - 1
+if $rows != $res then
+ return -1
+endi
+
+sql select mavg(c1,2) from $tb where c1 > 5 limit 2 offset 1
+print $rows , $data00 , $data01 , $data10 , $data11
+if $rows != 2 then
+ return -1
+endi
+if $data00 != @18-09-17 10:20:00.000@ then
+ return -1
+endi
+if $data01 != 7.500000000 then
+ return -1
+endi
+if $data10 != @18-09-17 10:30:00.000@ then
+ return -1
+endi
+if $data11 != 8.500000000 then
+ return -1
+endi
+$limit = $rowNum / 2
+$offset = $limit - 1
+sql select mavg(c1,2) from $tb where c1 >= 0 limit $limit offset $offset
+if $rows != $limit then
+ return -1
+endi
+$limit = $rowNum / 2
+$offset = $limit + 1
+$val = $limit - 2
+sql select mavg(c1,2) from $tb where c1 >= 0 limit $limit offset $offset
+print $rows , $data01 , $data81
+if $rows != $val then
+ return -1
+endi
+if $data01 != 1.500000000 then
+ return -1
+endi
+if $data81 != 4.500000000 then
+ return -1
+endi
+
+sql select csum(c1) from $tb
+$res = $rowNum
+if $rows != $res then
+ return -1
+endi
+
+sql select csum(c1) from $tb where c1 > 5 limit 2 offset 1
+if $rows != 2 then
+ return -1
+endi
+if $data00 != @18-09-17 10:10:00.000@ then
+ return -1
+endi
+if $data01 != 13 then
+ return -1
+endi
+if $data10 != @18-09-17 10:20:00.000@ then
+ return -1
+endi
+if $data11 != 21 then
+ return -1
+endi
+$limit = $rowNum / 2
+$offset = $limit - 1
+sql select csum(c1) from $tb where c1 >= 0 limit $limit offset $offset
+if $rows != $limit then
+ return -1
+endi
+$limit = $rowNum / 2
+$offset = $limit + 1
+$val = $limit - 1
+sql select csum(c1) from $tb where c1 >= 0 limit $limit offset $offset
+if $rows != $val then
+ return -1
+endi
+if $data01 != 22501 then
+ return -1
+endi
+if $data81 != 22545 then
+ return -1
+endi
+
### aggregation + limit offset (with interval)
sql select max(c1), max(c2), max(c3), max(c4), max(c5), max(c6) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) limit 5
if $rows != 5 then
diff --git a/tests/script/general/parser/limit_stb.sim b/tests/script/general/parser/limit_stb.sim
index ec7c0e0f138e677c7da95c20af4bd13908aa1a0c..2e6c10cd96db8536e12acf57bf9283eb20f59d1b 100644
--- a/tests/script/general/parser/limit_stb.sim
+++ b/tests/script/general/parser/limit_stb.sim
@@ -828,6 +828,8 @@ if $data59 != 4 then
return -1
endi
+sql_error select sample(c1, 1) from $stb where ts >= $ts0 and ts <= $tsu limit 5 offset 1
+
sql select top(c1, 1) from $stb where ts >= $ts0 and ts <= $tsu limit 5 offset 1
if $rows != 0 then
return -1
diff --git a/tests/script/general/parser/limit_tb.sim b/tests/script/general/parser/limit_tb.sim
index 4a93797d40fb65a7df9ad8d18c60292bed83dfe4..f130214ddbed895d29ed0dba08a93003cee6e32b 100644
--- a/tests/script/general/parser/limit_tb.sim
+++ b/tests/script/general/parser/limit_tb.sim
@@ -355,6 +355,21 @@ sql select top(c1, 1) from $tb where ts >= $ts0 and ts <= $tsu limit 5 offset 1
if $rows != 0 then
return -1
endi
+sql select sample(c1, 1) from $tb where ts >= $ts0 and ts <= $tsu limit 5 offset 1
+if $rows != 0 then
+ return -1
+endi
+
+sql select * from (select ts, sample(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1)
+
+sql select ts,sample(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
+if $rows != 3 then
+ return -1
+endi
+print select ts,sample(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
+print $data00 $data01 $data02
+print $data10 $data11 $data12
+print $data20 $data21 $data22
print ========> TD-6017
sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1)
@@ -463,6 +478,35 @@ if $data11 != 1 then
return -1
endi
+sql select mavg(c1,3) from $tb where c1 > 5 limit 2 offset 1
+print $rows , $data00 , $data01
+if $rows != 1 then
+ return -1
+endi
+if $data00 != @18-09-17 10:30:00.000@ then
+ return -1
+endi
+if $data01 != 8.000000000 then
+ return -1
+endi
+
+sql select csum(c1) from $tb where c1 > 5 limit 2 offset 1
+if $rows != 2 then
+ return -1
+endi
+if $data00 != @18-09-17 10:10:00.000@ then
+ return -1
+endi
+if $data01 != 13 then
+ return -1
+endi
+if $data10 != @18-09-17 10:20:00.000@ then
+ return -1
+endi
+if $data11 != 21 then
+ return -1
+endi
+
### aggregation + limit offset (with interval)
sql select max(c1), max(c2), max(c3), max(c4), max(c5), max(c6) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) limit 5
if $rows != 5 then
diff --git a/tests/script/general/parser/nestquery.sim b/tests/script/general/parser/nestquery.sim
index 3c1ba0336973b8d07c785337de2d2c66202520c4..f2c539dbf8b8bd68c6481e790198a28d860f0b92 100644
--- a/tests/script/general/parser/nestquery.sim
+++ b/tests/script/general/parser/nestquery.sim
@@ -186,6 +186,8 @@ sql_error select derivative(val, 1s, 0) from (select c1 val from nest_tb0);
sql_error select twa(c1) from (select c1 from nest_tb0);
sql_error select irate(c1) from (select c1 from nest_tb0);
sql_error select diff(c1), twa(c1) from (select * from nest_tb0);
+sql_error select mavg(c1,2), twa(c1) from (select * from nest_tb0);
+sql_error select csum(c1), twa(c1) from (select * from nest_tb0);
sql_error select irate(c1), interp(c1), twa(c1) from (select * from nest_tb0);
sql select apercentile(c1, 50) from (select * from nest_tb0) interval(1d)
@@ -273,6 +275,14 @@ sql select diff(c1) from (select * from nest_tb0);
if $rows != 9999 then
return -1
endi
+sql select mavg(c1,2) from (select * from nest_tb0);
+if $rows != 9999 then
+ return -1
+endi
+sql select csum(c1) from (select * from nest_tb0);
+if $rows != 10000 then
+ return -1
+endi
sql select avg(c1),sum(c2), max(c3), min(c4), count(*), first(c7), last(c7),spread(c6) from (select * from nest_tb0) interval(1d);
if $rows != 7 then
@@ -330,6 +340,8 @@ if $data12 != 71680.000000000 then
return -1
endi
+sql select sample(x, 20) from (select c1 x from nest_tb0);
+
sql select top(x, 20) from (select c1 x from nest_tb0);
sql select bottom(x, 20) from (select c1 x from nest_tb0)
@@ -420,6 +432,35 @@ if $data01 != 1 then
return -1
endi
+sql select mavg(val, 2) from (select c1 val from nest_tb0);
+if $rows != 9999 then
+ return -1
+endi
+
+if $data00 != @70-01-01 08:00:00.000@ then
+ return -1
+endi
+if $data01 != 0.500000000 then
+ return -1
+endi
+
+sql select csum(val) from (select c1 val from nest_tb0);
+if $rows != 10000 then
+ return -1
+endi
+
+if $data00 != @70-01-01 08:00:00.000@ then
+ return -1
+endi
+
+if $data01 != 0 then
+ return -1
+endi
+
+if $data41 != 10 then
+ return -1
+endi
+
sql_error select last_row(*) from (select * from nest_tb0) having c1 > 0
print ===========>td-4805
@@ -508,4 +549,4 @@ if $data11 != 2.000000000 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/projection_limit_offset.sim b/tests/script/general/parser/projection_limit_offset.sim
index ffbcb28ffd9b4e15f707509dc5cc808ef3f8ce4a..a44d6782cecd6999eb887b574df944358f90faf7 100644
--- a/tests/script/general/parser/projection_limit_offset.sim
+++ b/tests/script/general/parser/projection_limit_offset.sim
@@ -296,6 +296,7 @@ sql_error select last(t1) from group_mt0;
sql_error select min(t1) from group_mt0;
sql_error select max(t1) from group_mt0;
sql_error select top(t1, 20) from group_mt0;
+sql_error select sample(t1, 20) from group_mt0;
sql_error select bottom(t1, 20) from group_mt0;
sql_error select avg(t1) from group_mt0;
sql_error select percentile(t1, 50) from group_mt0;
@@ -393,6 +394,25 @@ if $data21 != -1 then
return -1
endi
+sql select mavg(k,3) from tm0
+print ====> $rows , $data21
+if $row != 2 then
+ return -1
+endi
+if $data11 != 2.333333333 then
+ return -1
+endi
+
+sql select csum(k) from tm0
+print ====> $rows , $data21
+if $row != 4 then
+ return -1
+endi
+
+if $data21 != 6 then
+ return -1
+endi
+
#error sql
sql_error select * from 1;
#sql_error select 1; // equals to select server_status();
diff --git a/tests/script/general/parser/select_with_tags.sim b/tests/script/general/parser/select_with_tags.sim
index eb6cd75d2104f7ff61b5f5e5bccc12fdd239d3d5..195eca928fa4ddbf3795ae3e40f973ea0a5e8def 100644
--- a/tests/script/general/parser/select_with_tags.sim
+++ b/tests/script/general/parser/select_with_tags.sim
@@ -181,6 +181,12 @@ if $data03 != @abc15@ then
return -1
endi
+sql_error select sample(c6, 3) from select_tags_mt0 interval(10a)
+sql select sample(c3,10) from select_tags_mt0 interval(10a) group by tbname,t1,t2
+sql select sample(c6, 3) from select_tags_mt0 interval(10a) group by tbname;
+sql_error select sample(c6, 10) from select_tags_mt0 interval(10a);
+sql_error select sample(c1, 80), tbname, t1, t2 from select_tags_mt0;
+
sql select top(c6, 3) from select_tags_mt0 interval(10a)
sql select top(c3,10) from select_tags_mt0 interval(10a) group by tbname,t1,t2
sql select top(c6, 3) from select_tags_mt0 interval(10a) group by tbname;
@@ -418,6 +424,11 @@ if $data11 != @70-01-01 08:01:40.001@ then
return -1
endi
+sql select sample(c1, 100), tbname, t1, t2 from select_tags_mt0 where tbname in ('select_tags_tb0', 'select_tags_tb1') group by tbname;
+if $rows != 200 then
+ return -1
+endi
+
sql select top(c1, 100), tbname, t1, t2 from select_tags_mt0 where tbname in ('select_tags_tb0', 'select_tags_tb1') group by tbname;
if $row != 200 then
return -1
@@ -455,6 +466,11 @@ if $data04 != @abc0@ then
return -1
endi
+sql select sample(c1, 2), t2 from select_tags_mt0 where tbname in ('select_tags_tb0', 'select_tags_tb1') group by tbname,t2;
+if $rows != 4 then
+ return -1
+endi
+
sql select top(c1, 2), t2 from select_tags_mt0 where tbname in ('select_tags_tb0', 'select_tags_tb1') group by tbname,t2;
if $row != 4 then
return -1
@@ -542,6 +558,11 @@ endi
# slimit /limit
+sql select sample(c1, 2), t2 from select_tags_mt0 where tbname in ('select_tags_tb0', 'select_tags_tb1') group by tbname,t2 limit 2 offset 1;
+if $rows != 2 then
+ return -1
+endi
+
sql select top(c1, 2), t2 from select_tags_mt0 where tbname in ('select_tags_tb0', 'select_tags_tb1') group by tbname,t2 limit 2 offset 1;
if $row != 2 then
return -1
@@ -715,6 +736,11 @@ if $data25 != @select_tags_tb2@ then
return -1
endi
+sql select sample(c1, 5), t2 from select_tags_mt0 where c1<=2 interval(1d) group by tbname;
+if $row != 15 then
+ return -1
+endi
+
sql select top(c1, 5), t2 from select_tags_mt0 where c1<=2 interval(1d) group by tbname;
if $row != 15 then
return -1
@@ -753,6 +779,11 @@ if $data93 != @select_tags_tb1@ then
endi
#if data
+sql select sample(c1, 50), t2, t1, tbname from select_tags_mt0 where c1<=2 interval(1d) group by tbname;
+if $row != 48 then
+ return -1
+endi
+
sql select top(c1, 50), t2, t1, tbname from select_tags_mt0 where c1<=2 interval(1d) group by tbname;
if $row != 48 then
return -1
@@ -838,6 +869,8 @@ endi
print TODO ======= selectivity + tags+ group by + tags + filter + interval + join===========
print ==========================mix tag columns and group by columns======================
+sql_error select sample(c1, 100), tbname from select_tags_mt0 where tbname in ('select_tags_tb0', 'select_tags_tb1') group by t3
+
sql select top(c1, 100), tbname from select_tags_mt0 where tbname in ('select_tags_tb0', 'select_tags_tb1') group by t3
if $rows != 100 then
return -1
diff --git a/tests/script/general/parser/udf_dll.sim b/tests/script/general/parser/udf_dll.sim
index 0f9436762adb645785ddcf9a4abaf4a5be810a34..7168e0a5ddf5502170e6bb22f30b10621795a568 100644
--- a/tests/script/general/parser/udf_dll.sim
+++ b/tests/script/general/parser/udf_dll.sim
@@ -489,6 +489,7 @@ sql_error select ts,sum_double(f1),f1 from tb1;
sql_error select add_one(f1),count(f1) from tb1;
sql_error select sum_double(f1),count(f1) from tb1;
sql_error select add_one(f1),top(f1,3) from tb1;
+sql_error select add_one(f1),sample(f1,3) from tb1;
sql_error select add_one(f1) from tb1 interval(10a);
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/udf_dll_stable.sim b/tests/script/general/parser/udf_dll_stable.sim
index b8da57467e912ff27f4fbda7226c75e089f04808..15becaab22476d12829abc62db4de4f914eef271 100644
--- a/tests/script/general/parser/udf_dll_stable.sim
+++ b/tests/script/general/parser/udf_dll_stable.sim
@@ -508,6 +508,7 @@ sql_error select ts,sum_double(f1),f1 from tb1;
sql_error select add_one(f1),count(f1) from tb1;
sql_error select sum_double(f1),count(f1) from tb1;
sql_error select add_one(f1),top(f1,3) from tb1;
+sql_error select add_one(f1),sample(f1,3) from tb1;
sql_error select add_one(f1) from tb1 interval(10a);
diff --git a/tests/script/regressionSuite.sim b/tests/script/regressionSuite.sim
index bada2f655202ddc34ce6e67e718336a2afc41d50..faa6672b42be666d17bafe5a6176d95cdbbc27a8 100644
--- a/tests/script/regressionSuite.sim
+++ b/tests/script/regressionSuite.sim
@@ -21,6 +21,11 @@ run general/compute/bottom.sim
run general/compute/count.sim
run general/compute/diff.sim
run general/compute/diff2.sim
+run general/compute/mavg.sim
+run general/compute/mavg2.sim
+run general/compute/sample.sim
+run general/compute/csum.sim
+run general/compute/csum2.sim
run general/compute/first.sim
run general/compute/interval.sim
run general/compute/last.sim