diff --git a/.appveyor.yml b/.appveyor.yml deleted file mode 100644 index e7802b3d0d3cf969381b5fa2a99862b3df5aa05f..0000000000000000000000000000000000000000 --- a/.appveyor.yml +++ /dev/null @@ -1,49 +0,0 @@ -version: 1.0.{build} -image: - - Visual Studio 2015 - - macos -environment: - matrix: - - ARCH: amd64 - - ARCH: x86 -matrix: - exclude: - - image: macos - ARCH: x86 -for: - - - matrix: - only: - - image: Visual Studio 2015 - clone_folder: c:\dev\TDengine - clone_depth: 1 - - init: - - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH% - - before_build: - - cd c:\dev\TDengine - - md build - - build_script: - - cd build - - cmake -G "NMake Makefiles" .. -DBUILD_JDBC=false - - nmake install - - - matrix: - only: - - image: macos - clone_depth: 1 - - build_script: - - mkdir debug - - cd debug - - cmake .. > /dev/null - - make > /dev/null -notifications: -- provider: Email - to: - - sangshuduo@gmail.com - on_build_success: true - on_build_failure: true - on_build_status_changed: true diff --git a/CMakeLists.txt b/CMakeLists.txt index 547455d07b6ba25ac58ae5e4851c5cd5b08e3c60..2be886257e1f92155ff775d2da48ada3838fc826 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -50,7 +50,6 @@ SET(TD_MEM_CHECK FALSE) SET(TD_PAGMODE_LITE FALSE) SET(TD_SOMODE_STATIC FALSE) -SET(TD_POWER FALSE) SET(TD_GODLL FALSE) SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR}) diff --git a/Jenkinsfile b/Jenkinsfile index 5793a9043489dcc98d9426cac66ebea83d48f2ce..15640f81393f5544cf16d75fa72da062dbbda8b0 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -78,6 +78,8 @@ def pre_test(){ git checkout -qf FETCH_HEAD git clean -dfx git submodule update --init --recursive + cd src/kit/taos-tools/deps/avro + git clean -dfx cd ${WK} git reset --hard HEAD~10 ''' @@ -154,6 +156,8 @@ def pre_test_noinstall(){ git checkout -qf FETCH_HEAD git clean -dfx git submodule update --init --recursive + cd src/kit/taos-tools/deps/avro + git clean -dfx cd ${WK} git reset --hard HEAD~10 ''' @@ -186,7 +190,7 @@ def pre_test_noinstall(){ git clean -dfx mkdir debug cd debug - cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=false > /dev/null + cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true > /dev/null make ''' return 1 @@ -227,6 +231,8 @@ def pre_test_mac(){ git checkout -qf FETCH_HEAD git clean -dfx git submodule update --init --recursive + cd src/kit/taos-tools/deps/avro + git clean -dfx cd ${WK} git reset --hard HEAD~10 ''' @@ -355,7 +361,7 @@ pipeline { } stages { stage('pre_build'){ - agent{label 'catalina'} + agent{label 'master'} options { skipDefaultCheckout() } when { changeRequest() @@ -364,36 +370,11 @@ pipeline { script{ abort_previous() abortPreviousBuilds() - println env.CHANGE_BRANCH - if(env.CHANGE_FORK){ - scope = ['connector','query','insert','other','tools','taosAdapter'] - } - else{ - sh''' - cd ${WKC} - git reset --hard HEAD~10 - git fetch - git checkout ${CHANGE_BRANCH} - git pull - ''' - dir('/var/lib/jenkins/workspace/TDinternal/community'){ - gitlog = sh(script: "git log -1 --pretty=%B ", returnStdout:true) - println gitlog - if (!(gitlog =~ /\((.*?)\)/)){ - autoCancelled = true - error('Please fill in the scope information correctly.\neg. 
[TD-xxxx](query,insert):xxxxxxxxxxxxxxxxxx ') - } - temp = (gitlog =~ /\((.*?)\)/) - temp = temp[0].remove(1) - scope = temp.split(",") - scope = ['connector','query','insert','other','tools','taosAdapter'] - Collections.shuffle mod - Collections.shuffle sim_mod - } - + scope = ['connector','query','insert','other','tools','taosAdapter'] + Collections.shuffle mod + Collections.shuffle sim_mod } } - } } stage('Parallel test stage') { //only build pr diff --git a/cmake/define.inc b/cmake/define.inc index cc0f6070f6b0c5631484444b9b68389afaf66ed6..a15a0725ebcc04683cee3559e69cf667a060fc7d 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -41,26 +41,6 @@ IF (TD_GODLL) ADD_DEFINITIONS(-D_TD_GO_DLL_) ENDIF () -IF (TD_POWER) - ADD_DEFINITIONS(-D_TD_POWER_) -ENDIF () - -IF (TD_TQ) - ADD_DEFINITIONS(-D_TD_TQ_) -ENDIF () - -IF (TD_PRO) - ADD_DEFINITIONS(-D_TD_PRO_) -ENDIF () - -IF (TD_KH) - ADD_DEFINITIONS(-D_TD_KH_) -ENDIF () - -IF (TD_JH) - ADD_DEFINITIONS(-D_TD_JH_) -ENDIF () - IF (TD_MEM_CHECK) ADD_DEFINITIONS(-DTAOS_MEM_CHECK) ENDIF () diff --git a/cmake/input.inc b/cmake/input.inc index 14ac795e7edaef2569ea4a01b4da5e3251e6d7ff..bc79de48a482539660e6166b642144d754fc94a4 100755 --- a/cmake/input.inc +++ b/cmake/input.inc @@ -43,23 +43,6 @@ IF (${SOMODE} MATCHES "static") MESSAGE(STATUS "Link so using static mode") ENDIF () -IF (${DBNAME} MATCHES "power") - SET(TD_POWER TRUE) - MESSAGE(STATUS "power is true") -ELSEIF (${DBNAME} MATCHES "tq") - SET(TD_TQ TRUE) - MESSAGE(STATUS "tq is true") -ELSEIF (${DBNAME} MATCHES "pro") - SET(TD_PRO TRUE) - MESSAGE(STATUS "pro is true") -ELSEIF (${DBNAME} MATCHES "kh") - SET(TD_KH TRUE) - MESSAGE(STATUS "kh is true") -ELSEIF (${DBNAME} MATCHES "jh") - SET(TD_JH TRUE) - MESSAGE(STATUS "jh is true") -ENDIF () - IF (${DLLTYPE} MATCHES "go") SET(TD_GODLL TRUE) MESSAGE(STATUS "input dll type: " ${DLLTYPE}) diff --git a/cmake/install.inc b/cmake/install.inc index 283d6a9c045c2a14dd18cd82d4fabb47f24466ee..b1cf7b3f9dc3fd0e559a65fd4a04eeb780b164fb 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -3,48 +3,21 @@ IF (TD_LINUX) INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")") INSTALL(CODE "execute_process(COMMAND bash ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Linux ${TD_VER_NUMBER})") ELSEIF (TD_WINDOWS) - IF (TD_POWER) - SET(CMAKE_INSTALL_PREFIX C:/PowerDB) - ELSEIF (TD_TQ) - SET(CMAKE_INSTALL_PREFIX C:/TQueue) - ELSEIF (TD_PRO) - SET(CMAKE_INSTALL_PREFIX C:/ProDB) - ELSEIF (TD_KH) - SET(CMAKE_INSTALL_PREFIX C:/KingHistorian) - ELSEIF (TD_JH) - SET(CMAKE_INSTALL_PREFIX C:/jh_iot) - ELSE () - SET(CMAKE_INSTALL_PREFIX C:/TDengine) - ENDIF () + SET(CMAKE_INSTALL_PREFIX C:/TDengine) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/nodejs DESTINATION connector) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/C\# DESTINATION connector) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .) 
+ INSTALL(FILES ${TD_COMMUNITY_DIR}/packaging/cfg/taos.cfg DESTINATION cfg) INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include) INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taoserror.h DESTINATION include) INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver) INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver) INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver) + INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) - IF (TD_POWER) - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/power.exe DESTINATION .) - ELSEIF (TD_TQ) - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/tq.exe DESTINATION .) - ELSEIF (TD_PRO) - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/prodbc.exe DESTINATION .) - ELSEIF (TD_KH) - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/khclient.exe DESTINATION .) - ELSEIF (TD_JH) - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/jh_taos.exe DESTINATION .) - ELSE () - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) - ENDIF () - - #INSTALL(TARGETS taos RUNTIME DESTINATION driver) - #INSTALL(TARGETS shell RUNTIME DESTINATION .) IF (TD_MVN_INSTALLED) INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.37-dist.jar DESTINATION connector/jdbc) ENDIF () diff --git a/cmake/platform.inc b/cmake/platform.inc index 2a0aace8d08e9dba1451daa051df4b614a21d398..b0e463026ef64d3ce662911001daa17488dfe321 100755 --- a/cmake/platform.inc +++ b/cmake/platform.inc @@ -96,10 +96,12 @@ IF ("${CPUTYPE}" STREQUAL "") MESSAGE(STATUS "The current platform is amd64") MESSAGE(STATUS "Set CPUTYPE to x64") SET(CPUTYPE "x64") + SET(PLATFORM_ARCH_STR "amd64") ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)") MESSAGE(STATUS "The current platform is x86") MESSAGE(STATUS "Set CPUTYPE to x86") SET(CPUTYPE "x32") + SET(PLATFORM_ARCH_STR "i386") ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "armv7l") MESSAGE(STATUS "Set CPUTYPE to aarch32") SET(CPUTYPE "aarch32") @@ -107,12 +109,14 @@ IF ("${CPUTYPE}" STREQUAL "") SET(TD_LINUX TRUE) SET(TD_LINUX_32 FALSE) SET(TD_ARM_32 TRUE) + SET(PLATFORM_ARCH_STR "arm") ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") SET(CPUTYPE "aarch64") MESSAGE(STATUS "Set CPUTYPE to aarch64") SET(TD_LINUX TRUE) SET(TD_LINUX_64 FALSE) SET(TD_ARM_64 TRUE) + SET(PLATFORM_ARCH_STR "arm64") ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64") SET(CPUTYPE "mips64") MESSAGE(STATUS "Set CPUTYPE to mips64") @@ -124,7 +128,6 @@ IF ("${CPUTYPE}" STREQUAL "") MESSAGE(STATUS "Set CPUTYPE to apple silicon m1") SET(TD_ARM_64 TRUE) ENDIF () - ELSE () # if generate ARM version: # cmake -DCPUTYPE=aarch32 .. 
or cmake -DCPUTYPE=aarch64 @@ -132,27 +135,33 @@ ELSE () SET(TD_LINUX TRUE) SET(TD_LINUX_32 FALSE) SET(TD_ARM_32 TRUE) + SET(PLATFORM_ARCH_STR "arm") MESSAGE(STATUS "input cpuType: aarch32") ELSEIF (${CPUTYPE} MATCHES "aarch64") SET(TD_LINUX TRUE) SET(TD_LINUX_64 FALSE) SET(TD_ARM_64 TRUE) + SET(PLATFORM_ARCH_STR "arm64") MESSAGE(STATUS "input cpuType: aarch64") ELSEIF (${CPUTYPE} MATCHES "mips64") SET(TD_LINUX TRUE) SET(TD_LINUX_64 FALSE) SET(TD_MIPS_64 TRUE) + SET(PLATFORM_ARCH_STR "mips") MESSAGE(STATUS "input cpuType: mips64") ELSEIF (${CPUTYPE} MATCHES "x64") + SET(PLATFORM_ARCH_STR "amd64") MESSAGE(STATUS "input cpuType: x64") ELSEIF (${CPUTYPE} MATCHES "x86") + SET(PLATFORM_ARCH_STR "i386") MESSAGE(STATUS "input cpuType: x86") ELSE () MESSAGE(STATUS "input cpuType unknown " ${CPUTYPE}) ENDIF () - ENDIF () +MESSAGE(STATUS "platform arch:" ${PLATFORM_ARCH_STR}) + # cmake -DOSTYPE=Ningsi IF (${OSTYPE} MATCHES "Ningsi60") SET(TD_NINGSI TRUE) diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md index 3587138544ba36aed3417fe7fd6f59b6b7049e2d..a81dc45ec5111150c8a7b900f79a7beec4ceb21e 100644 --- a/documentation20/cn/00.index/docs.md +++ b/documentation20/cn/00.index/docs.md @@ -1,6 +1,6 @@ # TDengine文档 -TDengine是一个高效的存储、查询、分析时序大数据的平台,专为物联网、车联网、工业互联网、运维监测等优化而设计。您可以像使用关系型数据库MySQL一样来使用它,但建议您在使用前仔细阅读一遍下面的文档,特别是 [数据模型](/architecture) 与 [数据建模](/model)。除本文档之外,欢迎 [下载产品白皮书](https://www.taosdata.com/downloads/TDengine%20White%20Paper.pdf)。如需查阅TDengine 1.6 文档,请点击 [这里](https://www.taosdata.com/cn/documentation16/) 访问。 +TDengine是一个高效的存储、查询、分析时序大数据的平台,专为物联网、车联网、工业互联网、运维监测等优化而设计。您可以像使用关系型数据库MySQL一样来使用它,但建议您在使用前仔细阅读一遍下面的文档,特别是 [数据模型](/architecture) 与 [数据建模](/model)。除本文档之外,欢迎 [下载产品白皮书](https://www.taosdata.com/downloads/TDengine%20White%20Paper.pdf)。 ## [TDengine介绍](/evaluation) @@ -69,7 +69,6 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 * [连续查询(Continuous Query)](/advanced-features#continuous-query):基于滑动窗口,定时自动的对数据流进行查询计算 * [数据订阅(Publisher/Subscriber)](/advanced-features#subscribe):类似典型的消息队列,应用可订阅接收到的最新数据 * [缓存(Cache)](/advanced-features#cache):每个设备最新的数据都会缓存在内存中,可快速获取 -* [报警监测](/advanced-features#alert):根据配置规则,自动监测超限行为数据,并主动推送 ## [连接器](/connector) @@ -85,9 +84,10 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 ## TDengine 组件与工具 -* [taosAdapter 用户手册](/tools/adapter) -* [TDinsight 用户手册](/tools/insight) -* [taoTools 用户手册](/tools/taos-tools) +* [taosAdapter](/tools/adapter): TDengine 集群和应用之间的 RESTful 接口适配服务。 +* [TDinsight](/tools/insight): 监控 TDengine 集群的 Grafana 面板集合。 +* [taosdump](/tools/taosdump): TDengine 数据备份工具。使用 taosdump 请安装 taosTools。 +* [taosBenchmark](/tools/taosbenchmark): TDengine 压力测试工具。使用 taosBenchmark 请安装 taosTools。 ## [与其他工具的连接](/connections) diff --git a/documentation20/cn/02.getting-started/01.docker/docs.md b/documentation20/cn/02.getting-started/01.docker/docs.md index cab6d878991a315f79b7fc0813e3727b6e8720dd..1ba593b12ecc3a5eac5ab6af64a2507d561cbd98 100644 --- a/documentation20/cn/02.getting-started/01.docker/docs.md +++ b/documentation20/cn/02.getting-started/01.docker/docs.md @@ -20,11 +20,11 @@ Docker version 20.10.3, build 48d30b5 ### 在 Docker 容器中运行 TDengine server ```bash -$ docker run -d -p 6030-6041:6030-6041 -p 6030-6041:6030-6041/udp tdengine/tdengine +$ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine 526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd ``` -这条命令,启动一个运行了 TDengine server 的 docker 容器,并且将容器的 6030 到 6041 端口映射到宿主机的 6030 到 6041 端口上。如果宿主机已经运行了 TDengine server 并占用了相同端口,需要映射容器的端口到不同的未使用端口段。(详情参见 [TDengine 2.0 
端口说明](https://www.taosdata.com/cn/documentation/faq#port))。为了支持 TDengine 客户端操作 TDengine server 服务, TCP 和 UDP 端口都需要打开。 +这条命令,启动一个运行了 TDengine server 的 docker 容器,并且将容器的 6030 到 6049 端口映射到宿主机的 6030 到 6049 端口上。如果宿主机已经运行了 TDengine server 并占用了相同端口,需要映射容器的端口到不同的未使用端口段。(详情参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port))。为了支持 TDengine 客户端操作 TDengine server 服务, TCP 和 UDP 端口都需要打开。 - **docker run**:通过 Docker 运行一个容器 - **-d**:让容器在后台运行 @@ -32,14 +32,14 @@ $ docker run -d -p 6030-6041:6030-6041 -p 6030-6041:6030-6041/udp tdengine/tdeng - **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像 - **526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd**:这个返回的长字符是容器 ID,我们也可以通过容器 ID 来查看对应的容器 -进一步,还可以使用 docker run 命令启动运行 TDengine server 的 docker 容器,并使用 --name 命令行参数将容器命名为 tdengine,使用 --hostname 指定 hostname 为 tdengine-server,通过 -v 挂载本地目录(-v),实现宿主机与容器内部的数据同步,防止容器删除后,数据丢失。 +进一步,还可以使用 docker run 命令启动运行 TDengine server 的 docker 容器,并使用 `--name` 命令行参数将容器命名为 `tdengine`,使用 `--hostname` 指定 hostname 为 `tdengine-server`,通过 `-v` 挂载本地目录到容器,实现宿主机与容器内部的数据同步,防止容器删除后,数据丢失。 -``` -$ docker run -d --name tdengine --hostname="tdengine-server" -v ~/work/taos/log:/var/log/taos -v ~/work/taos/data:/var/lib/taos -p 6030-6041:6030-6041 -p 6030-6041:6030-6041/udp tdengine/tdengine +```bash +$ docker run -d --name tdengine --hostname="tdengine-server" -v ~/work/taos/log:/var/log/taos -v ~/work/taos/data:/var/lib/taos -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine ``` - **--name tdengine**:设置容器名称,我们可以通过容器名称来访问对应的容器 -- **--hostnamename=tdengine-server**:设置容器内 Linux 系统的 hostname,我们可以通过映射 hostname 和 IP 来解决容器 IP 可能变化的问题。 +- **--hostname=tdengine-server**:设置容器内 Linux 系统的 hostname,我们可以通过映射 hostname 和 IP 来解决容器 IP 可能变化的问题。 - **-v**:设置宿主机文件目录映射到容器内目录,避免容器删除后数据丢失。 ### 使用 docker ps 命令确认容器是否已经正确运行 @@ -61,7 +61,7 @@ c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes · ```bash $ docker exec -it tdengine /bin/bash -root@tdengine-server:~/TDengine-server-2.0.20.13# +root@tdengine-server:~/TDengine-server-2.4.0.4# ``` - **docker exec**:通过 docker exec 命令进入容器,如果退出,容器不会停止。 @@ -73,12 +73,12 @@ root@tdengine-server:~/TDengine-server-2.0.20.13# 进入容器后,执行 taos shell 客户端程序。 ```bash -root@tdengine-server:~/TDengine-server-2.0.20.13# taos +root@tdengine-server:~/TDengine-server-2.4.0.4# taos -Welcome to the TDengine shell from Linux, Client Version:2.0.20.13 +Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. -taos> +taos> ``` TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息。如果失败,会有错误信息打印出来。 @@ -93,7 +93,7 @@ TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息 ``` $ taos -Welcome to the TDengine shell from Linux, Client Version:2.0.22.3 +Welcome to the TDengine shell from Linux, Client Version:2.4.0.4 Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. 
taos> 
@@ -110,20 +110,95 @@ $ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
 
 TDengine RESTful 接口详情请参考[官方文档](https://www.taosdata.com/cn/documentation/connector#restful)。
 
-
 ### 使用 Docker 容器运行 TDengine server 和 taosAdapter
 
-在 TDegnine 2.4.0.0 之后版本的 Docker 容器,开始一个组件 taosAdapter,taosAdapter 支持通过 RESTful 接口对 TDengine server 的数据写入和查询能力,并提供和 InfluxDB/OpenTSDB 兼容的数据摄取接口,允许 InfluxDB/OpenTSDB 应用程序无缝移植到 TDengine。
+在 TDengine 2.4.0.0 之后版本的 Docker 容器中,开始提供一个独立运行的组件 taosAdapter,代替之前版本 TDengine 中 taosd 进程内置的 http server。taosAdapter 支持通过 RESTful 接口对 TDengine server 的数据写入和查询能力,并提供和 InfluxDB/OpenTSDB 兼容的数据摄取接口,允许 InfluxDB/OpenTSDB 应用程序无缝移植到 TDengine。在新版本 Docker 镜像中,默认启用了 taosAdapter,也可以在 docker run 命令中设置 TAOS_DISABLE_ADAPTER=true 来禁用 taosAdapter;也可以在 docker run 命令中单独使用 taosAdapter,而不运行 taosd。
 
-注意:如果容器中运行 taosAdapter,需要根据需要增加映射其他端口,具体端口默认配置和修改方法请参考[taosAdapter文档](https://github.com/taosdata/taosadapter/blob/develop/README-CN.md)。
+注意:如果容器中运行 taosAdapter,需要根据需要映射其他端口,具体端口默认配置和修改方法请参考[taosAdapter文档](https://github.com/taosdata/taosadapter/blob/develop/README-CN.md)。
 
-使用 docker 运行 TDengine 2.4.0.0 版本镜像:
+使用 docker 运行 TDengine 2.4.0.4 版本镜像(taosd + taosAdapter):
 
+```bash
+$ docker run -d --name tdengine-all -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.4
 ```
-$ docker run -d --name tdengine-taosa -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.0
+
+使用 docker 运行 TDengine 2.4.0.4 版本镜像(仅 taosAdapter,需要设置 firstEp 配置项 或 TAOS_FIRST_EP 环境变量):
+
+```bash
+$ docker run -d --name tdengine-taosa -p 6041-6049:6041-6049 -p 6041-6049:6041-6049/udp -e TAOS_FIRST_EP=tdengine-all tdengine/tdengine:2.4.0.4 taosadapter
+```
+
+使用 docker 运行 TDengine 2.4.0.4 版本镜像(仅 taosd):
+
+```bash
+$ docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-6042/udp -e TAOS_DISABLE_ADAPTER=true tdengine/tdengine:2.4.0.4
+```
 
 使用 curl 命令验证 RESTful 接口可以正常工作:
+
+```bash
+$ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
+
+{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2021-12-28 09:18:55.765",10,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1}
+```
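+
+The same health check can be scripted. Below is a minimal C sketch using libcurl (the HTTP client is an assumption -- any client works) that issues the identical `show databases;` request with the default root:taosdata credentials and prints the JSON response to stdout:
+
+```c
+#include <stdio.h>
+#include <curl/curl.h>
+
+int main(void) {
+  CURL *curl = curl_easy_init();
+  if (curl == NULL) return 1;
+
+  /* Mirrors: curl -u root:taosdata -d 'show databases;' 127.0.0.1:6041/rest/sql */
+  curl_easy_setopt(curl, CURLOPT_URL, "http://127.0.0.1:6041/rest/sql");
+  curl_easy_setopt(curl, CURLOPT_USERPWD, "root:taosdata");
+  curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "show databases;");
+
+  CURLcode rc = curl_easy_perform(curl); /* response body is written to stdout by default */
+  if (rc != CURLE_OK) {
+    fprintf(stderr, "REST request failed: %s\n", curl_easy_strerror(rc));
+  }
+  curl_easy_cleanup(curl);
+  return rc == CURLE_OK ? 0 : 1;
+}
+```
+
+Build it with `gcc rest_check.c -lcurl` (the file name is illustrative). A missing `"status":"succ"` in the output usually means the 6041 port mapping or taosAdapter itself is not up.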
+
+taosAdapter 支持多个数据收集代理软件(如 Telegraf、StatsD、collectd 等),这里仅模拟 StatsD 写入数据,在宿主机执行命令如下:
+
+```bash
+$ echo "foo:1|c" | nc -u -w0 127.0.0.1 6044
+```
+
+然后可以使用 taos shell 查询 taosAdapter 自动创建的数据库 statsd 和 超级表 foo 中的内容:
+
+```bash
+taos> show databases;
+ name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
+====================================================================================================================================================================================================================================================================================
+ log | 2021-12-28 09:18:55.765 | 12 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
+ statsd | 2021-12-28 09:21:48.841 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
+Query OK, 2 row(s) in set (0.002112s)
+
+taos> use statsd;
+Database changed.
+
+taos> show stables;
+ name | created_time | columns | tags | tables |
+============================================================================================
+ foo | 2021-12-28 09:21:48.894 | 2 | 1 | 1 |
+Query OK, 1 row(s) in set (0.001160s)
+
+taos> select * from foo;
+ ts | value | metric_type |
+=======================================================================================
+ 2021-12-28 09:21:48.840820836 | 1 | counter |
+Query OK, 1 row(s) in set (0.001639s)
+
+taos>
+```
+
+可以看到模拟数据已经被写入到 TDengine 中。
@@ -165,19 +242,82 @@ taos>
 
 可以看到模拟数据已经被写入到 TDengine 中。
 
-### 应用示例:在宿主机使用 taosdemo 写入数据到 Docker 容器中的 TDengine server
+### 应用示例:在宿主机使用 taosBenchmark 写入数据到 Docker 容器中的 TDengine server
 
-1,在宿主机命令行界面执行 taosdemo 写入数据到 Docker 容器中的 TDengine server
+1,在宿主机命令行界面执行 taosBenchmark (曾命名为 taosdemo)写入数据到 Docker 容器中的 TDengine server
 
 ```bash
-$ taosdemo
+$ taosBenchmark
 
-taosdemo is simulating data generated by power equipments monitoring...
+taosBenchmark is simulating data generated by power equipment monitoring...
 
 host: 127.0.0.1:6030
 user: root
 password: taosdata
-configDir: 
+configDir:
 resultFile: ./output.txt
 thread num of insert data: 10
 thread num of create table: 10
 top insert interval: 0
 ```
@@ -206,13 +346,13 @@ database[0]:
 maxSqlLen: 1048576
 timeStampStep: 1
 startTimestamp: 2017-07-14 10:40:00.000
- sampleFormat: 
- sampleFile: 
- tagsFile: 
+ sampleFormat:
+ sampleFile:
+ tagsFile:
 columnCount: 3
-column[0]:FLOAT column[1]:INT column[2]:FLOAT 
+column[0]:FLOAT column[1]:INT column[2]:FLOAT
 tagCount: 2
- tag[0]:INT tag[1]:BINARY(16) 
+ tag[0]:INT tag[1]:BINARY(16)
 
 Press enter key to continue or Ctrl-C to stop
 ```
@@ -221,17 +361,17 @@ column[0]:FLOAT column[1]:INT column[2]:FLOAT
 
 最后共插入 1 亿条记录。
 
-2,进入 TDengine 终端,查看 taosdemo 生成的数据。
+2,进入 TDengine 终端,查看 taosBenchmark 生成的数据。
 
 - **进入命令行。**
 
 ```bash
-$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
+$ root@c452519b0f9b:~/TDengine-server-2.4.0.4# taos
 
-Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
+Welcome to the TDengine shell from Linux, Client Version:2.4.0.4
 Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
 
-taos> 
+taos>
 ```
 
 - **查看数据库。**
diff --git a/documentation20/cn/02.getting-started/02.taosdemo/docs.md b/documentation20/cn/02.getting-started/02.taosdemo/docs.md
index c01c2efb514c22883bbc9a8bd07a974ba37d3019..64200f17ff5912d4741ea69f7e4dffaa99f7c5c3 100644
--- a/documentation20/cn/02.getting-started/02.taosdemo/docs.md
+++ b/documentation20/cn/02.getting-started/02.taosdemo/docs.md
@@ -1,20 +1,20 @@
- 如何使用 taosdemo 进行性能测试
+ 如何使用 taosBenchmark 进行性能测试
==
 
-自从 TDengine 2019年 7 月开源以来,凭借创新的数据建模设计、快捷的安装方式、易用的编程接口和强大的数据写入查询性能博得了大量时序数据开发者的青睐。其中写入和查询性能往往令刚接触 TDengine 的用户称叹不已。为了便于用户在最短时间内就可以体验到 TDengine 的高性能特点,我们专门开发了一个应用程序 taosdemo 用于对 TDengine 进行写入和查询的性能测试,用户可以通过 taosdemo 轻松模拟大量设备产生海量数据的场景,并且可以通过 taosdemo 参数灵活控制表的列数、数据类型、乱序比例以及并发线程数量。
+自从 TDengine 2019年 7 月开源以来,凭借创新的数据建模设计、快捷的安装方式、易用的编程接口和强大的数据写入查询性能博得了大量时序数据开发者的青睐。其中写入和查询性能往往令刚接触 TDengine 的用户称叹不已。为了便于用户在最短时间内就可以体验到 TDengine 的高性能特点,我们专门开发了一个应用程序 taosBenchmark (曾命名为 taosdemo)用于对 TDengine 进行写入和查询的性能测试,用户可以通过 taosBenchmark 轻松模拟大量设备产生海量数据的场景,并且可以通过 taosBenchmark 参数灵活控制表的列数、数据类型、乱序比例以及并发线程数量。
 
-运行 taosdemo 很简单,通过下载 TDengine 安装包( https://www.taosdata.com/cn/all-downloads/ )或者自行下载 TDengine 代码( https://github.com/taosdata/TDengine )编译都可以在安装目录或者编译结果目录中找到并运行。
+运行 taosBenchmark 很简单,通过下载 TDengine 安装包( https://www.taosdata.com/cn/all-downloads/ )或者自行下载 TDengine 代码( https://github.com/taosdata/TDengine )编译都可以在安装目录或者编译结果目录中找到并运行。
 
-接下来本文为大家讲解 taosdemo 的使用介绍及注意事项。
+接下来本文为大家讲解 taosBenchmark 的使用介绍及注意事项。
 
-使用 taosdemo 进行写入测试
+使用 taosBenchmark 进行写入测试
--
-不使用任何参数的情况下执行 taosdemo 命令,输出如下:
+不使用任何参数的情况下执行 taosBenchmark 命令,输出如下:
 
```
-$ taosdemo
+$ taosBenchmark
 
-taosdemo is simulating data generated by power equipment monitoring...
+taosBenchmark is simulating data generated by power equipment monitoring...
host: 127.0.0.1:6030 user: root @@ -58,7 +58,7 @@ column[0]:FLOAT column[1]:INT column[2]:FLOAT Press enter key to continue or Ctrl-C to stop ``` -这里显示的是接下来 taosdemo 进行数据写入的各项参数。默认不输入任何命令行参数的情况下 taosdemo 将模拟生成一个电力行业典型应用的电表数据采集场景数据。即建立一个名为 test 的数据库,并创建一个名为 meters 的超级表,其中表结构为: +这里显示的是接下来 taosBenchmark 进行数据写入的各项参数。默认不输入任何命令行参数的情况下 taosBenchmark 将模拟生成一个电力行业典型应用的电表数据采集场景数据。即建立一个名为 test 的数据库,并创建一个名为 meters 的超级表,其中表结构为: ``` taos> describe test.meters; Field | Type | Length | Note | @@ -71,7 +71,7 @@ taos> describe test.meters; location | BINARY | 64 | TAG | Query OK, 6 row(s) in set (0.002972s) ``` -按任意键后 taosdemo 将建立数据库 test 和超级表 meters,并按照 TDengine 数据建模的最佳实践,以 meters 超级表为模板生成一万个子表,代表一万个独立上报数据的电表设备。 +按任意键后 taosBenchmark 将建立数据库 test 和超级表 meters,并按照 TDengine 数据建模的最佳实践,以 meters 超级表为模板生成一万个子表,代表一万个独立上报数据的电表设备。 ``` taos> use test; Database changed. @@ -82,7 +82,7 @@ taos> show stables; meters | 2021-08-27 11:21:01.209 | 4 | 2 | 10000 | Query OK, 1 row(s) in set (0.001740s) ``` -然后 taosdemo 为每个电表设备模拟生成一万条记录: +然后 taosBenchmark 为每个电表设备模拟生成一万条记录: ``` ... ====thread[3] completed total inserted rows: 6250000, total affected rows: 6250000. 347626.22 records/second==== @@ -99,9 +99,9 @@ Spent 18.0863 seconds to insert rows: 100000000, affected rows: 100000000 with 1 insert delay, avg: 28.64ms, max: 112.92ms, min: 9.35ms ``` -以上信息是在一台具备 8个CPU 64G 内存的普通 PC 服务器上进行实测的结果。显示 taosdemo 用了 18 秒的时间插入了 100000000 (一亿)条记录,平均每秒钟插入 552 万 9千零49 条记录。 +以上信息是在一台具备 8个CPU 64G 内存的普通 PC 服务器上进行实测的结果。显示 taosBenchmark 用了 18 秒的时间插入了 100000000 (一亿)条记录,平均每秒钟插入 552 万 9千零49 条记录。 -TDengine 还提供性能更好的参数绑定接口,而在同样的硬件上使用参数绑定接口 (taosdemo -I stmt )进行相同数据量的写入,结果如下: +TDengine 还提供性能更好的参数绑定接口,而在同样的硬件上使用参数绑定接口 (taosBenchmark -I stmt )进行相同数据量的写入,结果如下: ``` ... @@ -136,14 +136,14 @@ Spent 6.0257 seconds to insert rows: 100000000, affected rows: 100000000 with 16 insert delay, avg: 8.31ms, max: 860.12ms, min: 2.00ms ``` -显示 taosdemo 用了 6 秒的时间插入了一亿条记录,每秒钟插入性能高达 1659 万 5 千 590 条记录。 +显示 taosBenchmark 用了 6 秒的时间插入了一亿条记录,每秒钟插入性能高达 1659 万 5 千 590 条记录。 -由于 taosdemo 使用起来非常方便,我们又对 taosdemo 做了更多的功能扩充,使其支持更复杂的参数设置,便于进行快速原型开发的样例数据准备和验证工作。 +由于 taosBenchmark 使用起来非常方便,我们又对 taosBenchmark 做了更多的功能扩充,使其支持更复杂的参数设置,便于进行快速原型开发的样例数据准备和验证工作。 -完整的 taosdemo 命令行参数列表可以通过 taosdemo --help 显示如下: +完整的 taosBenchmark 命令行参数列表可以通过 taosBenchmark --help 显示如下: ``` -$ taosdemo --help +$ taosBenchmark --help -f, --file=FILE The meta file to the execution procedure. Currently, we support standard UTF-8 (without BOM) encoded files only. -u, --user=USER The user name to use when connecting to the server. @@ -151,7 +151,7 @@ $ taosdemo --help -c, --config-dir=CONFIG_DIR Configuration directory. -h, --host=HOST TDengine server FQDN to connect. The default host is localhost. -P, --port=PORT The TCP/IP port number to use for the connection. --I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosdemo uses. By default use 'taosc'. +-I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosBenchmark uses. By default use 'taosc'. -d, --database=DATABASE Destination database. By default is 'test'. -a, --replica=REPLICA Set the replica parameters of the database, By default use 1, min: 1, max: 3. -m, --table-prefix=TABLEPREFIX Table prefix name. By default use 'd'. @@ -187,15 +187,15 @@ for any corresponding short options. Report bugs to . ``` -taosdemo 的参数是为了满足数据模拟的需求来设计的。下面介绍几个常用的参数: +taosBenchmark 的参数是为了满足数据模拟的需求来设计的。下面介绍几个常用的参数: ``` --I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'. 
+-I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosBenchmark uses. Default is 'taosc'. ``` -前面介绍 taosdemo 不同接口的性能差异已经提到, -I 参数为选择不同的接口,目前支持 taosc、stmt 和 rest 几种。其中 taosc 为使用 SQL 语句方式进行数据写入;stmt 为使用参数绑定接口进行数据写入;rest 为使用 RESTful 协议进行数据写入。 +前面介绍 taosBenchmark 不同接口的性能差异已经提到, -I 参数为选择不同的接口,目前支持 taosc、stmt 和 rest 几种。其中 taosc 为使用 SQL 语句方式进行数据写入;stmt 为使用参数绑定接口进行数据写入;rest 为使用 RESTful 协议进行数据写入。 ``` -T, --threads=NUMBER The number of threads. Default is 8. ``` --T 参数设置 taosdemo 使用多少个线程进行数据同步写入,通过多线程可以尽最大可能压榨硬件的处理能力。 +-T 参数设置 taosBenchmark 使用多少个线程进行数据同步写入,通过多线程可以尽最大可能压榨硬件的处理能力。 ``` -b, --data-type=DATATYPE The data_type of columns, default: FLOAT, INT, FLOAT. @@ -203,7 +203,7 @@ taosdemo 的参数是为了满足数据模拟的需求来设计的。下面介 -l, --columns=COLUMNS The number of columns per record. Demo mode by default is 3 (float, int, float). Max values is 4095 ``` -前文提到,taosdemo 默认创建一个典型电表数据采集应用场景,每个设备包含电流电压相位3个采集量。对于需要定义不同的采集量,可以使用 -b 参数。TDengine 支持 BOOL、TINYINT、SMALLINT、INT、BIGINT、FLOAT、DOUBLE、BINARY、NCHAR、TIMESTAMP 等多种数据类型。通过 -b 加上以“ , ”(英文逗号)分割定制类型的列表可以使 taosdemo 建立对应的超级表和子表并插入相应模拟数据。通过 -w 参数可以指定 BINARY 和 NCHAR 数据类型的列的宽度(默认为 64 )。-l 参数可以在 -b 参数指定数据类型的几列之后补充以 INT 型的总的列数,特别多列的情况下可以减少手工输入的过程,最多支持到 4095 列。 +前文提到,taosBenchmark 默认创建一个典型电表数据采集应用场景,每个设备包含电流电压相位3个采集量。对于需要定义不同的采集量,可以使用 -b 参数。TDengine 支持 BOOL、TINYINT、SMALLINT、INT、BIGINT、FLOAT、DOUBLE、BINARY、NCHAR、TIMESTAMP 等多种数据类型。通过 -b 加上以“ , ”(英文逗号)分割定制类型的列表可以使 taosBenchmark 建立对应的超级表和子表并插入相应模拟数据。通过 -w 参数可以指定 BINARY 和 NCHAR 数据类型的列的宽度(默认为 64 )。-l 参数可以在 -b 参数指定数据类型的几列之后补充以 INT 型的总的列数,特别多列的情况下可以减少手工输入的过程,最多支持到 4095 列。 ``` -r, --rec-per-req=NUMBER The number of records per request. Default is 30000. ``` @@ -213,28 +213,28 @@ taosdemo 的参数是为了满足数据模拟的需求来设计的。下面介 -n, --records=NUMBER The number of records per table. Default is 10000. -M, --random The value of records generated are totally random. The default is to simulate power equipment senario. ``` -前面提到 taosdemo 默认创建 10000 个表,每个表写入 10000 条记录。可以通过 -t 和 -n 设置表的数量和每个表的记录的数量。默认无参数生成的数据为模拟真实场景,模拟生成的数据为电流电压相位值增加一定的抖动,可以更真实表现 TDengine 高效的数据压缩能力。如果需要模拟生成完全随机数据,可以通过 -M 参数。 +前面提到 taosBenchmark 默认创建 10000 个表,每个表写入 10000 条记录。可以通过 -t 和 -n 设置表的数量和每个表的记录的数量。默认无参数生成的数据为模拟真实场景,模拟生成的数据为电流电压相位值增加一定的抖动,可以更真实表现 TDengine 高效的数据压缩能力。如果需要模拟生成完全随机数据,可以通过 -M 参数。 ``` -y, --answer-yes Default input yes for prompt. ``` -前面我们可以看到 taosdemo 默认在进行创建数据库或插入数据之前输出将要进行操作的参数列表,方便使用者在插入之前了解即将进行的数据写入的内容。为了方便进行自动测试,-y 参数可以使 taosdemo 输出参数后立刻进行数据写入操作。 +前面我们可以看到 taosBenchmark 默认在进行创建数据库或插入数据之前输出将要进行操作的参数列表,方便使用者在插入之前了解即将进行的数据写入的内容。为了方便进行自动测试,-y 参数可以使 taosBenchmark 输出参数后立刻进行数据写入操作。 ``` -O, --disorder=NUMBER Insert order mode--0: In order, 1 ~ 50: disorder ratio. Default is in order. -R, --disorder-range=NUMBER Out of order data's range, ms, default is 1000. ``` -在某些场景,接收到的数据并不是完全按时间顺序到来,而是包含一定比例的乱序数据,TDengine 也能进行很好的处理。为了模拟乱序数据的写入,taosdemo 提供 -O 和 -R 参数进行设置。-O 参数为 0 和不使用 -O 参数相同为完全有序数据写入。1 到 50 为数据中包含乱序数据的比例。-R 参数为乱序数据时间戳偏移的范围,默认为 1000 毫秒。另外注意,时序数据以时间戳为唯一标识,所以乱序数据可能会生成和之前已经写入数据完全相同的时间戳,这样的数据会根据数据库创建的 update 值或者被丢弃(update 0)或者覆盖已有数据(update 1 或 2),而总的数据条数可能和期待的条数不一致的情况。 +在某些场景,接收到的数据并不是完全按时间顺序到来,而是包含一定比例的乱序数据,TDengine 也能进行很好的处理。为了模拟乱序数据的写入,taosBenchmark 提供 -O 和 -R 参数进行设置。-O 参数为 0 和不使用 -O 参数相同为完全有序数据写入。1 到 50 为数据中包含乱序数据的比例。-R 参数为乱序数据时间戳偏移的范围,默认为 1000 毫秒。另外注意,时序数据以时间戳为唯一标识,所以乱序数据可能会生成和之前已经写入数据完全相同的时间戳,这样的数据会根据数据库创建的 update 值或者被丢弃(update 0)或者覆盖已有数据(update 1 或 2),而总的数据条数可能和期待的条数不一致的情况。 ``` -g, --debug Print debug info. 
``` -如果对 taosdemo 写入数据过程感兴趣或者数据写入结果不符合预期,可以使用 -g 参数使 taosdemo 打印执行过程中间调试信息到屏幕上,或通过 Linux 重定向命令导入到另外一个文件,方便找到发生问题的原因。另外 taosdemo 在执行失败后也会把相应执行的语句和调试原因输出到屏幕。可以搜索 reason 来找到 TDengine 服务端返回的错误原因信息。 +如果对 taosBenchmark 写入数据过程感兴趣或者数据写入结果不符合预期,可以使用 -g 参数使 taosBenchmark 打印执行过程中间调试信息到屏幕上,或通过 Linux 重定向命令导入到另外一个文件,方便找到发生问题的原因。另外 taosBenchmark 在执行失败后也会把相应执行的语句和调试原因输出到屏幕。可以搜索 reason 来找到 TDengine 服务端返回的错误原因信息。 ``` -x, --aggr-func Test aggregation funtions after insertion. ``` -TDengine 不仅仅是插入性能非常强大,由于其先进的数据库引擎设计使查询性能也异常强大。taosdemo 提供一个 -x 函数,可以在插入数据结束后进行常用查询操作并输出查询消耗时间。以下为在前述服务器上进行插入一亿条记录后进行常用查询的结果。 +TDengine 不仅仅是插入性能非常强大,由于其先进的数据库引擎设计使查询性能也异常强大。taosBenchmark 提供一个 -x 函数,可以在插入数据结束后进行常用查询操作并输出查询消耗时间。以下为在前述服务器上进行插入一亿条记录后进行常用查询的结果。 可以看到 select * 取出一亿条记录(不输出到屏幕)操作仅消耗1.26秒。而对一亿条记录进行常用的聚合函数操作通常仅需要二十几毫秒,时间最长的 count 函数也不到四十毫秒。 ``` -taosdemo -I stmt -T 48 -y -x +taosBenchmark -I stmt -T 48 -y -x ... ... select * took 1.266835 second(s) @@ -254,7 +254,7 @@ select min(current) took 0.025812 second(s) select first(current) took 0.024105 second(s) ... ``` -除了命令行方式, taosdemo 还支持接受指定一个 JSON 文件做为传入参数的方式来提供更丰富的设置。一个典型的 JSON 文件内容如下: +除了命令行方式, taosBenchmark 还支持接受指定一个 JSON 文件做为传入参数的方式来提供更丰富的设置。一个典型的 JSON 文件内容如下: ``` { "filetype": "insert", @@ -317,11 +317,11 @@ select first(current) took 0.024105 second(s) }] } ``` -例如:我们可以通过 "thread_count" 和 "thread_count_create_tbl" 来为建表和插入数据指定不同数量的线程。可以通过 "child_table_exists"、"childtable_limit" 和 "childtable_offset" 的组合来使用多个 taosdemo 进程(甚至可以在不同的电脑上)对同一个超级表的不同范围子表进行同时写入。也可以通过 "data_source" 和 "sample_file" 来指定数据来源为 csv 文件,来实现导入已有数据的功能。 +例如:我们可以通过 "thread_count" 和 "thread_count_create_tbl" 来为建表和插入数据指定不同数量的线程。可以通过 "child_table_exists"、"childtable_limit" 和 "childtable_offset" 的组合来使用多个 taosBenchmark 进程(甚至可以在不同的电脑上)对同一个超级表的不同范围子表进行同时写入。也可以通过 "data_source" 和 "sample_file" 来指定数据来源为 csv 文件,来实现导入已有数据的功能。 -使用 taosdemo 进行查询和订阅测试 +使用 taosBenchmark 进行查询和订阅测试 -- -taosdemo 不仅仅可以进行数据写入,也可以执行查询和订阅功能。但一个 taosdemo 实例只能支持其中的一种功能,不能同时支持三种功能,通过配置文件来指定进行哪种功能的测试。 +taosBenchmark 不仅仅可以进行数据写入,也可以执行查询和订阅功能。但一个 taosBenchmark 实例只能支持其中的一种功能,不能同时支持三种功能,通过配置文件来指定进行哪种功能的测试。 以下为一个典型查询 JSON 示例文件内容: ``` @@ -433,23 +433,23 @@ taosdemo 不仅仅可以进行数据写入,也可以执行查询和订阅功 -- TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维等设计和优化的大数据平台。TDengine 由于数据库内核中创新的数据存储和查询引擎设计,展现出远超同类产品的高效性能。并且由于支持 SQL 语法和多种编程语言的连接器(目前支持 Java, Python, Go, C#, NodeJS, Rust 等),易用性极强,学习成本为零。为了便于运维需求,我们还提供数据迁移和监控功能等相关生态工具软件。 -为了刚接触 TDengine 的使用者方便进行技术评估和压力测试,我们为 taosdemo 开发了丰富的特性。本文即为对 taosdemo 的一个简单介绍,随着 TDengine 新功能的不断增加,taosdemo 也会继续演化和改进。taosdemo 的代码做为 TDengine 的一部分在 GitHub 上完全开源。欢迎就 taosdemo 或 TDengine 的使用或实现在 GitHub 或者涛思数据的用户群提出建议或批评。 +为了刚接触 TDengine 的使用者方便进行技术评估和压力测试,我们为 taosBenchmark 开发了丰富的特性。本文即为对 taosBenchmark 的一个简单介绍,随着 TDengine 新功能的不断增加,taosBenchmark 也会继续演化和改进。taosBenchmark 的代码做为 TDengine 的一部分在 GitHub 上完全开源。欢迎就 taosBenchmark 或 TDengine 的使用或实现在 GitHub 或者涛思数据的用户群提出建议或批评。 -附录 - 完整 taosdemo 参数介绍 +附录 - 完整 taosBenchmark 参数介绍 -- -taosdemo支持两种配置参数的模式,一种是命令行参数,一种是使用 JSON 格式的配置文件。 +taosBenchmark支持两种配置参数的模式,一种是命令行参数,一种是使用 JSON 格式的配置文件。 一、命令行参数 --f:指定taosdemo所需参数的meta文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是NULL。目前仅支持不含 BOM(byte-order mark)的标准 UTF-8 编码文件。 +-f:指定taosBenchmark所需参数的meta文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是NULL。目前仅支持不含 BOM(byte-order mark)的标准 UTF-8 编码文件。 -u: 用户名。可选项,缺省是“root“。 -p: 密码。可选项,缺省是“taosdata"。指定密码需要使用 MySQL 风格,即密码和 -p 贴紧方式,中间无空格。 --c: 配置文件taos.cfg所在的路径。因为taosdemo通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 +-c: 配置文件taos.cfg所在的路径。因为taosBenchmark通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 -h:taosd服务的FQDN。可选项,缺省是“localhost“。 @@ -491,7 +491,7 @@ 
taosdemo支持两种配置参数的模式,一种是命令行参数,一种是 -M: 插入数据为完全随机。可选项,缺省为模拟能源设备真实场景(数据在固定范围小幅波动)。 --x:不仅仅插入数据。有该选项时,taosdemo还会进行聚合函数查询操作。 +-x:不仅仅插入数据。有该选项时,taosBenchmark还会进行聚合函数查询操作。 -y:提示询问输入时缺省输入yes。 @@ -501,14 +501,14 @@ taosdemo支持两种配置参数的模式,一种是命令行参数,一种是 -g:打印debug信息 --V: 打印taosdemo的debug信息。 +-V: 打印taosBenchmark的debug信息。 --help: 打印命令参数列表。 二、JSON 格式的配置文件中所有参数说明 -taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个taosdemo实例不能同时支持三种功能,一个 taosdemo 实例只能支持其中的一种功能,通过配置文件来指定进行哪种功能的测试。 +taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一个taosBenchmark实例不能同时支持三种功能,一个 taosBenchmark 实例只能支持其中的一种功能,通过配置文件来指定进行哪种功能的测试。 1、插入功能测试的 JSON 配置文件 ``` @@ -575,9 +575,9 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta } ``` -"filetype": 本taosdemo实例进行哪种功能测试。"insert"表示数据插入功能。必选项。 +"filetype": 本taosBenchmark实例进行哪种功能测试。"insert"表示数据插入功能。必选项。 -"cfgdir": 配置文件taos.cfg所在的路径。因为taosdemo通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 +"cfgdir": 配置文件taos.cfg所在的路径。因为taosBenchmark通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 "host": taosd服务的FQDN。可选项,缺省是“localhost“。 @@ -655,7 +655,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta "childtable_offset": 插入数据时,子表起始值。只在drop=no && child_table_exists= yes,该字段生效。 -"childtable_limit": 插入数据时,子表从offset开始,偏移的表数目。使用者可以运行多个 taosdemo 实例(甚至可以在不同的机器上)通过使用不同的 childtable_offset 和 childtable_limit 配置值来实现同时写入相同数据库相同超级表下多个子表。只在drop=no && child_table_exists= yes,该字段生效。 +"childtable_limit": 插入数据时,子表从offset开始,偏移的表数目。使用者可以运行多个 taosBenchmark 实例(甚至可以在不同的机器上)通过使用不同的 childtable_offset 和 childtable_limit 配置值来实现同时写入相同数据库相同超级表下多个子表。只在drop=no && child_table_exists= yes,该字段生效。 "interlace_rows": 跟上面的配置一致,不过该处的配置优先,每个stable可以有自己单独的配置。最大不超过 num_of_records_per_req。 @@ -740,9 +740,9 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta } ``` -"filetype": 本taosdemo实例进行哪种功能测试。"query"表示数据查询功能。必选项。 +"filetype": 本taosBenchmark实例进行哪种功能测试。"query"表示数据查询功能。必选项。 -"cfgdir": 配置文件taos.cfg所在的路径。因为taosdemo通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 +"cfgdir": 配置文件taos.cfg所在的路径。因为taosBenchmark通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 "host": taosd服务的FQDN。可选项,缺省是“localhost“。 @@ -830,9 +830,9 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta } ``` -"filetype": 本taosdemo实例进行哪种功能测试。"subscribe"表示数据查询功能。必选项。** +"filetype": 本taosBenchmark实例进行哪种功能测试。"subscribe"表示数据查询功能。必选项。** -"cfgdir": 配置文件taos.cfg所在的路径。因为taosdemo通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 +"cfgdir": 配置文件taos.cfg所在的路径。因为taosBenchmark通过包含taos的动态库,去链接taosd服务,所以需要做好配置文件。可选项,缺省是 "/etc/taos"路径。 "host": taosd服务的FQDN。可选项,缺省是“localhost“。 diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md index f38522b5c257fdb3f72e833e72f14f4c9acdefb0..c254c59fbf915ccad21874fb24ae3c2b115e7b1f 100644 --- a/documentation20/cn/02.getting-started/docs.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -2,7 +2,7 @@ ## 快捷安装 -TDengine 软件分为服务器、客户端和报警模块三部分,目前 2.0 版服务器仅能在 Linux 系统上安装和运行,后续会支持 Windows、Mac OS 等系统。客户端可以在 Windows 或 Linux 上安装和运行。任何 OS 的应用也可以选择 RESTful 接口连接服务器 taosd。CPU 支持 X64/ARM64/MIPS64/Alpha64,后续会支持 ARM32、RISC-V 等 CPU 架构。用户可根据需求选择通过 [源码](https://www.taosdata.com/cn/getting-started/#通过源码安装) 或者 [安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装) 来安装。 +TDengine 软件分为服务器、客户端和报警模块三部分,目前 2.0 版服务器仅能在 Linux 系统上安装和运行,后续会支持 Windows、Mac OS 等系统。客户端可以在 Windows 或 Linux 上安装和运行。任何 OS 的应用也可以选择 RESTful 接口连接服务器 taosd,其中 2.4 之后版本默认使用单独运行的独立组件 taosAdapter 提供 http 服务,之前版本使用内置 http 服务。CPU 支持 X64/ARM64/MIPS64/Alpha64,后续会支持 ARM32、RISC-V 等 CPU 架构。用户可根据需求选择通过 [源码](https://www.taosdata.com/cn/getting-started/#通过源码安装) 或者 [安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装) 来安装。 ### 
通过源码安装
 
@@ -134,10 +134,10 @@ taos> source <filepath>;
 
 ## TDengine 极速体验
 
-启动 TDengine 的服务,在 Linux 终端执行 taosdemo
+启动 TDengine 的服务,在 Linux 终端执行 taosBenchmark (曾命名为 taosdemo,在 2.4 之后的版本请安装独立的 taosTools 软件包):
 
 ```bash
-$ taosdemo
+$ taosBenchmark
 ```
 
 该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。
 
@@ -175,17 +175,15 @@ taos> select avg(current), max(voltage), min(phase) from test.meters where group
 
 ```mysql
 taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
 ```
 
-## taosdemo 详细功能列表
+## taosBenchmark 详细功能列表
 
-taosdemo 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help` 详细列出。您可以设置不同参数进行体验。
-taosdemo 详细使用方法请参照 [如何使用taosdemo对TDengine进行性能测试](https://www.taosdata.com/cn/documentation/getting-started/taosdemo )。
+taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosBenchmark --help` 详细列出。您可以设置不同参数进行体验。
+taosBenchmark 详细使用方法请参照 [如何使用taosBenchmark对TDengine进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)。
 
-## 客户端和报警模块
+## 客户端
 
 如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。Linux 和 Windows 安装包可以在 [这里](https://www.taosdata.com/cn/getting-started/#客户端) 下载。
 
-报警模块的 Linux 和 Windows 安装包请在 [所有下载链接](https://www.taosdata.com/cn/all-downloads/) 页面搜索“TDengine Alert Linux”章节或“TDengine Alert Windows”章节进行下载。使用方法请参考 [报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)。
-
 ## 支持平台列表
 
 ### TDengine 服务器支持的平台列表
 
diff --git a/documentation20/cn/07.advanced-features/docs.md b/documentation20/cn/07.advanced-features/docs.md
index 32e7a2aabdce54d65a352d8bf91395c3cfc9b32d..36516cf31d969152178137c65777526c5027ce10 100644
--- a/documentation20/cn/07.advanced-features/docs.md
+++ b/documentation20/cn/07.advanced-features/docs.md
@@ -356,10 +356,3 @@ select last_row(voltage) from meters where location='Beijing.Chaoyang';
 ```
 
 该SQL语句将获取所有位于北京朝阳区的电表最后记录的电压值。
-
-
-## 报警监测(Alert)
-
-在 TDengine 的应用场景中,报警监测是一个常见需求,从概念上说,它要求程序从最近一段时间的数据中筛选出符合一定条件的数据,并基于这些数据根据定义好的公式计算出一个结果,当这个结果符合某个条件且持续一定时间后,以某种形式通知用户。
-
-为了满足用户对报警监测的需求,TDengine 以独立模块的形式提供了这一功能,有关它的安装使用方法,请参考博客 [使用 TDengine 进行报警监测](https://www.taosdata.com/blog/2020/04/14/1438.html) 。
diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md
index f7d002bac4727cd58ea26e7fd201bcac26a2846f..89900455c2302e887153e710c0467ec4dc702a5b 100644
--- a/documentation20/cn/08.connector/01.java/docs.md
+++ b/documentation20/cn/08.connector/01.java/docs.md
@@ -46,25 +46,26 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
 
-注意:与 JNI 方式不同,RESTful 接口是无状态的。在使用JDBC-RESTful时,需要在sql中指定表、超级表的数据库名称。(从 TDengine 2.2.0.0 版本开始,也可以在 RESTful url 中指定当前 SQL 语句所使用的默认数据库名。)例如:
+注意:
+* 与 JNI 方式不同,RESTful 接口是无状态的。在使用 JDBC-RESTful 时,需要在 sql 中指定表、超级表的数据库名称。例如:
 ```sql
 INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6);
 ```
+* 从 taos-jdbcdriver-2.0.36 和 TDengine 2.2.0.0 版本开始,如果在 url 中指定了 dbname,那么,JDBC-RESTful 会默认使用 /rest/sql/dbname 作为 RESTful 请求的 url,在 sql 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('beijing') values(now, 24.6);
 
## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
 
-| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
-|--------------------|--------------------| -------- |
-| 2.0.36 | 2.4.0 及以上 | 1.8.x |
-| 2.0.35 | 2.3.0 及以上 | 1.8.x |
-| 2.0.33 - 2.0.34 | 2.0.3.0 及以上 | 1.8.x |
-| 2.0.31 - 2.0.32 | 2.1.3.0 及以上 | 1.8.x |
-| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
-| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
-| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
-| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
-| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
-| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
+| taos-jdbcdriver 版本 | TDengine 2.0.x.x 版本 | TDengine 2.2.x.x 版本 | TDengine 2.4.x.x 版本 | JDK 版本 |
+|---------------------| ----------------------| ----------------------| ----------------------| -------- |
+| 2.0.37 | X | X | 2.4.0.4 | 1.8.x |
+| 2.0.36 | X | 2.2.2.11 以上 | 2.4.0.0 - 2.4.0.3 | 1.8.x |
+| 2.0.35 | X | 2.2.2.11 以上 | 2.3.0.0 - 2.4.0.3 | 1.8.x |
+| 2.0.33 - 2.0.34 | 2.0.3.0 以上 | 2.2.0.0 以上 | 2.4.0.0 - 2.4.0.3 | 1.8.x |
+| 2.0.31 - 2.0.32 | 2.1.3.0 - 2.1.7.7 | X | X | 1.8.x |
+| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.1 | X | X | 1.8.x |
+| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.4 | X | X | 1.8.x |
+| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.3 | X | X | 1.8.x |
+
 
## TDengine DataType 和 Java DataType
 
diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index ecd9770e6a52ce06440d4788daaa527b194b5fef..9423dd97dbc11248017e0efb1238d6b779a9bb21 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -822,7 +822,7 @@ k1 = conn.query("select info->'k1' as k1 from s1").fetch_all_into_dict()
 
 为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 RESTful API。为最大程度降低学习成本,不同于其他数据库 RESTful API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。RESTful 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。
 
-注意:与标准连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。(从 2.2.0.0 版本开始,支持在 RESTful url 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 url 中指定的这个 db_name。)
+注意:与原生连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。(从 2.2.0.0 版本开始,支持在 RESTful url 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 url 中指定的这个 db_name。从 2.4.0.0 版本开始,RESTful 默认由 taosAdapter 提供,要求必须在 url 中指定 db_name。)
 
### 安装
 
diff --git a/documentation20/cn/09.connections/docs.md b/documentation20/cn/09.connections/docs.md
index 69825e655940045669fedeafdc9ab709c7ed15d9..2ebbe5e43988bc7165ce9234085d66768dc34191 100644
--- a/documentation20/cn/09.connections/docs.md
+++ b/documentation20/cn/09.connections/docs.md
@@ -64,7 +64,7 @@ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource
 
 ![img](../images/connections/add_datasource3.jpg)
 
-* Host: TDengine 集群的中任意一台服务器的 IP 地址与 TDengine RESTful 接口的端口号(6041),默认 http://localhost:6041 。
+* Host: TDengine 集群中任意一台服务器的 IP 地址与 TDengine RESTful 接口的端口号(6041),默认 http://localhost:6041。注意:从 2.4 版本开始 RESTful 服务默认由独立组件 taosAdapter 提供,请参考相关文档配置部署。
 * User:TDengine 用户名。
 * Password:TDengine 用户密码。
 
diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md
index d8936ad8c27387aaff3d6fde3cdb70915290a114..ab43ab11ac1ba9ec0aa9ca13daa6c771dd62b085 100644
--- a/documentation20/cn/10.cluster/docs.md
+++ b/documentation20/cn/10.cluster/docs.md
@@ -235,7 +235,7 @@ SHOW MNODES;
 
 当上述三种情况发生时,系统将启动各个数据节点的负载计算,从而决定如何挪动。
 
-**【提示】负载均衡由参数balance控制,它决定是否启动自动负载均衡。**
+**【提示】负载均衡由参数balance控制,它决定是否启动自动负载均衡,0 表示禁用,1 表示启用自动负载均衡。**
 
## 数据节点离线处理
 
diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md
index b24b432fd4eee893b077a8a85306bfa9642851f5..aa96abb08b395061ee1e9773771ae606cf174352 100644
--- a/documentation20/cn/11.administrator/docs.md
+++ b/documentation20/cn/11.administrator/docs.md
@@ -119,7 +119,7 @@ taosd -C
 
 | 1 | firstEP | | **SC** | | taosd启动时,主动连接的集群中首个dnode的end point | | localhost:6030 | |
 | 2 | secondEP | YES | **SC** | | taosd启动时,如果firstEp连接不上,尝试连接集群中第二个dnode的end point | | 无 | |
 | 3 | fqdn | | **SC** | | 数据节点的FQDN。如果习惯IP地址访问,可设置为该节点的IP地址。 | | 缺省为操作系统配置的第一个hostname。 | 这个参数值的长度需要控制在 96 个字符以内。 |
-| 4 | serverPort | | **SC** | | taosd启动后,对外服务的端口号 | | 6030 | RESTful服务使用的端口号是在此基础上+11,即默认值为6041。 |
+| 4 | serverPort | | **SC** | | taosd启动后,对外服务的端口号 | | 6030 | RESTful服务使用的端口号是在此基础上+11,即默认值为6041(注意 2.4 及后续版本使用 taosAdapter 提供 RESTful 接口)。 |
 | 5 | logDir | | **SC** | | 日志文件目录,客户端和服务器的运行日志将写入该目录 | | /var/log/taos | |
 | 6 | scriptDir | YES | **S** | | | | | |
 | 7 | dataDir | | **S** | | 数据文件目录,所有的数据文件都将写入该目录 | | /var/lib/taos | |
@@ -180,10 +180,10 @@ taosd -C
 
 | 62 | http | | **S** | | 服务器内部的http服务开关。 | 0:关闭http服务, 1:激活http服务。 | 1 | |
 | 63 | mqtt | YES | **S** | | 服务器内部的mqtt服务开关。 | 0:关闭mqtt服务, 1:激活mqtt服务。 | 0 | |
 | 64 | monitor | | **S** | | 服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括CPU、内存、硬盘、网络带宽、HTTP请求量的监控记录,记录信息存储在`LOG`库中。 | 0:关闭监控服务, 1:激活监控服务。 | 0 | |
-| 65 | httpEnableRecordSql | | **S** | | 内部使用,记录通过RESTFul接口,产生的SQL调用 | | 0 | 生成的文件(httpnote.0/httpnote.1),与服务端日志所在目录相同。 |
-| 66 | httpMaxThreads | | **S** | | RESTFul接口的线程数 | | 2 | |
+| 65 | httpEnableRecordSql | | **S** | | 内部使用,记录通过RESTFul接口,产生的SQL调用。taosAdapter 配置或有不同,请参考相应[文档](https://www.taosdata.com/cn/documentation/tools/adapter)。 | | 0 | 生成的文件(httpnote.0/httpnote.1),与服务端日志所在目录相同。 |
+| 66 | httpMaxThreads | | **S** | | RESTFul接口的线程数。taosAdapter 配置或有不同,请参考相应[文档](https://www.taosdata.com/cn/documentation/tools/adapter)。 | | 2 | |
 | 67 | telegrafUseFieldNum | YES | | | | | | |
-| 68 | restfulRowLimit | | **S** | | RESTFul接口单次返回的记录条数 | | 10240 | 最大10,000,000 |
+| 68 | restfulRowLimit | | **S** | | RESTFul接口单次返回的记录条数。taosAdapter 配置或有不同,请参考相应[文档](https://www.taosdata.com/cn/documentation/tools/adapter)。 | | 10240 | 最大10,000,000 |
 | 69 | numOfLogLines | | **SC** | | 单个日志文件允许的最大行数。 | | 10,000,000 | |
 | 70 | asyncLog | | **SC** | | 日志写入模式 | 0:同步、1:异步 | 1 | |
 | 71 | logKeepDays | | **SC** | 天 | 日志文件的最长保存时间 | | 0 | 大于0时,日志文件会被重命名为taosdlog.xxx,其中xxx为日志文件最后修改的时间戳。 |
@@ -641,9 +641,11 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
 
 - *taosd*:TDengine服务端可执行文件
 - *taos*:TDengine Shell可执行文件
 - *taosdump*:数据导入导出工具
-- *taosdemo*:TDengine测试工具
+- *taosBenchmark*:TDengine测试工具
 - remove.sh:卸载TDengine的脚本,请谨慎执行,链接到/usr/bin目录下的**rmtaos**命令。会删除TDengine的安装目录/usr/local/taos,但会保留/etc/taos、/var/lib/taos、/var/log/taos。
 
+注意:2.4.0.0 版本之后的 taosBenchmark 和 taosdump 需要安装独立安装包 taosTools。
+
 您可以通过修改系统配置文件taos.cfg来配置不同的数据目录和日志目录。
 
## TDengine 的启动、停止、卸载
 
@@ -692,6 +694,13 @@ rmtaos
 
 1. 合法字符:英文字符、数字和下划线
 2. 允许英文字符或下划线开头,不允许以数字开头
 3. 不区分大小写
+4. 转义后表(列)名规则:
+ 为了兼容支持更多形式的表(列)名,TDengine 引入新的转义符 "`"。可以让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。
+ 转义后的表(列)名同样受到长度限制要求,且长度计算的时候不计算转义符。使用转义字符以后,不再对转义字符中的内容进行大小写统一。
+
+ 例如:\`aBc\` 和 \`abc\` 是不同的表(列)名,但是 abc 和 aBc 是相同的表(列)名。
+ 需要注意的是转义字符中的内容必须是可打印字符。
+ 支持转义符的功能从 2.3.0.1 版本开始。
 
**密码合法字符集**
 
@@ -761,6 +770,27 @@ rmtaos
 
 | CONNS | ID | NOTNULL | STABLE | WAL |
 | COPY | IF | NOW | STABLES | WHERE |
 
+## 转义字符说明
+- 转义字符表(转义符的功能从 2.4.0.4 版本开始)
+
+ | 字符序列 | **代表的字符** |
+ | :--------: | ------- |
+ | `\'` | 单引号' |
+ | `\"` | 双引号" |
+ | \n | 换行符 |
+ | \r | 回车符 |
+ | \t | tab符 |
+ | `\\` | 斜杠\ |
+ | `\%` | % 规则见下 |
+ | `\_` | _ 规则见下 |
+
+- 转义字符使用规则
+ 1. 标识符里有转义字符(数据库名、表名、列名)
+ 1. 普通标识符: 直接提示错误的标识符,因为标识符规定必须是数字、字母和下划线,并且不能以数字开头。
+ 2. 反引号``标识符: 保持原样,不转义
+ 2. 数据里有转义字符
+ 1. 遇到上面定义的转义字符会转义(%和_见下面说明),如果没有匹配的转义字符会忽略掉转义符\。
+ 2. 对于%和_,因为在like里这两个字符是通配符,所以在模式匹配like里用`\%`和`\_`表示字符里本身的%和_,如果在like模式匹配上下文之外使用`\%`或`\_`,则它们的计算结果为字符串`\%`和`\_`,而不是%和_。
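+
+As a concrete illustration of rule 2.2, the C sketch below escapes `_` inside a LIKE pattern so it matches a literal underscore instead of acting as the single-character wildcard. It assumes the native C connector (taos.h) is installed and that a `test` database with a super table `meters` and tables named like `d_1`, `d_10`, ... exists:
+
+```c
+#include <stdio.h>
+#include <taos.h>
+
+int main(void) {
+  taos_init();
+  TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 6030);
+  if (conn == NULL) {
+    fprintf(stderr, "connect failed\n");
+    return 1;
+  }
+
+  /* "d\\_1%" in C source becomes d\_1% in SQL: a literal underscore, then any suffix */
+  TAOS_RES *res = taos_query(conn, "select tbname from test.meters where tbname like 'd\\_1%'");
+  if (taos_errno(res) != 0) {
+    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
+  }
+  taos_free_result(res);
+  taos_close(conn);
+  taos_cleanup();
+  return 0;
+}
+```
+
+Without the backslash, `'d_1%'` would also match names such as `dx1y`, because a bare `_` matches any single character.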
 
## 诊断及其他
 
#### 网络连接诊断
 
diff --git a/documentation20/cn/12.taos-sql/02.udf/docs.md b/documentation20/cn/12.taos-sql/02.udf/docs.md
index bb8303455364c6f10d32f4745d152e462b5faf24..95f9eb96377aec6cd00566bdcc6dd52baea2817c 100644
--- a/documentation20/cn/12.taos-sql/02.udf/docs.md
+++ b/documentation20/cn/12.taos-sql/02.udf/docs.md
@@ -7,9 +7,10 @@
 ## 用 C/C++ 语言来定义 UDF
 
 TDengine 提供 3 个 UDF 的源代码示例,分别为:
-* [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c)
-* [abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c)
-* [demo.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c)
+* [add_one.c](#add_one.c)
+* [abs_max.c](#abs_max.c)
+* [demo.c](#demo.c)
+
 
### 标量函数
 
@@ -142,3 +143,266 @@ SELECT X(c) FROM table/stable;
 4. UDF 只要创建成功,就会被持久化存储到 MNode 节点中;
 5. 无法通过 RESTful 接口来创建 UDF;
 6. UDF 在 SQL 中定义的函数名,必须与 .so 库文件实现中的接口函数名前缀保持一致,也即必须是 udfNormalFunc 的名称,而且不可与 TDengine 中已有的内建 SQL 函数重名。
+
+## 代码附件
+### [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c)
+
+```
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+typedef struct SUdfInit{
+ int maybe_null; /* 1 if function can return NULL */
+ int decimals; /* for real functions */
+ long long length; /* For string functions */
+ char *ptr; /* free pointer for function data */
+ int const_item; /* 0 if result is independent of arguments */
+} SUdfInit;
+
+void add_one(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBUf, char* tsOutput,
+ int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
+ int i;
+ int r = 0;
+ // printf("add_one input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
+ if (itype == 4) {
+ for(i=0;i<numOfRows;++i) {
+ // 对 INT 列的每一项加 1 后写入输出列
+ *((int *)dataOutput+i)=*((int *)data + i) + 1;
+ }
+ *numOfOutput=numOfRows;
+ // printf("add_one out, numOfOutput:%d\n", *numOfOutput);
+ }
+}
+```
+
+### [abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c)
+
+```
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+typedef struct SUdfInit{
+ int maybe_null; /* 1 if function can return NULL */
+ int decimals; /* for real functions */
+ int64_t length; /* For string functions */
+ char *ptr; /* free pointer for function data */
+ int const_item; /* 0 if result is independent of arguments */
+} SUdfInit;
+
+
+#define TSDB_DATA_INT_NULL 0x80000000L
+#define TSDB_DATA_BIGINT_NULL 0x8000000000000000L
+
+void abs_max(char* data, short itype, short ibytes, int numOfRows, int64_t* ts, char* dataOutput, char* interBuf, char* tsOutput,
+ int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
+ int i;
+ int64_t r = 0;
+ // printf("abs_max input data:%p, type:%d, rows:%d, ts:%p, %" PRId64 ", dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
+ if (itype == 5) {
+ r=*(int64_t *)dataOutput;
+ *numOfOutput=0;
+
+ for(i=0;i<numOfRows;++i) {
+ if (*((int64_t *)data + i) == TSDB_DATA_BIGINT_NULL) {
+ continue;
+ }
+
+ *numOfOutput=1;
+ int64_t v = llabs(*((int64_t *)data + i));
+ if (v > r) {
+ r = v;
+ }
+ }
+
+ *(int64_t *)dataOutput=r;
+
+ // printf("abs_max out, dataoutput:%" PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput);
+ }else {
+ *numOfOutput=0;
+ }
+}
+
+
+
+void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf) {
+ int i;
+ //int64_t r = 0;
+ // printf("abs_max_finalize dataoutput:%p:%d, numOfOutput:%d, buf:%p\n", dataOutput, *dataOutput, *numOfOutput, buf);
+ // *numOfOutput=1;
+ // printf("abs_max finalize, dataoutput:%" PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput);
+}
+
+void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf) {
+ int64_t r = 0;
+
+ if (numOfRows > 0) {
+ r = *((int64_t *)data);
+ }
+ // printf("abs_max_merge numOfRows:%d, dataoutput:%p, buf:%p\n", numOfRows, dataOutput, buf);
+ for (int i = 1; i < numOfRows; ++i) {
+ // printf("abs_max_merge %d - %" PRId64"\n", i, *((int64_t *)data + i));
+ if (*((int64_t*)data + i) > r) {
+ r= *((int64_t*)data + i);
+ }
+ }
+
+ *(int64_t*)dataOutput=r;
+ if (numOfRows > 0) {
+ *numOfOutput=1;
+ } else {
+ *numOfOutput=0;
+ }
+
+ // printf("abs_max_merge, dataoutput:%" PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput);
+}
+
+
+int abs_max_init(SUdfInit* buf) {
+ // printf("abs_max init\n");
+ return 0;
+}
+
+
+void abs_max_destroy(SUdfInit* buf) {
+ // printf("abs_max destroy\n");
+}
+```
+
+### [demo.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c)
+
+```
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+typedef struct SUdfInit{
+ int maybe_null; /* 1 if function can return NULL */
+ int decimals; /* for real functions */
+ long long length; /* For string functions */
+ char *ptr; /* free pointer for function data */
+ int const_item; /* 0 if result is independent of arguments */
+} SUdfInit;
+
+typedef struct SDemo{
+ double sum;
+ int num;
+ short otype;
+}SDemo;
+
+#define FLOAT_NULL 0x7FF00000 // it is an NAN
+#define DOUBLE_NULL 0x7FFFFF0000000000L // it is an NAN
+
+
+void demo(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput,
+ int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
+ int i;
+ double r = 0;
+ SDemo *p = (SDemo *)interBuf;
+ SDemo *q = (SDemo *)dataOutput;
+ printf("demo input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, interBUf:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, interBuf, tsOutput, numOfOutput, buf);
+
+ for(i=0;i<numOfRows;++i) {
+ if (itype == 4) {
+ r=*((int *)data+i);
+ } else if (itype == 6) {
+ r=*((float *)data+i);
+ } else if (itype == 7) {
+ r=*((double *)data+i);
+ }
+
+ p->sum += r*r;
+ }
+
+ p->otype = otype;
+ p->num += numOfRows;
+
+ q->sum = p->sum;
+ q->num = p->num;
+ q->otype = p->otype;
+
+ *numOfOutput=1;
+
+ printf("demo out, sum:%f, num:%d, numOfOutput:%d\n", p->sum, p->num, *numOfOutput);
+}
+
+
+void demo_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf) {
+ int i;
+ SDemo *p = (SDemo *)data;
+ SDemo res = {0};
+ printf("demo_merge input data:%p, rows:%d, dataoutput:%p, numOfOutput:%p, buf:%p\n", data, numOfRows, dataOutput, numOfOutput, buf);
+
+ for(i=0;i<numOfRows;++i) {
+ res.sum += p->sum * p->sum;
+ res.num += p->num;
+ p++;
+ }
+
+ p->sum = res.sum;
+ p->num = res.num;
+
+ *numOfOutput=1;
+
+ printf("demo out, sum:%f, num:%d, numOfOutput:%d\n", p->sum, p->num, *numOfOutput);
+}
+
+
+
+void demo_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf) {
+ SDemo *p = (SDemo *)interBuf;
+ printf("demo_finalize interbuf:%p, numOfOutput:%p, buf:%p, sum:%f, num:%d\n", interBuf, numOfOutput, buf, p->sum, p->num);
+ if (p->otype == 6) {
+ if (p->num != 30000) {
+ *(unsigned int *)dataOutput = FLOAT_NULL;
+ } else {
+ *(float *)dataOutput = (float)(p->sum / p->num);
+ }
+ printf("finalize values:%f\n", *(float *)dataOutput);
+ } else if (p->otype == 7) {
+ if (p->num != 30000) {
+ *(unsigned long long *)dataOutput = DOUBLE_NULL;
+ } else {
+ *(double *)dataOutput = (double)(p->sum / p->num);
+ }
+ printf("finalize values:%f\n", *(double *)dataOutput);
+ }
+
+ *numOfOutput=1;
+
+ printf("demo finalize, numOfOutput:%d\n", *numOfOutput);
+}
+
+
+int demo_init(SUdfInit* buf) {
+ printf("demo init\n");
+ return 0;
+}
+
+
+void demo_destroy(SUdfInit* buf) {
+ printf("demo destroy\n");
+}
+```
+
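+
+Before registering a UDF in TDengine, it can be convenient to sanity-check the shared object locally. The sketch below is an editor-suggested test harness (not part of the attached sources); it assumes `add_one.c` was built with `gcc -g -O0 -fPIC -shared add_one.c -o add_one.so`, loads the library with `dlopen`, and calls the entry point directly on a small INT column (type id 4, matching the `itype == 4` branch above):
+
+```c
+#include <dlfcn.h>
+#include <stdio.h>
+
+/* Same shape as the udfNormalFunc entry point used by add_one above. */
+typedef void (*udf_fn)(char *data, short itype, short ibytes, int numOfRows,
+                       long long *ts, char *dataOutput, char *interBuf,
+                       char *tsOutput, int *numOfOutput, short otype,
+                       short obytes, void *buf);
+
+int main(void) {
+  void *handle = dlopen("./add_one.so", RTLD_NOW);
+  if (handle == NULL) { fprintf(stderr, "%s\n", dlerror()); return 1; }
+
+  udf_fn add_one = (udf_fn)dlsym(handle, "add_one");
+  if (add_one == NULL) { fprintf(stderr, "%s\n", dlerror()); return 1; }
+
+  int in[3] = {1, 2, 3}, out[3] = {0}, n = 0;
+  add_one((char *)in, 4, sizeof(int), 3, NULL,
+          (char *)out, NULL, NULL, &n, 4, sizeof(int), NULL);
+
+  for (int i = 0; i < n; ++i) printf("%d ", out[i]); /* expected: 2 3 4 */
+  printf("\n");
+  dlclose(handle);
+  return 0;
+}
+```
+
+Compile the harness with `gcc test_udf.c -ldl` (the file name is illustrative); if the printed column is not the input shifted by one, fix the UDF before creating it in TDengine.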
*numOfOutput); +} + + +int demo_init(SUdfInit* buf) { + printf("demo init\n"); + return 0; +} + + +void demo_destroy(SUdfInit* buf) { + printf("demo destroy\n"); +} +``` + + + diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 1bd55bca1058ac21727e767ce29cfaed1beae035..411ee5a34e6f72d56f30f33f55cb85687918df8a 100755 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -37,7 +37,11 @@ taos> DESCRIBE meters; - Epoch Time:时间戳也可以是一个长整数,表示从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的毫秒数(相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的微秒数;纳秒精度的逻辑也是类似的。) - 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降采样操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n(自然月) 和 y(自然年)。 -TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传递的 PRECISION 参数就可以支持微秒和纳秒。(从 2.1.5.0 版本开始支持纳秒精度) +TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传递的 PRECISION 参数就可以支持微秒和纳秒。(从 2.1.5.0 版本开始支持纳秒精度) + +```mysql +CREATE DATABASE db_name PRECISION 'ns'; +``` 在TDengine中,普通表的数据模型中可使用以下 10 种数据类型。 @@ -601,7 +605,6 @@ SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name; SELECT DISTINCT col_name [, col_name ...] FROM tb_name; ``` -需要注意的是,DISTINCT 目前不支持对超级表中的普通列进行处理。如果需要进行此类操作,那么需要把超级表放在子查询中,再对子查询的计算结果执行 DISTINCT。 说明: 1. cfg 文件中的配置参数 maxNumOfDistinctRes 将对 DISTINCT 能够输出的数据行数进行限制。其最小值是 100000,最大值是 100000000,默认值是 10000000。如果实际计算结果超出了这个限制,那么会仅输出这个数量范围内的部分。 @@ -1689,7 +1692,10 @@ SELECT function_list FROM stb_name [GROUP BY tags] ``` -- 在聚合查询中,function_list 位置允许使用聚合和选择函数,并要求每个函数仅输出单个结果(例如:COUNT、AVG、SUM、STDDEV、LEASTSQUARES、PERCENTILE、MIN、MAX、FIRST、LAST),而不能使用具有多行输出结果的函数(例如:TOP、BOTTOM、DIFF 以及四则运算)。 +- 在聚合查询中,function_list 位置允许使用聚合和选择函数,并要求每个函数仅输出单个结果(例如:COUNT、AVG、SUM、STDDEV、LEASTSQUARES、PERCENTILE、MIN、MAX、FIRST、LAST),而不能使用具有多行输出结果的函数(例如:DIFF 以及四则运算)。 +- 此外也 LAST_ROW 查询也不能与窗口聚合同时出现。 +- 标量函数(如:CEIL/FLOOR 等)也不能使用在窗口聚合查询中。 +- - WHERE 语句可以指定查询的起止时间和其他过滤条件。 @@ -1754,10 +1760,10 @@ IS NOT NULL 支持所有类型的列。不为空的表达式为 <>"",仅对非 ## 表(列)名合法性说明 TDengine 中的表(列)名命名规则如下: -只能由字母、数字、下划线构成,数字不能在首位,长度不能超过192字节,不区分大小写。 +只能由字母、数字、下划线构成,数字不能在首位,长度不能超过192字节,不区分大小写。这里表名称不包括数据库名的前缀和分隔符。 转移后表(列)名规则: -为了兼容支持更多形式的表(列)名,TDengine 引入新的转义符 "`"。可用让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。 +为了兼容支持更多形式的表(列)名,TDengine 引入新的转义符 "`"。可用让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查,转义符不计入表名称的长度。 转义后的表(列)名同样受到长度限制要求,且长度计算的时候不计算转义符。使用转义字符以后,不再对转义字符中的内容进行大小写统一。 例如: @@ -1799,9 +1805,9 @@ TDengine 中的表(列)名命名规则如下: 1. 在where条件中时,支持函数match/nmatch/between and/like/and/or/is null/is no null,不支持in ```mysql - select * from s1 where info→'k1' match 'v*'; + select * from s1 where info->'k1' match 'v*'; - select * from s1 where info→'k1' like 'v%' and info contains 'k2'; + select * from s1 where info->'k1' like 'v%' and info contains 'k2'; select * from s1 where info is null; @@ -1813,7 +1819,7 @@ TDengine 中的表(列)名命名规则如下: 3. 支持distinct操作. ```mysql - select distinct info→'k1' from s1 + select distinct info->'k1' from s1 ``` 5. 
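To try the attachments end to end, the UDF can be registered and invoked through the C connector. The following is a minimal sketch rather than part of the original examples: the .so path, database and table names are placeholders, and it assumes add_one.so has been compiled from the code above (e.g. with `gcc -g -O0 -fPIC -shared add_one.c -o add_one.so`):

```c
#include <stdio.h>
#include <taos.h>

// Run one SQL statement; sketch-level error handling only.
static void exec(TAOS *taos, const char *sql) {
  TAOS_RES *res = taos_query(taos, sql);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "\"%s\" failed: %s\n", sql, taos_errstr(res));
  }
  taos_free_result(res);
}

int main() {
  // placeholder connection parameters
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (taos == NULL) return 1;

  exec(taos, "CREATE DATABASE IF NOT EXISTS udfdb");
  exec(taos, "USE udfdb");
  exec(taos, "CREATE TABLE IF NOT EXISTS tb (ts TIMESTAMP, c1 INT)");
  exec(taos, "INSERT INTO tb VALUES (NOW, 41)");
  // the SQL function name must equal the udfNormalFunc name in the library
  exec(taos, "CREATE FUNCTION add_one AS '/root/add_one.so' OUTPUTTYPE INT");
  exec(taos, "SELECT add_one(c1) FROM tb");  // would return 42 for the row above
  taos_close(taos);
  return 0;
}
```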
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index 1bd55bca1058ac21727e767ce29cfaed1beae035..411ee5a34e6f72d56f30f33f55cb85687918df8a 100755
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -37,7 +37,11 @@ taos> DESCRIBE meters;
- Epoch Time: a timestamp can also be a long integer representing the number of milliseconds since 1970-01-01 00:00:00.000 (UTC/GMT). (Accordingly, if the time precision of the database is set to "microseconds", a long-integer timestamp represents the number of microseconds since that epoch; the logic for nanosecond precision is similar.)
- Times can be added and subtracted; for example, now-2h moves the query time 2 hours back (the most recent 2 hours). The time unit after the number can be b (nanoseconds), u (microseconds), a (milliseconds), s (seconds), m (minutes), h (hours), d (days) or w (weeks). For example, `select * from t1 where ts > now-2w and ts <= now-1w` queries exactly one full week of data from two weeks ago. When specifying the time window (interval) of a down-sampling operation, the units n (natural month) and y (natural year) can also be used.

-TDengine timestamps default to millisecond precision, but microseconds and nanoseconds are supported via the PRECISION parameter passed at CREATE DATABASE time. (Nanosecond precision is supported since version 2.1.5.0.)
+TDengine timestamps default to millisecond precision, but microseconds and nanoseconds are supported via the PRECISION parameter passed at CREATE DATABASE time. (Nanosecond precision is supported since version 2.1.5.0.)
+
+```mysql
+CREATE DATABASE db_name PRECISION 'ns';
+```

In TDengine, the following 10 data types can be used in the data model of an ordinary table.

@@ -601,7 +605,6 @@
SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name;
SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
```

-Note that DISTINCT currently cannot be applied to the ordinary columns of a super table. If such an operation is needed, put the super table in a subquery and run DISTINCT on the result of the subquery.
Notes:
1. The configuration parameter maxNumOfDistinctRes in the cfg file limits the number of rows DISTINCT can output: its minimum value is 100000, its maximum 100000000 and its default 10000000. If the actual result exceeds this limit, only the rows within this range are output.

@@ -1689,7 +1692,10 @@
SELECT function_list FROM stb_name
[GROUP BY tags]
```

-- In aggregation queries, the function_list position accepts aggregation and selection functions whose output is a single result per function (e.g. COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST); functions with multi-row output (e.g. TOP, BOTTOM, DIFF and arithmetic operations) cannot be used.
+- In aggregation queries, the function_list position accepts aggregation and selection functions whose output is a single result per function (e.g. COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST); functions with multi-row output (e.g. DIFF and arithmetic operations) cannot be used.
+- In addition, LAST_ROW queries cannot be combined with window aggregation.
+- Scalar functions (e.g. CEIL/FLOOR) cannot be used in window aggregation queries either.

- The WHERE clause can specify the start and end time of the query as well as other filter conditions.

@@ -1754,10 +1760,10 @@ IS NOT NULL supports columns of all types; the non-empty expression <>"" applies
## Validity of table (column) names
TDengine table (column) names must follow these rules:
-They may consist only of letters, digits and underscores, must not begin with a digit, must not exceed 192 bytes in length, and are case-insensitive.
+They may consist only of letters, digits and underscores, must not begin with a digit, must not exceed 192 bytes in length, and are case-insensitive. The table name here does not include the database name prefix and separator.

Rules for escaped table (column) names:
-For compatibility with more forms of table (column) names, TDengine introduces the new escape character "`". It lets table names avoid conflicts with keywords and bypasses the validity checks above.
+For compatibility with more forms of table (column) names, TDengine introduces the new escape character "`". It lets table names avoid conflicts with keywords and bypasses the validity checks above; the escape characters are not counted in the table name length.
Escaped table (column) names are still subject to the length limit, and escape characters are not counted when computing the length. With escape characters, the content between them is no longer normalized to a uniform case.
For example:
@@ -1799,9 +1805,9 @@
1. In WHERE conditions, the functions match/nmatch/between and/like/and/or/is null/is not null are supported; in is not supported

```mysql
- select * from s1 where info→'k1' match 'v*';
+ select * from s1 where info->'k1' match 'v*';

- select * from s1 where info→'k1' like 'v%' and info contains 'k2';
+ select * from s1 where info->'k1' like 'v%' and info contains 'k2';

select * from s1 where info is null;
@@ -1813,7 +1819,7 @@
3. The distinct operation is supported.

```mysql
- select distinct info→'k1' from s1
+ select distinct info->'k1' from s1
```

5. Tag operations
@@ -1844,7 +1850,7 @@
For example, the following is not supported yet:

```mysql
- select jtag→'key' from (select jtag from stable)
+ select jtag->'key' from (select jtag from stable)
```

Not supported:
```
select jtag->'key' from (select jtag from stable) where jtag->'key'>0
```

## Escape character description
-- Escape character table
+- Escape character table (escape characters are supported since version 2.4.0.4)

| Character Sequence | **Character Represented** |
| :----------------: | ------------------------- |
@@ -1863,7 +1869,7 @@
| \t                 | tab                       |
| `\\`               | backslash \               |
| `\%`               | % (see rules below)       |
-| `\%`               | _ (see rules below)       |
+| `\_`               | _ (see rules below)       |

- Rules for using escape characters
1. Escape characters in identifiers (database names, table names, column names)
@@ -1871,4 +1877,4 @@
  2. Backquoted `` identifiers: kept as-is, not escaped
2. Escape characters in data
  1. The escape characters defined above are converted when encountered (% and _ are described below); if there is no matching escape character, the backslash \ is ignored.
-  2. For % and _: because these two characters are wildcards in LIKE, `\%` and `\_` must be used in LIKE pattern matching to represent the literal characters % and _; outside a LIKE pattern-matching context, `\%` and `\_` evaluate to the strings `\%` and `\_`, not to % and _.
\ No newline at end of file
+  2. For % and _: because these two characters are wildcards in LIKE, `\%` and `\_` must be used in LIKE pattern matching to represent the literal characters % and _; outside a LIKE pattern-matching context, `\%` and `\_` evaluate to the strings `\%` and `\_`, not to % and _.

diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md
index 507ffc09ba954ed6acba39ece128ebbbe5a4142e..209f07b666d4b93e0c05be460e6a37117bbb8b16 100644
--- a/documentation20/cn/13.faq/docs.md
+++ b/documentation20/cn/13.faq/docs.md
@@ -79,6 +79,8 @@
2. If the network has a DNS server configured, check whether it works properly
3. If the network has no DNS server, check the hosts file of the client machine to see whether the FQDN is configured and mapped to the correct IP address
4. If the network configuration is OK, you must be able to ping the FQDN from the client machine; otherwise the client cannot connect to the server
+5. If TDengine has been used on the server before and the hostname was changed, check whether dnodeEps.json in the data directory matches the currently configured EP; the default path is /var/lib/taos/dnode. Normally it is recommended to switch to a new data directory, or to back up and then delete the old one, to avoid this problem.
+6. Check whether /etc/hosts and /etc/hostname contain the pre-configured FQDN

**7. Why do I still get an "Invalid SQL" error although the syntax is correct?**

@@ -186,7 +188,7 @@ Time zones of timestamps in TDengine are always handled by the client, independent of the server
| TCP | 6030 | Communication between client and server. | Determined by the serverPort setting in the configuration file. |
| TCP | 6035 | Communication between nodes of a multi-node cluster. | Changes with the serverPort. |
| TCP | 6040 | Data synchronization between nodes of a multi-node cluster. | Changes with the serverPort. |
-| TCP | 6041 | RESTful communication between client and server. | Changes with the serverPort. |
+| TCP | 6041 | RESTful communication between client and server. | Changes with the serverPort. Note that the taosAdapter configuration may differ; please refer to the corresponding [documentation](https://www.taosdata.com/cn/documentation/tools/adapter). |
| TCP | 6042 | Service port of the Arbitrator. | Changes with the Arbitrator startup parameters. |
| TCP | 6043 | Monitoring service port of TaosKeeper. | Changes with the TaosKeeper startup parameters. |
| TCP | 6044 | Data ingestion port for StatsD. | Changes with the taosAdapter startup parameters (version 2.3.0.1 and above). |
@@ -197,7 +199,7 @@
**20. How do I resolve build failures of the component written in go?**

-TDengine 2.3.0.0 and later include taosAdapter, a component developed in go, which replaces the built-in httpd and provides the former httpd functionality plus data ingestion from various other software (Prometheus, Telegraf, collectd, StatsD, etc.).
+TDengine 2.3.0.0 and later include taosAdapter, a stand-alone component developed in go that needs to run separately. It replaces the httpd previously built into taosd, and provides the former httpd functionality plus data ingestion from various other software (Prometheus, Telegraf, collectd, StatsD, etc.).
To build the latest develop branch, run `git submodule update --init --recursive` first to download the taosAdapter repository code, then build.
taosAdapter is built automatically by default. The go language version must be 1.14 or above. If go build errors occur, they are often caused by problems accessing go mod from within China, and can be solved by setting go environment variables:

diff --git a/documentation20/en/00.index/docs.md b/documentation20/en/00.index/docs.md
index 238ac792482379b510e974b6b97c614dd900de80..5d983dcd95998a2870168d26f26cecf192a43cc7 100644
--- a/documentation20/en/00.index/docs.md
+++ b/documentation20/en/00.index/docs.md
@@ -1,7 +1,6 @@
# TDengine Documentation

-TDengine is a highly efficient platform to store, query, and analyze time-series data. It is specially designed and optimized for IoT, Internet of Vehicles, Industrial IoT, IT Infrastructure and Application Monitoring, etc. It works like a relational database, such as MySQL, but you are strongly encouraged to read through the following documentation before you experience it, especially the Data Modeling sections. In addition to this document, you should also download and read the technology white paper. For the older TDengine version 1.6 documentation, please click [here](https://www.taosdata.com/en/documentation16/).
-
+TDengine is a highly efficient platform to store, query, and analyze time-series data. It is specially designed and optimized for IoT, Internet of Vehicles, Industrial IoT, IT Infrastructure and Application Monitoring, etc. It works like a relational database, such as MySQL, but you are strongly encouraged to read through the following documentation before you experience it, especially the Data Modeling sections. In addition to this document, you should also download and read the technology white paper.

## [TDengine Introduction](/evaluation)
* [TDengine Introduction and Features](/evaluation#intro)
@@ -35,6 +34,7 @@
## [Efficient Data Ingestion](/insert)
- [Data Writing via SQL](/insert#sql): write one or multiple records into one or multiple tables via SQL insert command
+- [Data Writing via Schemaless](/insert#schemaless): write one or multiple records with automatic table creation and adaptive table structure maintenance
- [Data Writing via Prometheus](/insert#prometheus): Configure Prometheus to write data directly without any code
- [Data Writing via Telegraf](/insert#telegraf): Configure Telegraf to write collected data directly without any code
- [Data Writing via EMQ X](/insert#emq): Configure EMQ X to write MQTT data directly without any code
@@ -79,11 +79,12 @@
- [Windows Client](https://www.taosdata.com/blog/2019/07/26/514.html): compile your own Windows client, which is required by various connectors on the Windows environment
- [Rust Connector](/connector/rust): A taosc/RESTful API based TDengine client for Rust

-## [Components and Tools](/cn/documentation/)
+## Components and Tools

-* [taosAdapter User Manual](/tools/adapter)
-* [TDinsight User Manual](/tools/insight)
-* [taos-tools User Manual](/tools/taos-tools)
+* [taosAdapter](/tools/adapter): a bridge/adapter between TDengine clusters and applications.
+* [TDinsight](/tools/insight): monitoring a TDengine cluster with Grafana.
+* [taosdump](/tools/taosdump): a backup tool for TDengine. Please install the `taosTools` package to use it.
+* [taosBenchmark](/tools/taosbenchmark): a stress-test tool for TDengine. Please install the `taosTools` package to use it.

## [Connections with Other Tools](/connections)

diff --git a/documentation20/en/02.getting-started/01.docker/docs.md b/documentation20/en/02.getting-started/01.docker/docs.md
index 84e95a53a281593a47622621285acdfc575aa409..780d8e70b8103cc95bf74ff6221fdaa71d4e8535 100644
--- a/documentation20/en/02.getting-started/01.docker/docs.md
+++ b/documentation20/en/02.getting-started/01.docker/docs.md
@@ -20,11 +20,11 @@
Docker version 20.10.3, build 48d30b5

### running TDengine server inside Docker

```bash
-$ docker run -d -p 6030-6041:6030-6041 -p 6030-6041:6030-6041/udp tdengine/tdengine
+$ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
```

-This command starts a docker container with TDengine server running and maps the container's ports from 6030 to 6041 to the host's ports from 6030 to 6041. If the host is already running TDengine server and occupying the same port(s), you need to map the container's port to a different unused port segment. (Please see [TDengine 2.0 Port Description](https://www.taosdata.com/en/documentation/faq#port) for details). In order to support TDengine clients accessing TDengine server services, both TCP and UDP ports need to be open.
+This command starts a docker container with TDengine server running and maps the container's ports from 6030 to 6049 to the host's ports from 6030 to 6049. If the host is already running TDengine server and occupying the same port(s), you need to map the container's port to a different unused port segment. (Please see [TDengine 2.0 Port Description](https://www.taosdata.com/en/documentation/faq#port) for details). In order to support TDengine clients accessing TDengine server services, both TCP and UDP ports need to be open.

- **docker run**: Run a container via Docker
- **-d**: put the container run in the background
@@ -61,7 +61,7 @@
c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ·

```bash
$ docker exec -it tdengine /bin/bash
-root@tdengine-server:~/TDengine-server-2.0.20.13#
+root@tdengine-server:~/TDengine-server-2.4.0.4#
```

- **docker exec**: Enter the container by `docker exec` command; if exited, the container will not stop.
@@ -73,9 +73,9 @@ root@tdengine-server:~/TDengine-server-2.0.20.13#
After entering the container, execute the taos shell client program.

```bash
-root@tdengine-server:~/TDengine-server-2.0.20.13# taos
+root@tdengine-server:~/TDengine-server-2.4.0.4# taos

-Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
+Welcome to the TDengine shell from Linux, Client Version:2.4.0.4
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.

taos>
@@ -92,7 +92,7 @@ After starting the TDengine Docker container with the correct port mapped with t
```
$ taos

-Welcome to the TDengine shell from Linux, Client Version:2.0.22.3
+Welcome to the TDengine shell from Linux, Client Version:2.4.0.4
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.

taos>
@@ -116,10 +116,12 @@ Docker containers of TDengine version 2.4.0.0 and later include a component name

Note: If taosAdapter is running inside the container, you need to add mappings for other additional ports as needed; please refer to the [taosAdapter documentation](https://github.com/taosdata/taosadapter/blob/develop/README.md) for the default port numbers and how to modify them for specific purposes.

-Running TDengine version 2.4.0.0 image with docker.
+Running TDengine version 2.4.0.4 image with docker.
+
+taosAdapter and taosd are started by default:

```
-$ docker run -d --name tdengine-taosa -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.0
+$ docker run -d --name tdengine-taosa -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine:2.4.0.4
```

Verify that the RESTful interface provided by taosAdapter is working, using the `curl` command.
@@ -164,13 +166,13 @@
taos>

You can see that the simulation data has been written to TDengine.
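The same RESTful check can also be scripted from the host against the mapped port. The snippet below is an illustrative sketch, not part of the original page: it assumes libcurl is installed, and uses the default root/taosdata credentials and port 6041 from the examples above.

```c
#include <stdio.h>
#include <curl/curl.h>

// POST one SQL statement to the RESTful endpoint with Basic authentication.
int main() {
  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURL *curl = curl_easy_init();
  if (curl == NULL) return 1;

  curl_easy_setopt(curl, CURLOPT_URL, "http://localhost:6041/rest/sql");
  curl_easy_setopt(curl, CURLOPT_USERPWD, "root:taosdata");  // Basic auth
  curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "select * from test.meters limit 2");

  CURLcode rc = curl_easy_perform(curl);  // the JSON response is printed to stdout
  if (rc != CURLE_OK) {
    fprintf(stderr, "request failed: %s\n", curl_easy_strerror(rc));
  }
  curl_easy_cleanup(curl);
  curl_global_cleanup();
  return 0;
}
```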
-### Application example: write data to TDengine server in Docker container using taosdemo on the host
+### Application example: write data to TDengine server in Docker container using taosBenchmark on the host

-1, execute taosdemo in the host command line interface to write data to the TDengine server in the Docker container
+1, execute `taosBenchmark` (formerly named taosdemo) in the host command line interface to write data to the TDengine server in the Docker container

```bash
-$ taosdemo
+$ taosBenchmark

-taosdemo is simulating data generated by power equipments monitoring...
+taosBenchmark is simulating data generated by power equipment monitoring...

host: 127.0.0.1:6030
user: root
@@ -219,14 +221,14 @@ After enter, this command will automatically create a super table `meters` under

It takes about a few minutes to execute this command and ends up inserting a total of 100 million records.

-3, Go to the TDengine terminal and view the data generated by taosdemo.
+3, Go to the TDengine terminal and view the data generated by taosBenchmark.

- **Go to the terminal interface.**

```bash
-$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
+$ root@c452519b0f9b:~/TDengine-server-2.4.0.4# taos

-Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
+Welcome to the TDengine shell from Linux, Client Version:2.4.0.4
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.

taos>

diff --git a/documentation20/en/02.getting-started/02.taosdemo/docs.md b/documentation20/en/02.getting-started/02.taosdemo/docs.md
index c872d2971ef3cce250592df0534af5369c4682dd..2fd09ef3d3774d1bc47091c9eaa4020d6f937bc0 100644
--- a/documentation20/en/02.getting-started/02.taosdemo/docs.md
+++ b/documentation20/en/02.getting-started/02.taosdemo/docs.md
@@ -1,15 +1,15 @@
-Since TDengine was open sourced in July 2019, it has gained a lot of popularity among time-series database developers with its innovative data modeling design, simple installation method, easy programming interface, and powerful data insertion and query performance. The insertion and querying performance is often astonishing to users who are new to TDengine. In order to help users to experience the high performance and functions of TDengine in the shortest time, we developed an application called taosdemo for insertion and querying performance testing of TDengine. Then user can easily simulate the scenario of a large number of devices generating a very large amount of data. User can easily manipulate the number of columns, data types, disorder ratio, and number of concurrent threads with taosdemo customized parameters.
+Since TDengine was open sourced in July 2019, it has gained a lot of popularity among time-series database developers with its innovative data modeling design, simple installation method, easy programming interface, and powerful data insertion and query performance. The insertion and querying performance is often astonishing to users who are new to TDengine. To help users experience the high performance and functions of TDengine in the shortest time, we developed an application called `taosBenchmark` (formerly named `taosdemo`) for insertion and querying performance testing of TDengine. With it, users can easily simulate the scenario of a large number of devices generating a very large amount of data, and can manipulate the number of columns, data types, disorder ratio, and number of concurrent threads through taosBenchmark's customizable parameters.

-Running taosdemo is very simple. Just download the TDengine installation package (https://www.taosdata.com/cn/all-downloads/) or compiling the TDengine code yourself (https://github.com/taosdata/TDengine). It can be found and run in the installation directory or in the compiled results directory.
+Running taosBenchmark is very simple. Just download the TDengine installation package (https://www.taosdata.com/cn/all-downloads/) or compile the TDengine code yourself (https://github.com/taosdata/TDengine). It can be found and run in the installation directory or in the build output directory.

-To run an insertion test with taosdemo
+To run an insertion test with taosBenchmark
--
-Executing taosdemo without any parameters results in the following output.
+Executing taosBenchmark without any parameters results in the following output.

```
-$ taosdemo
+$ taosBenchmark

-taosdemo is simulating data generated by power equipment monitoring...
+taosBenchmark is simulating data generated by power equipment monitoring...

host: 127.0.0.1:6030
user: root
@@ -54,7 +54,7 @@
column[0]:FLOAT column[1]:INT column[2]:FLOAT

Press enter key to continue or Ctrl-C to stop

```

-The parameters here shows for what taosdemo will use for data insertion. By default, taosdemo without entering any command line arguments will simulate a city power grid system's meter data collection scenario as a typical application in the power industry. That is, a database named test will be created, and a super table named meters will be created, where the super table schema is following:
+The parameters here show what taosBenchmark will use for data insertion. By default, taosBenchmark without any command line arguments will simulate a city power grid system's meter data collection scenario as a typical application in the power industry. That is, a database named test will be created, and a super table named meters will be created, where the super table schema is the following:

```
taos> describe test.meters;
@@ -69,7 +69,7 @@ taos> describe test.meters;
Query OK, 6 row(s) in set (0.002972s)
```

-After pressing any key taosdemo will create the database test and super table meters and generate 10,000 sub-tables representing 10,000 individule meter devices that report data. That means they independently using the super table meters as a template according to TDengine data modeling best practices.
+After pressing any key taosBenchmark will create the database test and super table meters and generate 10,000 sub-tables representing 10,000 individual meter devices that report data, each independently using the super table meters as a template according to TDengine data modeling best practices.

```
taos> use test;
Database changed.
@@ -91,7 +91,7 @@ taos> show stables;
meters | 2021-08-27 11:21:01.209 | 4 | 2 | 10000 |
Query OK, 1 row(s) in set (0.001740s)
```

-Then taosdemo generates 10,000 records for each meter device.
+Then taosBenchmark generates 10,000 records for each meter device.

```
...
====thread[3] completed total inserted rows: 6250000, total affected rows: 6250000. 347626.22 records/second====
@@ -108,9 +108,9 @@
Spent 18.0863 seconds to insert rows: 100000000, affected rows: 100000000 with 1
insert delay, avg: 28.64ms, max: 112.92ms, min: 9.35ms
```

-The above information is the result of a real test on a normal PC server with 8 CPUs and 64G RAM. It shows that taosdemo inserted 100,000,000 (no need to count, 100 million) records in 18 seconds, or an average of 552,909,049 records per second.
+The above information is the result of a real test on a normal PC server with 8 CPUs and 64G RAM. It shows that taosBenchmark inserted 100,000,000 (that is, 100 million) records in 18 seconds, an average of 5,529,049 records per second.

-TDengine also offers a parameter-bind interface for better performance, and using the parameter-bind interface (taosdemo -I stmt) on the same hardware for the same amount of data writes, the results are as follows.
+TDengine also offers a parameter-bind interface for better performance, and using the parameter-bind interface (taosBenchmark -I stmt) on the same hardware for the same amount of data writes, the results are as follows.

```
...
@@ -145,14 +145,14 @@ Spent 6.0257 seconds to insert rows: 100000000, affected rows: 100000000 with 16
insert delay, avg: 8.31ms, max: 860.12ms, min: 2.00ms
```

-It shows that taosdemo inserted 100 million records in 6 seconds, with a much more higher insertion performance, 1,659,590 records wer inserted per second.
+It shows that taosBenchmark inserted 100 million records in 6 seconds, with much higher insertion performance: about 16.6 million records were inserted per second.

-Because taosdemo is so easy to use, so we have extended it with more features to support more complex parameter settings for sample data preparation and validation for rapid prototyping.
+Because taosBenchmark is so easy to use, we have extended it with more features to support more complex parameter settings for sample data preparation and validation for rapid prototyping.

-The complete list of taosdemo command-line arguments can be displayed via taosdemo --help as follows.
+The complete list of taosBenchmark command-line arguments can be displayed via taosBenchmark --help as follows.

```
-$ taosdemo --help
+$ taosBenchmark --help

-f, --file=FILE The meta file to the execution procedure. Currently, we support standard UTF-8 (without BOM) encoded files only.
-u, --user=USER The user name to use when connecting to the server.
@@ -160,7 +160,7 @@ $ taosdemo --help
-c, --config-dir=CONFIG_DIR Configuration directory.
-h, --host=HOST TDengine server FQDN to connect. The default host is localhost.
-P, --port=PORT The TCP/IP port number to use for the connection.
--I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosdemo uses. By default use 'taosc'.
+-I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosBenchmark uses. By default use 'taosc'.
-d, --database=DATABASE Destination database. By default is 'test'.
-a, --replica=REPLICA Set the replica parameters of the database, By default use 1, min: 1, max: 3.
-m, --table-prefix=TABLEPREFIX Table prefix name. By default use 'd'.
@@ -196,16 +196,16 @@ for any corresponding short options. Report bugs to .
```

-taosdemo's parameters are designed to meet the needs of data simulation. A few commonly used parameters are described below.
+taosBenchmark's parameters are designed to meet the needs of data simulation. A few commonly used parameters are described below.

```
--I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'.
+-I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosBenchmark uses. Default is 'taosc'.
```
-The performance difference between different interfaces of taosdemo has been mentioned earlier, the -I parameter is used to select different interfaces, currently taosc, stmt and rest are supported. The -I parameter is used to select different interfaces, currently taosc, stmt and rest are supported. taosc uses SQL statements to write data, stmt uses parameter binding interface to write data, and rest uses RESTful protocol to write data.
+The performance difference between the interfaces of taosBenchmark has been mentioned earlier. The -I parameter selects the interface; currently taosc, stmt and rest are supported. taosc uses SQL statements to write data, stmt uses the parameter-binding interface to write data, and rest uses the RESTful protocol to write data.

```
-T, --threads=NUMBER The number of threads. Default is 8.
```
-The -T parameter sets how many threads taosdemo uses to synchronize data writes, so that multiple threads can squeeze as much processing power out of the hardware as possible.
+The -T parameter sets how many threads taosBenchmark uses to synchronize data writes, so that multiple threads can squeeze as much processing power out of the hardware as possible.

```
-b, --data-type=DATATYPE The data_type of columns, default: FLOAT, INT, FLOAT.
@@ -223,11 +223,11 @@ To reach TDengine performance limits, data insertion can be executed by using mu
-n, --records=NUMBER The number of records per table. Default is 10000.
-M, --random The value of records generated are totally random. The default is to simulate power equipment scenario.
```
-As mentioned earlier, taosdemo creates 10,000 tables by default, and each table writes 10,000 records. taosdemo can set the number of tables and the number of records in each table by -t and -n. The data generated by default without parameters are simulated real scenarios, and the simulated data are current and voltage phase values with certain jitter, which can more realistically show TDengine's efficient data compression ability. If you need to simulate the generation of completely random data, you can pass the -M parameter.
+As mentioned earlier, taosBenchmark creates 10,000 tables by default, and each table writes 10,000 records. taosBenchmark can set the number of tables and the number of records in each table with -t and -n. The data generated by default without parameters simulate real scenarios: the simulated data are current and voltage phase values with a certain jitter, which more realistically shows TDengine's efficient data compression ability. If you need to simulate the generation of completely random data, you can pass the -M parameter.

```
-y, --answer-yes Default input yes for prompt.
```
-As we can see above, taosdemo outputs a list of parameters for the upcoming operation by default before creating a database or inserting data, so that the user can know what data is about to be written before inserting. To facilitate automatic testing, the -y parameter allows taosdemo to write data immediately after outputting the parameters.
+As we can see above, taosBenchmark outputs a list of parameters for the upcoming operation by default before creating a database or inserting data, so that the user can know what data is about to be written before inserting. To facilitate automatic testing, the -y parameter allows taosBenchmark to write data immediately after outputting the parameters.

```
-O, --disorder=NUMBER Insert order mode--0: In order, 1 ~ 50: disorder ratio. Default is in order.
-R, --disorder-range=NUMBER Out of order data's range, ms, default is 1000.
```
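As a rough illustration of the parameter-bind (stmt) path mentioned above, the C connector exposes it through the taos_stmt_* API. The sketch below is illustrative only, not taosBenchmark's actual implementation; it assumes a table t1 (ts timestamp, current float) already exists in database test, and error handling is abbreviated.

```c
#include <stdio.h>
#include <string.h>
#include <taos.h>

// Insert one row through the parameter-bind (stmt) interface.
int main() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (taos == NULL) return 1;

  TAOS_STMT *stmt = taos_stmt_init(taos);
  const char *sql = "INSERT INTO t1 VALUES (?, ?)";
  taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));

  int64_t ts = 1626861392589LL;  // millisecond timestamp
  float   current = 10.3f;

  TAOS_BIND params[2];
  memset(params, 0, sizeof(params));
  params[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  params[0].buffer        = &ts;
  params[0].buffer_length = sizeof(ts);
  params[0].length        = &params[0].buffer_length;
  params[1].buffer_type   = TSDB_DATA_TYPE_FLOAT;
  params[1].buffer        = &current;
  params[1].buffer_length = sizeof(current);
  params[1].length        = &params[1].buffer_length;

  taos_stmt_bind_param(stmt, params);  // bind both placeholders of this row
  taos_stmt_add_batch(stmt);
  if (taos_stmt_execute(stmt) != 0) {
    fprintf(stderr, "execute failed: %s\n", taos_stmt_errstr(stmt));
  }
  taos_stmt_close(stmt);
  taos_close(taos);
  return 0;
}
```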
@@ -236,7 +236,7 @@ In some scenarios, the received data does not arrive in exact order, but contain
```
-g, --debug Print debug info.
```
-If you are interested in the taosdemo insertion process or if the data insertion result is not as expected, you can use the -g parameter to make taosdemo print the debugging information in the process of the execution to the screen or import it to another file with the Linux redirect command to easily find the cause of the problem. In addition, taosdemo will also output the corresponding executed statements and debugging reasons to the screen after the execution fails. You can search the word "reason" to find the error reason information returned by the TDengine server.
+If you are interested in the taosBenchmark insertion process, or if the data insertion result is not as expected, you can use the -g parameter to make taosBenchmark print debugging information during execution to the screen, or import it to another file with the Linux redirect command, to easily find the cause of the problem. In addition, taosBenchmark also outputs the executed statements and debugging reasons to the screen after an execution fails. You can search the word "reason" to find the error information returned by the TDengine server.

```
-x, --aggr-func Test aggregation functions after insertion.
```
@@ -244,7 +244,7 @@ TDengine is not only very powerful in insertion performance, but also in query p
You can see that the select * fetch of 100 million rows (not output to the screen) consumes only 1.26 seconds. Most normal aggregation functions on 100 million records usually take only about 20 milliseconds, and even the longest count function takes less than 40 milliseconds.

```
-taosdemo -I stmt -T 48 -y -x
+taosBenchmark -I stmt -T 48 -y -x
...
...
select * took 1.266835 second(s)
...
select min(current) took 0.025812 second(s)
select first(current) took 0.024105 second(s)
...
```

-In addition to the command line approach, taosdemo also supports take a JSON file as an incoming parameter to provide a richer set of settings. A typical JSON file would look like this.
+In addition to the command line approach, taosBenchmark also supports taking a JSON file as an incoming parameter to provide a richer set of settings. A typical JSON file would look like this.

```
{
 "filetype": "insert",
@@ -327,11 +327,11 @@ In addition to the command line approach, taosdemo also supports take a JSON fil
 }]
}
```

-For example, we can specify different number of threads for table creation and data insertion with "thread_count" and "thread_count_create_tbl". You can use a combination of "child_table_exists", "childtable_limit" and "childtable_offset" to use multiple taosdemo processes (even on different computers) to write to different ranges of child tables of the same super table at the same time. You can also import existing data by specifying the data source as a csv file with "data_source" and "sample_file".
+For example, we can specify different numbers of threads for table creation and data insertion with "thread_count" and "thread_count_create_tbl". You can use a combination of "child_table_exists", "childtable_limit" and "childtable_offset" to let multiple taosBenchmark processes (even on different computers) write to different ranges of child tables of the same super table at the same time. You can also import existing data by specifying the data source as a csv file with "data_source" and "sample_file".
-Use taosdemo for query and subscription testing
+Use taosBenchmark for query and subscription testing
--
-taosdemo can not only write data, but also perform query and subscription functions. However, a taosdemo instance can only support one of these functions, not all three, and the configuration file is used to specify which function to test.
+taosBenchmark can not only write data, but also perform query and subscription functions. However, a taosBenchmark instance can only support one of these functions, not all three, and the configuration file is used to specify which function to test.

The following is the content of a typical query JSON example file.

```
@@ -443,7 +443,7 @@
Conclusion
--
TDengine is a big data platform designed and optimized for IoT, Telematics, Industrial Internet, DevOps, etc. TDengine shows a high performance that far exceeds similar products due to the innovative data storage and query engine design in the database kernel. And with SQL syntax support and connectors for multiple programming languages (currently Java, Python, Go, C#, NodeJS, Rust, etc. are supported), it is extremely easy to use and has zero learning cost. To facilitate operation and maintenance needs, we also provide data migration and monitoring functions and other related ecological tools and software.

-For users who are new to TDengine, we have developed rich features for taosdemo to facilitate technical evaluation and stress testing. This article is a brief introduction to taosdemo, which will continue to evolve and improve as new features are added to TDengine.
+For users who are new to TDengine, we have developed rich features for taosBenchmark to facilitate technical evaluation and stress testing. This article is a brief introduction to taosBenchmark, which will continue to evolve and improve as new features are added to TDengine.

- As part of TDengine, taosdemo's source code is fully open on the GitHub. Suggestions or advices about the use or implementation of taosdemo or TDengine are welcomed on GitHub or in the Taos Data user group.
+ As part of TDengine, taosBenchmark's source code is fully open on GitHub. Suggestions or advice about the use or implementation of taosBenchmark or TDengine are welcome on GitHub or in the TAOS Data user group.

diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md
index 7d7744be56259c5c1a6a74a8b407df607768d99d..29ca540e6e9bae6ffa7fbb7e0671889ccdf94375 100644
--- a/documentation20/en/02.getting-started/docs.md
+++ b/documentation20/en/02.getting-started/docs.md
@@ -2,7 +2,7 @@
## Quick Install

-TDengine software consists of 3 parts: server, client, and alarm module. At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface. About CPU, TDengine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).
+TDengine software consists of 3 parts: server, client, and alarm module. At the moment, TDengine server only runs on Linux (support for Windows, macOS and more operating systems will come soon), but the client can run on either Windows or Linux. Applications based on any OS can connect to the server taosd via a RESTful interface. From version 2.4 on, TDengine uses a stand-alone component, taosAdapter, to provide the HTTP service; earlier versions use the HTTP server embedded in taosd. As for CPUs, TDengine supports X64/ARM64/MIPS64/Alpha64 as well as ARM32 and RISC-V; more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).

### Install from Source
@@ -138,10 +138,10 @@
taos> source <filename>;

## Experience TDengine’s Lightning Speed

-After starting the TDengine server, you can execute the command `taosdemo` in the Linux terminal.
+After starting the TDengine server, you can execute the command `taosBenchmark` (formerly named `taosdemo`; please install the taosTools package if you use TDengine 2.4 or a later version) in the Linux terminal.

```bash
-$ taosdemo
+$ taosBenchmark
```

Using this command, a STable named `meters` will be created in the database `test`. There are 10k tables under this STable, named from `t0` to `t9999`. In each table there are 100k rows of records, each row with columns (`f1`, `f2` and `f3`). The timestamp is from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table also has tags `areaid` and `loc`: `areaid` is set from 1 to 10, `loc` is set to "beijing" or "shanghai".
@@ -180,10 +180,10 @@
taos> select avg(f1), max(f2), min(f3) from test.meters where areaid=10;
taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
```

-## Using taosdemo in detail
+## Using taosBenchmark in detail

-you can run command `taosdemo` with many options, like number of tables, rows of records and so on. To know more about these options, you can execute `taosdemo --help` and then take a try using different options.
-Please refer to [How to use taosdemo to test the performance of TDengine](https://www.taosdata.com/en/documentation/getting-started/taosdemo) for detail.
+You can run the command `taosBenchmark` with many options, like the number of tables, rows of records and so on. To learn more about these options, execute `taosBenchmark --help` and then try different options.
+Please refer to [How to use taosBenchmark to test the performance of TDengine](https://tdengine.com/2021/10/09/3114.html) for details.

## Client and Alarm Module

diff --git a/documentation20/en/05.insert/docs.md b/documentation20/en/05.insert/docs.md
index 45b767afc12c55121046b6950104a15653f53f8e..39aad609a9c23120fac68eaa4785bd0a6f46c198 100644
--- a/documentation20/en/05.insert/docs.md
+++ b/documentation20/en/05.insert/docs.md
@@ -31,6 +31,132 @@ For the SQL INSERT Grammar, please refer to [Taos SQL insert](https://www.taosd
- For the same table, if the timestamp of a newly inserted record already exists, the new record will be discarded by default (database option update = 0), that is, the timestamp must be unique within a table. If an application generates records automatically, it is very likely that the generated timestamps will be the same, so the number of records successfully inserted may be smaller than the number of records the application tried to insert. If you use the UPDATE 1 option when creating a database, inserting a new record with an existing timestamp will overwrite the original record.
- The timestamp of written data must be greater than the current time minus the configuration parameter keep. If keep is configured as 3650 days, data older than 3650 days cannot be written. The timestamp for writing data cannot be greater than the current time plus the configuration parameter days. If days is configured as 2, data more than 2 days ahead of the current time cannot be written.

+## Data Writing via Schemaless
+**Introduction**
+In many IoT applications, collected data is used for intelligent control, business analysis, device monitoring and so on. As applications are upgraded and iterated quickly, or hardware is adjusted, the collected metrics can change rapidly over time. To support such use cases, TDengine supports writing data via Schemaless from version 2.2.0.0. With Schemaless, tables no longer need to be created before data is inserted: tables, data columns and tags are created automatically, and additional data columns can be added to existing tables when necessary, so that incoming data is always stored properly in TDengine.
+
+The TDengine C/C++ connector provides the Schemaless API. Please see [Schemaless data writing API](https://www.taosdata.com/en/documentation/connector#schemaless) for the detailed data writing format.
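As a rough illustration of how this API is called (not part of the original documentation; the connection parameters, database name and sample line are assumptions), a minimal C program could look like this. `taos_schemaless_insert` takes an array of protocol lines plus the protocol and timestamp-precision constants described below:

```c
#include <stdio.h>
#include <taos.h>  // TDengine C connector header

int main() {
  // placeholder connection parameters; adjust to your deployment
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (taos == NULL) return 1;

  // one InfluxDB-style line; table st and its columns/tags are created on demand
  char *lines[] = {
      "st,t1=3,t2=4,t3=t3 c1=3i64,c3=\"passit\",c2=false,c4=4f64 1626006833639000000"
  };

  TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_NANO_SECONDS);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(taos);
  return 0;
}
```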
+Super tables and the corresponding child tables created via Schemaless are identical to ones created via SQL, so inserting data into these tables via SQL is also supported. Note that child table names generated via Schemaless follow special rules based on tag mapping, so the generated names are usually not meaningful in terms of readability.
+
+**Schemaless writing protocols**
+TDengine's Schemaless writing protocol is compatible with InfluxDB's Line Protocol and OpenTSDB's telnet and JSON format protocols. Users need to specify which protocol to use as a parameter when writing data through the Schemaless API.
+
+For the InfluxDB and OpenTSDB data writing protocol formats, please refer to the corresponding official documentation. The following introduces the protocol extensions TDengine adds on top of InfluxDB's Line Protocol, allowing users to use Schemaless with more precision.
+
+Schemaless uses one line of text to represent one data record (multiple lines can be passed to the Schemaless API for batch insertion). The format is as follows:
+```json
+measurement,tag_set field_set timestamp
+```
+
+* measurement is used as the table name. A comma delimiter separates measurement and tag_set.
+* tag_set represents tag data in key-value pairs. The format is `<tag_key>=<tag_value>,<tag_key>=<tag_value>`. A comma delimiter separates multiple tag key-value pairs; a space delimiter separates tag_set and field_set.
+* field_set represents column data in key-value pairs. The format is similar to tag_set: `<field_key>=<field_value>,<field_key>=<field_value>`. A comma delimiter separates multiple field key-value pairs; a space delimiter separates field_set and the timestamp.
+* The timestamp is the primary key of the data row.
+
+All tag values in tag_set are automatically converted and stored as the NCHAR data type in TDengine and do not need to be surrounded by double quotes (").
+In the Schemaless Line Protocol, the data format in field_set needs to be self-descriptive in order to convert the data to the corresponding TDengine data types. For example:
+* A field value surrounded by double quotes indicates BINARY(32) data, e.g. `"abc"`.
+* A field value surrounded by double quotes with an L prefix indicates NCHAR(32) data, e.g. `L"报错信息"`.
+* Space, equal sign (=), comma (,) and double quote (") need to be escaped with a backslash (\).
+* Numerical values are converted to the corresponding data types according to their suffix:
+
+| **ID** | **Suffix** | **Data Type** | **Size (Bytes)** |
+| ------ | ---------- | ------------- | ---------------- |
+| 1      | NA / f64   | DOUBLE        | 8                |
+| 2      | f32        | FLOAT         | 4                |
+| 3      | i8         | TINYINT       | 1                |
+| 4      | i16        | SMALLINT      | 2                |
+| 5      | i32        | INT           | 4                |
+| 6      | i64 / i    | BIGINT        | 8                |
+* t, T, true, True, TRUE, f, F, false and False represent BOOLEAN types.
+
+### Schemaless processing logic
+
+The following rules are applied when parsing Schemaless input:
+
+1. For child table name generation, first build a string by concatenating the measurement and the tag key/value strings together, as follows (see the sketch after this list):
+```json
+"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
+```
+tag_key1, tag_key2 here do not follow the original input order but are sorted by tag name. After the MD5 value "md5_val" of the above string is calculated, the prefix "t_" is prepended to "md5_val" to form the child table name.
+2. If the super table does not exist, a new super table is created.
+3. If the child table does not exist, a new child table is created with the name generated in rule 1.
+4. If columns/tags do not exist, new columns/tags are created. (Columns/tags can only be added; existing columns/tags cannot be deleted.)
+5. If columns/tags are not specified in a line, the values of those columns/tags are set to NULL.
+6. For BINARY/NCHAR columns, if the value length exceeds the max length of the column, the max length is automatically extended to ensure data integrity.
+7. If the child table already exists and a tag value differs from the previously stored value, the old value is overwritten by the new value.
+8. If any error occurs during processing, an error code is returned.
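As an illustration of rule 1 (not from the original document), the sketch below builds the canonical key string for a line whose tags arrive out of order; the final MD5 hashing step is noted in a comment but omitted here:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Compare two "key=value" strings; since the key precedes '=', strcmp
// effectively sorts by tag name for this sketch.
static int cmp(const void *a, const void *b) {
  return strcmp(*(const char *const *)a, *(const char *const *)b);
}

int main() {
  const char *measurement = "st";
  const char *tags[] = { "t2=4", "t1=3", "t3=t3" };  // key=value pairs as parsed
  int ntags = 3;

  qsort(tags, ntags, sizeof(tags[0]), cmp);  // sort by tag name, not input order

  char key[256];
  snprintf(key, sizeof(key), "%s", measurement);
  for (int i = 0; i < ntags; i++) {
    size_t len = strlen(key);
    snprintf(key + len, sizeof(key) - len, ",%s", tags[i]);
  }
  // prints "st,t1=3,t2=4,t3=t3"; the real child table name is "t_" + MD5(key)
  printf("%s\n", key);
  return 0;
}
```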
+**Note**
+Schemaless is subject to TDengine's data structure limits; for example, each table row cannot exceed 16KB. For the detailed TDengine limits please refer to (https://www.taosdata.com/en/documentation/taos-sql#limitation).
+
+**Supported protocols and timestamp precisions**
+The following protocols are supported in Schemaless:
+
+| **ID** | **Value**           | **Description**               |
+| ------ | ------------------- | ----------------------------- |
+| 1      | SML_LINE_PROTOCOL   | InfluxDB Line Protocol        |
+| 2      | SML_TELNET_PROTOCOL | OpenTSDB telnet Protocol      |
+| 3      | SML_JSON_PROTOCOL   | OpenTSDB JSON format Protocol |
+
+When SML_LINE_PROTOCOL is used, users need to indicate the timestamp precision through the API. The available timestamp precisions are:
+
+| **ID** | **Precision Definition**          | **Meaning** |
+| ------ | --------------------------------- | ----------- |
+| 1      | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | undefined   |
+| 2      | TSDB_SML_TIMESTAMP_HOURS          | hour        |
+| 3      | TSDB_SML_TIMESTAMP_MINUTES        | minute      |
+| 4      | TSDB_SML_TIMESTAMP_SECONDS        | second      |
+| 5      | TSDB_SML_TIMESTAMP_MILLI_SECONDS  | millisecond |
+| 6      | TSDB_SML_TIMESTAMP_MICRO_SECONDS  | microsecond |
+| 7      | TSDB_SML_TIMESTAMP_NANO_SECONDS   | nanosecond  |
+
+When SML_TELNET_PROTOCOL or SML_JSON_PROTOCOL is used, the timestamp precision is determined by how many digits the timestamp has (following the OpenTSDB convention), and the precision given by the user is ignored.
+
+**Schemaless data mapping rules**
+This section describes how Schemaless data are mapped to TDengine's structured data. Measurement is mapped to the super table name, and the keys in tag_set/field_set are mapped to tag/column names. For example:
+
+```json
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
+```
+The line above is mapped to a super table named "st" with 3 NCHAR tags ("t1", "t2", "t3") and 5 columns: ts (timestamp), c1 (bigint), c3 (binary), c2 (bool) and c4 (double, per the f64 suffix). This is identical to creating the super table with the following SQL statement:
+```json
+create stable st (_ts timestamp, c1 bigint, c2 bool, c3 binary(6), c4 double) tags(t1 nchar(1), t2 nchar(1), t3 nchar(2))
+```
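One way to see this mapping in practice is to query the derived schema right after a schemaless write. A small sketch using the C connector (connection parameters are placeholders):

```c
#include <stdio.h>
#include <taos.h>

// After a schemaless write, inspect the schema TDengine derived for "st".
int main() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (taos == NULL) return 1;

  TAOS_RES *res = taos_query(taos, "DESCRIBE st");
  if (taos_errno(res) == 0) {
    TAOS_ROW row;
    int ncols = taos_field_count(res);
    TAOS_FIELD *fields = taos_fetch_fields(res);
    char line[1024];
    while ((row = taos_fetch_row(res)) != NULL) {
      taos_print_row(line, row, fields, ncols);  // field / type / length / note
      printf("%s\n", line);
    }
  }
  taos_free_result(res);
  taos_close(taos);
  return 0;
}
```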
+**Schemaless data alteration rules**
+This section describes several scenarios in which Schemaless input alters the schema:
+
+When a column has been given a certain type by one line, and a following line attempts to change the data type of that column, the API reports an error:
+
+```json
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000
+```
+In the first line, the type of column c4 is declared as DOUBLE (no suffix); the second line declares the column as BIGINT (suffix "i"), so a Schemaless parsing error occurs.
+
+When a column has been declared as BINARY, and a follow-up line requires a longer BINARY length for that column, the max length of the column is extended:
+```json
+st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000
+```
+In the first line, column c5 stores the 4-character string "pass" as BINARY(4); in the second line, c5 needs 2 more characters to store "passit", so the max length of c5 is extended from BINARY(4) to BINARY(6) to accommodate it.
+
+```json
+st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
+```
+In the example above, the second line has one more column, c6 with value "passit", than the first line. A new column c6 of type BINARY(6) is added automatically.
+
+**Data integrity**
+TDengine ensures that data writing through Schemaless is idempotent, i.e. users may call the API repeatedly to retry writes that failed with errors. However, atomicity is not guaranteed: when multiple lines are written as one batch, the batch may end up partially inserted if an error occurs.
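Idempotency makes a simple retry loop safe for transient failures. The sketch below is illustrative only (connection handling elided); note that a genuine syntax error such as TSDB_CODE_TSC_LINE_SYNTAX_ERROR will fail every attempt, so retrying only helps transient errors:

```c
#include <stdio.h>
#include <unistd.h>
#include <taos.h>

// Because schemaless writes are idempotent, a failed batch can simply be
// resubmitted as-is; a retry cannot duplicate rows that were already stored.
static int insert_with_retry(TAOS *taos, char *lines[], int n, int max_retry) {
  for (int attempt = 0; attempt < max_retry; attempt++) {
    TAOS_RES *res = taos_schemaless_insert(taos, lines, n,
                                           TSDB_SML_LINE_PROTOCOL,
                                           TSDB_SML_TIMESTAMP_NANO_SECONDS);
    int code = taos_errno(res);
    if (code == 0) {
      taos_free_result(res);
      return 0;  // whole batch stored
    }
    fprintf(stderr, "attempt %d failed: %s\n", attempt + 1, taos_errstr(res));
    taos_free_result(res);
    sleep(1);  // simple fixed back-off; syntax errors will never succeed
  }
  return -1;
}
```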
+**Error code**
+If the input text does not follow the protocol syntax, the application receives the TSDB_CODE_TSC_LINE_SYNTAX_ERROR error code, indicating that the error occurred in the input text. Other generic error codes returned by TDengine can also occur, and detailed error messages can be obtained through the taos_errstr API.
+
+**Future enhancement**
+Currently TDengine only provides C language API support for Schemaless. In future versions, APIs/connectors for more languages will be supported, e.g. Java/Go/Python/C#. From TDengine v2.3 and later versions, users can also write data via Schemaless through the RESTful interface provided by taosAdapter.

## Data Writing via Prometheus

As a graduated project of the Cloud Native Computing Foundation, [Prometheus](https://www.prometheus.io/) is widely used in the field of performance monitoring and K8S performance monitoring. TDengine provides a simple tool [Bailongma](https://github.com/taosdata/Bailongma), which only needs to be simply configured in Prometheus, without any code, to directly write the data collected by Prometheus into TDengine and automatically create databases and related table entries in TDengine according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) is an example of using Bailongma to write Prometheus and Telegraf data into TDengine.
@@ -168,6 +294,7 @@
Now you can query the metrics data of Telegraf from TDengine.
Please find taosAdapter configuration and usage in the `taosadapter --help` output.

## Data Writing via collectd and taosAdapter
+
Please refer to the [official document](https://collectd.org/download.shtml) for collectd installation.

TDengine version 2.3.0.0+ includes a stand-alone application taosAdapter in charge of receiving data insertion from collectd.
@@ -187,6 +314,7 @@
sudo systemctl start collectd
Please find taosAdapter configuration and usage in the `taosadapter --help` output.

## Data Writing via StatsD and taosAdapter
+
Please refer to the [official document](https://github.com/statsd/statsd) for StatsD installation.

TDengine version 2.3.0.0+ includes a stand-alone application taosAdapter in charge of receiving data insertion from StatsD.

diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md
index f8b444281587e03bb0b143d5ecd1c41abed9dd64..35e23e1bd12c5870b9012f0d168a072adca8b998 100644
--- a/documentation20/en/08.connector/docs.md
+++ b/documentation20/en/08.connector/docs.md
@@ -7,15 +7,15 @@ TDengine provides many connectors for development, including C/C++, JAVA, Python

At present, TDengine connectors support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and development environments such as Linux/Win64/Win32.
The comparison matrix is as follows: | **CPU** | **X64 64bit** | **X64 64bit** | **X64 64bit** | **X86 32bit** | **ARM64** | **ARM32** | **MIPS Godson** | **Alpha Sunway** | **X64 TimecomTech** | -| ----------- | ------------- | ------------- | ------------- | ------------- | --------- | --------- | --------------- | ----------------- | ------------------- | -| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** | -| **C/C++** | ● | ● | ● | ○ | ● | ● | ○ | ○ | ○ | -| **JDBC** | ● | ● | ● | ○ | ● | ● | ○ | ○ | ○ | -| **Python** | ● | ● | ● | ○ | ● | ● | ○ | -- | ○ | -| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- | -| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- | -| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- | -| **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ | +| ----------- | ------------- | ------------- | ------------- | ------------- | --------- | --------- | --------------- | ---------------- | ------------------- | +| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** | +| **C/C++** | ● | ● | ● | ○ | ● | ● | ○ | ○ | ○ | +| **JDBC** | ● | ● | ● | ○ | ● | ● | ○ | ○ | ○ | +| **Python** | ● | ● | ● | ○ | ● | ● | ○ | -- | ○ | +| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- | +| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- | +| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- | +| **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ | Note: ● stands for that has been verified by official tests; ○ stands for that has been verified by unofficial tests. @@ -151,10 +151,10 @@ Under cmd, enter the c:\TDengine directory and directly execute taos.exe, and yo **Systems supported by C/C++ connectors as follows:** -| **CPU Type** | **x64****(****64bit****)** | | | **ARM64** | **ARM32** | -| -------------------- | ---------------------------- | ------- | ------- | --------- | ------------------ | -| **OS Type** | Linux | Win64 | Win32 | Linux | Linux | -| **Supported or Not** | Yes | **Yes** | **Yes** | **Yes** | **Yes** | +| **CPU Type** | **x64****(****64bit****)** | | | **ARM64** | **ARM32** | +| -------------------- | ---------------------------- | ------- | ------- | --------- | --------- | +| **OS Type** | Linux | Win64 | Win32 | Linux | Linux | +| **Supported or Not** | Yes | **Yes** | **Yes** | **Yes** | **Yes** | The C/C++ API is similar to MySQL's C API. When application use it, it needs to include the TDengine header file taos.h (after installed, it is located in/usr/local/taos/include): @@ -661,6 +661,8 @@ In tests/examples/python, we provide a sample Python program read_example. py to To support the development of various types of platforms, TDengine provides an API that conforms to REST design standards, that is, RESTful API. In order to minimize the learning cost, different from other designs of database RESTful APIs, TDengine directly requests SQL statements contained in BODY through HTTP POST to operate the database, and only needs a URL. See the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1965.html) for the use of RESTful connectors. +Note: One difference from the native connector is that the RESTful interface is stateless, so the `USE db_name` command has no effect and all references to table names and super table names require the database name to be specified. (Starting from version 2.2.0.0, we support specifying db_name in the RESTful url, in which case if the database name prefix is not specified in the SQL statement. 
Since version 2.4.0.0, the RESTful service is provided by taosAdapter by default, and db_name must then be specified in the URL.)

### HTTP request format

```
http://<ip>:<PORT>/rest/sql
```

Parameter description:

- IP: Any host in the cluster
- PORT: httpPort configuration item in the configuration file, defaulting to 6041

-For example: [http://192.168.0.1](http://192.168.0.1/): 6041/rest/sql is a URL that points to an IP address of 192.168. 0.1.
+For example: http://192.168.0.1:6041/rest/sql is a URL that points to an IP address of 192.168.0.1.

The header of the HTTP request needs to carry identity authentication information. TDengine supports Basic authentication and custom authentication. Subsequent versions will provide a standard, secure digital signature mechanism for identity authentication.
@@ -1003,10 +1005,10 @@ This API is used to open DB and return an object of type \* DB. Generally, DRIVE

The Node.js connector supports the following systems:

-| **CPU Type** | x64(64bit) | | | aarch64 | aarch32 |
-| -------------------- | ---------------------------- | ------- | ------- | ----------- | ----------- |
-| **OS Type** | Linux | Win64 | Win32 | Linux | Linux |
-| **Supported or Not** | **Yes** | **Yes** | **Yes** | **Yes** | **Yes** |
+| **CPU Type** | x64(64bit) | | | aarch64 | aarch32 |
+| -------------------- | ----------- | ------- | ------- | ------- | ------- |
+| **OS Type** | Linux | Win64 | Win32 | Linux | Linux |
+| **Supported or Not** | **Yes** | **Yes** | **Yes** | **Yes** | **Yes** |

See the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1957.html) for use of the Node.js connector.

diff --git a/documentation20/en/09.connections/docs.md b/documentation20/en/09.connections/docs.md
index 0e15e58a531cbd783168802e919aa8095fe034bf..2d0886379754e7a2abd106a2359495c1df379389 100644
--- a/documentation20/en/09.connections/docs.md
+++ b/documentation20/en/09.connections/docs.md
@@ -63,7 +63,7 @@ Enter the data source configuration page and modify the corresponding configurat

![img](../images/connections/add_datasource3.jpg)

-- Host: IP address of any server in TDengine cluster and port number of TDengine RESTful interface (6041), default [http://localhost:6041](http://localhost:6041/)
+- Host: IP address of any server in the TDengine cluster and the port number of the TDengine RESTful interface (6041); by default use [http://localhost:6041](http://localhost:6041/) to access the interface. Note that TDengine 2.4 and later use a stand-alone component, taosAdapter, to provide the RESTful interface; please refer to its documentation for configuration and deployment.
- User: TDengine username.
- Password: TDengine user password.
@@ -173,4 +173,4 @@ Please replace the IP address in the command above to the correct one. If no err

The functions below are not supported currently:

- `dbExistsTable(conn, "test")`: if table test exists
-- `dbListTables(conn)`: list all tables in the connection
\ No newline at end of file
+- `dbListTables(conn)`: list all tables in the connection

diff --git a/documentation20/en/11.administrator/docs.md b/documentation20/en/11.administrator/docs.md
index 11dd3e482d5e68bb642a94c533f23d390edf61f3..d845ba6466987f66d6a0f86d6525ffa1cd96d85d 100644
--- a/documentation20/en/11.administrator/docs.md
+++ b/documentation20/en/11.administrator/docs.md
@@ -91,7 +91,7 @@ Only some important configuration parameters are listed below.
For more paramete - firstEp: end point of the first dnode which will be connected in the cluster when taosd starts, the default value is localhost: 6030. - fqdn: FQDN of the data node, which defaults to the first hostname configured by the operating system. If you want to access via IP address directly, you can set it to the IP address of the node. - serverPort: the port number of the external service after taosd started, the default value is 6030. -- httpPort: the port number used by the RESTful service to which all HTTP requests (TCP) require a query/write request. The default value is 6041. +- httpPort: the port number used by the RESTful service to which all HTTP requests (TCP) require a query/write request. The default value is 6041. Note 2.4 and later version use a stand-alone software, taosAdapter to provide RESTFul interface. - dataDir: the data file directory to which all data files will be written. [Default:/var/lib/taos](http://default/var/lib/taos). - logDir: the log file directory to which the running log files of the client and server will be written. [Default:/var/log/taos](http://default/var/log/taos). - arbitrator: the end point of the arbitrator in the system; the default value is null. @@ -538,4 +538,4 @@ At the moment, TDengine has nearly 200 internal reserved keywords, which cannot | CONCAT | GLOB | METRICS | SET | VIEW | | CONFIGS | GRANTS | MIN | SHOW | WAVG | | CONFLICT | GROUP | MINUS | SLASH | WHERE | -| CONNECTION | | | | | \ No newline at end of file +| CONNECTION | | | | | diff --git a/documentation20/en/12.taos-sql/docs.md b/documentation20/en/12.taos-sql/docs.md index 41a3f464d3112084c0723ba962234316ab523ab4..16f85fc0467d3e3215ba1af9c820dd939374019f 100755 --- a/documentation20/en/12.taos-sql/docs.md +++ b/documentation20/en/12.taos-sql/docs.md @@ -1336,7 +1336,7 @@ Is not null supports all types of columns. Non-null expression is < > "" and onl select jtag->'key' from (select jtag from stable) where jtag->'key'>0 ``` ## Escape character description -- Special Character Escape Sequences +- Special Character Escape Sequences (since version 2.4.0.4) | Escape Sequence | **Character Represented by Sequence** | | :--------: | ------------------- | diff --git a/packaging/cfg/jh_taos.cfg b/packaging/cfg/jh_taos.cfg new file mode 100644 index 0000000000000000000000000000000000000000..ba3fa8e462d2c9bcbb87046204cb7bd87fa27f9d --- /dev/null +++ b/packaging/cfg/jh_taos.cfg @@ -0,0 +1,286 @@ +######################################################## +# # +# jh_iot Configuration # +# Any questions, please email jhkj@njsteel.com.cn # +# # +######################################################## + +# first fully qualified domain name (FQDN) for jh_iot system +# firstEp hostname:6030 + +# local fully qualified domain name (FQDN) +# fqdn hostname + +# first port number for the connection (12 continuous UDP/TCP port number are used) +# serverPort 6030 + +# log file's directory +# logDir /var/log/jh_taos + +# data file's directory +# dataDir /var/lib/jh_taos + +# temporary file's directory +# tempDir /tmp/ + +# the arbitrator's fully qualified domain name (FQDN) for jh_iot system, for cluster only +# arbitrator arbitrator_hostname:6042 + +# number of threads per CPU core +# numOfThreadsPerCore 1.0 + +# number of threads to commit cache data +# numOfCommitThreads 4 + +# the proportion of total CPU cores available for query processing +# 2.0: the query threads will be set to double of the CPU cores. +# 1.0: all CPU cores are available for query processing [default]. 
+# 0.5: only half of the CPU cores are available for query. +# 0.0: only one core available. +# ratioOfQueryCores 1.0 + +# the last_row/first/last aggregator will not change the original column name in the result fields +keepColumnName 1 + +# number of management nodes in the system +# numOfMnodes 3 + +# enable/disable backuping vnode directory when removing vnode +# vnodeBak 1 + +# enable/disable installation / usage report +# telemetryReporting 1 + +# enable/disable load balancing +# balance 1 + +# role for dnode. 0 - any, 1 - mnode, 2 - dnode +# role 0 + +# max timer control blocks +# maxTmrCtrl 512 + +# time interval of system monitor, seconds +# monitorInterval 30 + +# number of seconds allowed for a dnode to be offline, for cluster only +# offlineThreshold 864000 + +# RPC re-try timer, millisecond +# rpcTimer 300 + +# RPC maximum time for ack, seconds. +# rpcMaxTime 600 + +# time interval of dnode status reporting to mnode, seconds, for cluster only +# statusInterval 1 + +# time interval of heart beat from shell to dnode, seconds +# shellActivityTimer 3 + +# minimum sliding window time, milli-second +# minSlidingTime 10 + +# minimum time window, milli-second +# minIntervalTime 10 + +# maximum delay before launching a stream computation, milli-second +# maxStreamCompDelay 20000 + +# maximum delay before launching a stream computation for the first time, milli-second +# maxFirstStreamCompDelay 10000 + +# retry delay when a stream computation fails, milli-second +# retryStreamCompDelay 10 + +# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9 +# streamCompDelayRatio 0.1 + +# max number of vgroups per db, 0 means configured automatically +# maxVgroupsPerDb 0 + +# max number of tables per vnode +# maxTablesPerVnode 1000000 + +# cache block size (Mbyte) +# cache 16 + +# number of cache blocks per vnode +# blocks 6 + +# number of days per DB file +# days 10 + +# number of days to keep DB file +# keep 3650 + +# minimum rows of records in file block +# minRows 100 + +# maximum rows of records in file block +# maxRows 4096 + +# the number of acknowledgments required for successful data writing +# quorum 1 + +# enable/disable compression +# comp 2 + +# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync +# walLevel 1 + +# if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away +# fsync 3000 + +# number of replications, for cluster only +# replica 1 + +# the compressed rpc message, option: +# -1 (no compression) +# 0 (all message compressed), +# > 0 (rpc message body which larger than this value will be compressed) +# compressMsgSize -1 + +# max length of an SQL +# maxSQLLength 65480 + +# max length of WildCards +# maxWildCardsLength 100 + +# the maximum number of records allowed for super table time sorting +# maxNumOfOrderedRes 100000 + +# system time zone +# timezone Asia/Shanghai (CST, +0800) +# system time zone (for windows 10) +# timezone UTC-8 + +# system locale +# locale en_US.UTF-8 + +# default system charset +# charset UTF-8 + +# max number of connections allowed in dnode +# maxShellConns 5000 + +# max number of connections allowed in client +# maxConnections 5000 + +# stop writing logs when the disk size of the log folder is less than this value +# minimalLogDirGB 0.1 + +# stop writing temporary files when the disk size of the tmp folder is less than this value +# minimalTmpDirGB 0.1 + +# if disk free space is less than this value, 
server service exit directly within startup process +# minimalDataDirGB 0.1 + +# One mnode is equal to the number of vnode consumed +# mnodeEqualVnodeNum 4 + +# enbale/disable http service +# http 1 + +# enable/disable system monitor +# monitor 1 + +# enable/disable recording the SQL statements via restful interface +# httpEnableRecordSql 0 + +# number of threads used to process http requests +# httpMaxThreads 2 + +# maximum number of rows returned by the restful interface +# restfulRowLimit 10240 + +# The following parameter is used to limit the maximum number of lines in log files. +# max number of lines per log filters +# numOfLogLines 10000000 + +# enable/disable async log +# asyncLog 1 + +# time of keeping log files, days +# logKeepDays 0 + + +# The following parameters are used for debug purpose only. +# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR +# 131: output warning and error +# 135: output debug, warning and error +# 143: output trace, debug, warning and error to log +# 199: output debug, warning and error to both screen and file +# 207: output trace, debug, warning and error to both screen and file + +# debug flag for all log type, take effect when non-zero value +# debugFlag 0 + +# debug flag for meta management messages +# mDebugFlag 135 + +# debug flag for dnode messages +# dDebugFlag 135 + +# debug flag for sync module +# sDebugFlag 135 + +# debug flag for WAL +# wDebugFlag 135 + +# debug flag for SDB +# sdbDebugFlag 135 + +# debug flag for RPC +# rpcDebugFlag 131 + +# debug flag for TIMER +# tmrDebugFlag 131 + +# debug flag for jh_iot client +# cDebugFlag 131 + +# debug flag for JNI +# jniDebugFlag 131 + +# debug flag for storage +# uDebugFlag 131 + +# debug flag for http server +# httpDebugFlag 131 + +# debug flag for monitor +# monDebugFlag 131 + +# debug flag for query +# qDebugFlag 131 + +# debug flag for vnode +# vDebugFlag 131 + +# debug flag for TSDB +# tsdbDebugFlag 131 + +# debug flag for continue query +# cqDebugFlag 131 + +# enable/disable recording the SQL in client +# enableRecordSql 0 + +# generate core file when service crash +# enableCoreFile 1 + +# maximum display width of binary and nchar fields in the shell. 
The parts exceeding this limit will be hidden +# maxBinaryDisplayWidth 30 + +# enable/disable stream (continuous query) +# stream 1 + +# in retrieve blocking model, only in 50% query threads will be used in query processing in dnode +# retrieveBlockingModel 0 + +# the maximum allowed query buffer size in MB during query processing for each data node +# -1 no limit (default) +# 0 no query allowed, queries are disabled +# queryBufferSize -1 + diff --git a/packaging/cfg/jh_taosd.service b/packaging/cfg/jh_taosd.service new file mode 100644 index 0000000000000000000000000000000000000000..d02eb406131b5273785753178b7d6326203bfaef --- /dev/null +++ b/packaging/cfg/jh_taosd.service @@ -0,0 +1,21 @@ +[Unit] +Description=jh_iot server service +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/bin/jh_taosd +ExecStartPre=/usr/local/jh_taos/bin/startPre.sh +TimeoutStopSec=1000000s +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +TimeoutStartSec=0 +StandardOutput=null +Restart=always +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/packaging/cfg/khserver.service b/packaging/cfg/khserver.service new file mode 100644 index 0000000000000000000000000000000000000000..005afaddc06f6ba8e02112c074a7b3575d5974de --- /dev/null +++ b/packaging/cfg/khserver.service @@ -0,0 +1,21 @@ +[Unit] +Description=KingHistorian server service +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/bin/khserver +ExecStartPre=/usr/local/kinghistorian/bin/startPre.sh +TimeoutStopSec=1000000s +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +TimeoutStartSec=0 +StandardOutput=null +Restart=always +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/packaging/cfg/kinghistorian.cfg b/packaging/cfg/kinghistorian.cfg new file mode 100644 index 0000000000000000000000000000000000000000..0d0d0b9f3eaae6539f9e431d51bb532270179226 --- /dev/null +++ b/packaging/cfg/kinghistorian.cfg @@ -0,0 +1,286 @@ +######################################################## +# # +# KingHistorian Configuration # +# Any questions, please email support@wellintech.com # +# # +######################################################## + +# first fully qualified domain name (FQDN) for KingHistorian system +# firstEp hostname:6030 + +# local fully qualified domain name (FQDN) +# fqdn hostname + +# first port number for the connection (12 continuous UDP/TCP port number are used) +# serverPort 6030 + +# log file's directory +# logDir /var/log/kinghistorian + +# data file's directory +# dataDir /var/lib/kinghistorian + +# temporary file's directory +# tempDir /tmp/ + +# the arbitrator's fully qualified domain name (FQDN) for KingHistorian system, for cluster only +# arbitrator arbitrator_hostname:6042 + +# number of threads per CPU core +# numOfThreadsPerCore 1.0 + +# number of threads to commit cache data +# numOfCommitThreads 4 + +# the proportion of total CPU cores available for query processing +# 2.0: the query threads will be set to double of the CPU cores. +# 1.0: all CPU cores are available for query processing [default]. +# 0.5: only half of the CPU cores are available for query. +# 0.0: only one core available. 
+# ratioOfQueryCores 1.0 + +# the last_row/first/last aggregator will not change the original column name in the result fields +keepColumnName 1 + +# number of management nodes in the system +# numOfMnodes 3 + +# enable/disable backuping vnode directory when removing vnode +# vnodeBak 1 + +# enable/disable installation / usage report +# telemetryReporting 1 + +# enable/disable load balancing +# balance 1 + +# role for dnode. 0 - any, 1 - mnode, 2 - dnode +# role 0 + +# max timer control blocks +# maxTmrCtrl 512 + +# time interval of system monitor, seconds +# monitorInterval 30 + +# number of seconds allowed for a dnode to be offline, for cluster only +# offlineThreshold 864000 + +# RPC re-try timer, millisecond +# rpcTimer 300 + +# RPC maximum time for ack, seconds. +# rpcMaxTime 600 + +# time interval of dnode status reporting to mnode, seconds, for cluster only +# statusInterval 1 + +# time interval of heart beat from shell to dnode, seconds +# shellActivityTimer 3 + +# minimum sliding window time, milli-second +# minSlidingTime 10 + +# minimum time window, milli-second +# minIntervalTime 10 + +# maximum delay before launching a stream computation, milli-second +# maxStreamCompDelay 20000 + +# maximum delay before launching a stream computation for the first time, milli-second +# maxFirstStreamCompDelay 10000 + +# retry delay when a stream computation fails, milli-second +# retryStreamCompDelay 10 + +# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9 +# streamCompDelayRatio 0.1 + +# max number of vgroups per db, 0 means configured automatically +# maxVgroupsPerDb 0 + +# max number of tables per vnode +# maxTablesPerVnode 1000000 + +# cache block size (Mbyte) +# cache 16 + +# number of cache blocks per vnode +# blocks 6 + +# number of days per DB file +# days 10 + +# number of days to keep DB file +# keep 3650 + +# minimum rows of records in file block +# minRows 100 + +# maximum rows of records in file block +# maxRows 4096 + +# the number of acknowledgments required for successful data writing +# quorum 1 + +# enable/disable compression +# comp 2 + +# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync +# walLevel 1 + +# if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away +# fsync 3000 + +# number of replications, for cluster only +# replica 1 + +# the compressed rpc message, option: +# -1 (no compression) +# 0 (all message compressed), +# > 0 (rpc message body which larger than this value will be compressed) +# compressMsgSize -1 + +# max length of an SQL +# maxSQLLength 65480 + +# max length of WildCards +# maxWildCardsLength 100 + +# the maximum number of records allowed for super table time sorting +# maxNumOfOrderedRes 100000 + +# system time zone +# timezone Asia/Shanghai (CST, +0800) +# system time zone (for windows 10) +# timezone UTC-8 + +# system locale +# locale en_US.UTF-8 + +# default system charset +# charset UTF-8 + +# max number of connections allowed in dnode +# maxShellConns 5000 + +# max number of connections allowed in client +# maxConnections 5000 + +# stop writing logs when the disk size of the log folder is less than this value +# minimalLogDirGB 0.1 + +# stop writing temporary files when the disk size of the tmp folder is less than this value +# minimalTmpDirGB 0.1 + +# if disk free space is less than this value, khserver service exit directly within startup process +# minimalDataDirGB 0.1 + +# One mnode is 
equal to the number of vnode consumed +# mnodeEqualVnodeNum 4 + +# enbale/disable http service +# http 1 + +# enable/disable system monitor +# monitor 1 + +# enable/disable recording the SQL statements via restful interface +# httpEnableRecordSql 0 + +# number of threads used to process http requests +# httpMaxThreads 2 + +# maximum number of rows returned by the restful interface +# restfulRowLimit 10240 + +# The following parameter is used to limit the maximum number of lines in log files. +# max number of lines per log filters +# numOfLogLines 10000000 + +# enable/disable async log +# asyncLog 1 + +# time of keeping log files, days +# logKeepDays 0 + + +# The following parameters are used for debug purpose only. +# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR +# 131: output warning and error +# 135: output debug, warning and error +# 143: output trace, debug, warning and error to log +# 199: output debug, warning and error to both screen and file +# 207: output trace, debug, warning and error to both screen and file + +# debug flag for all log type, take effect when non-zero value +# debugFlag 0 + +# debug flag for meta management messages +# mDebugFlag 135 + +# debug flag for dnode messages +# dDebugFlag 135 + +# debug flag for sync module +# sDebugFlag 135 + +# debug flag for WAL +# wDebugFlag 135 + +# debug flag for SDB +# sdbDebugFlag 135 + +# debug flag for RPC +# rpcDebugFlag 131 + +# debug flag for TIMER +# tmrDebugFlag 131 + +# debug flag for KingHistorian client +# cDebugFlag 131 + +# debug flag for JNI +# jniDebugFlag 131 + +# debug flag for storage +# uDebugFlag 131 + +# debug flag for http server +# httpDebugFlag 131 + +# debug flag for monitor +# monDebugFlag 131 + +# debug flag for query +# qDebugFlag 131 + +# debug flag for vnode +# vDebugFlag 131 + +# debug flag for TSDB +# tsdbDebugFlag 131 + +# debug flag for continue query +# cqDebugFlag 131 + +# enable/disable recording the SQL in kinghistorian client +# enableRecordSql 0 + +# generate core file when service crash +# enableCoreFile 1 + +# maximum display width of binary and nchar fields in the shell. 
The parts exceeding this limit will be hidden +# maxBinaryDisplayWidth 30 + +# enable/disable stream (continuous query) +# stream 1 + +# in retrieve blocking model, only in 50% query threads will be used in query processing in dnode +# retrieveBlockingModel 0 + +# the maximum allowed query buffer size in MB during query processing for each data node +# -1 no limit (default) +# 0 no query allowed, queries are disabled +# queryBufferSize -1 + diff --git a/packaging/cfg/power.cfg b/packaging/cfg/power.cfg new file mode 100644 index 0000000000000000000000000000000000000000..6f5e910a28c0471666243e275975243bc77d2fc5 --- /dev/null +++ b/packaging/cfg/power.cfg @@ -0,0 +1,286 @@ +######################################################## +# # +# PowerDB Configuration # +# Any questions, please email support@taosdata.com # +# # +######################################################## + +# first fully qualified domain name (FQDN) for PowerDB system +# firstEp hostname:6030 + +# local fully qualified domain name (FQDN) +# fqdn hostname + +# first port number for the connection (12 continuous UDP/TCP port number are used) +# serverPort 6030 + +# log file's directory +# logDir /var/log/power + +# data file's directory +# dataDir /var/lib/power + +# temporary file's directory +# tempDir /tmp/ + +# the arbitrator's fully qualified domain name (FQDN) for PowerDB system, for cluster only +# arbitrator arbitrator_hostname:6042 + +# number of threads per CPU core +# numOfThreadsPerCore 1.0 + +# number of threads to commit cache data +# numOfCommitThreads 4 + +# the proportion of total CPU cores available for query processing +# 2.0: the query threads will be set to double of the CPU cores. +# 1.0: all CPU cores are available for query processing [default]. +# 0.5: only half of the CPU cores are available for query. +# 0.0: only one core available. +# ratioOfQueryCores 1.0 + +# the last_row/first/last aggregator will not change the original column name in the result fields +keepColumnName 1 + +# number of management nodes in the system +# numOfMnodes 3 + +# enable/disable backuping vnode directory when removing vnode +# vnodeBak 1 + +# enable/disable installation / usage report +# telemetryReporting 1 + +# enable/disable load balancing +# balance 1 + +# role for dnode. 0 - any, 1 - mnode, 2 - dnode +# role 0 + +# max timer control blocks +# maxTmrCtrl 512 + +# time interval of system monitor, seconds +# monitorInterval 30 + +# number of seconds allowed for a dnode to be offline, for cluster only +# offlineThreshold 864000 + +# RPC re-try timer, millisecond +# rpcTimer 300 + +# RPC maximum time for ack, seconds. 
+# rpcMaxTime 600 + +# time interval of dnode status reporting to mnode, seconds, for cluster only +# statusInterval 1 + +# time interval of heart beat from shell to dnode, seconds +# shellActivityTimer 3 + +# minimum sliding window time, milli-second +# minSlidingTime 10 + +# minimum time window, milli-second +# minIntervalTime 10 + +# maximum delay before launching a stream computation, milli-second +# maxStreamCompDelay 20000 + +# maximum delay before launching a stream computation for the first time, milli-second +# maxFirstStreamCompDelay 10000 + +# retry delay when a stream computation fails, milli-second +# retryStreamCompDelay 10 + +# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9 +# streamCompDelayRatio 0.1 + +# max number of vgroups per db, 0 means configured automatically +# maxVgroupsPerDb 0 + +# max number of tables per vnode +# maxTablesPerVnode 1000000 + +# cache block size (Mbyte) +# cache 16 + +# number of cache blocks per vnode +# blocks 6 + +# number of days per DB file +# days 10 + +# number of days to keep DB file +# keep 3650 + +# minimum rows of records in file block +# minRows 100 + +# maximum rows of records in file block +# maxRows 4096 + +# the number of acknowledgments required for successful data writing +# quorum 1 + +# enable/disable compression +# comp 2 + +# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync +# walLevel 1 + +# if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away +# fsync 3000 + +# number of replications, for cluster only +# replica 1 + +# the compressed rpc message, option: +# -1 (no compression) +# 0 (all message compressed), +# > 0 (rpc message body which larger than this value will be compressed) +# compressMsgSize -1 + +# max length of an SQL +# maxSQLLength 65480 + +# max length of WildCards +# maxWildCardsLength 100 + +# the maximum number of records allowed for super table time sorting +# maxNumOfOrderedRes 100000 + +# system time zone +# timezone Asia/Shanghai (CST, +0800) +# system time zone (for windows 10) +# timezone UTC-8 + +# system locale +# locale en_US.UTF-8 + +# default system charset +# charset UTF-8 + +# max number of connections allowed in dnode +# maxShellConns 5000 + +# max number of connections allowed in client +# maxConnections 5000 + +# stop writing logs when the disk size of the log folder is less than this value +# minimalLogDirGB 0.1 + +# stop writing temporary files when the disk size of the tmp folder is less than this value +# minimalTmpDirGB 0.1 + +# if disk free space is less than this value, powerd service exit directly within startup process +# minimalDataDirGB 0.1 + +# One mnode is equal to the number of vnode consumed +# mnodeEqualVnodeNum 4 + +# enbale/disable http service +# http 1 + +# enable/disable system monitor +# monitor 1 + +# enable/disable recording the SQL statements via restful interface +# httpEnableRecordSql 0 + +# number of threads used to process http requests +# httpMaxThreads 2 + +# maximum number of rows returned by the restful interface +# restfulRowLimit 10240 + +# The following parameter is used to limit the maximum number of lines in log files. +# max number of lines per log filters +# numOfLogLines 10000000 + +# enable/disable async log +# asyncLog 1 + +# time of keeping log files, days +# logKeepDays 0 + + +# The following parameters are used for debug purpose only. 
+# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR +# 131: output warning and error +# 135: output debug, warning and error +# 143: output trace, debug, warning and error to log +# 199: output debug, warning and error to both screen and file +# 207: output trace, debug, warning and error to both screen and file + +# debug flag for all log type, take effect when non-zero value +# debugFlag 0 + +# debug flag for meta management messages +# mDebugFlag 135 + +# debug flag for dnode messages +# dDebugFlag 135 + +# debug flag for sync module +# sDebugFlag 135 + +# debug flag for WAL +# wDebugFlag 135 + +# debug flag for SDB +# sdbDebugFlag 135 + +# debug flag for RPC +# rpcDebugFlag 131 + +# debug flag for TAOS TIMER +# tmrDebugFlag 131 + +# debug flag for TDengine client +# cDebugFlag 131 + +# debug flag for JNI +# jniDebugFlag 131 + +# debug flag for storage +# uDebugFlag 131 + +# debug flag for http server +# httpDebugFlag 131 + +# debug flag for monitor +# monDebugFlag 131 + +# debug flag for query +# qDebugFlag 131 + +# debug flag for vnode +# vDebugFlag 131 + +# debug flag for TSDB +# tsdbDebugFlag 131 + +# debug flag for continue query +# cqDebugFlag 131 + +# enable/disable recording the SQL in power client +# enableRecordSql 0 + +# generate core file when service crash +# enableCoreFile 1 + +# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden +# maxBinaryDisplayWidth 30 + +# enable/disable stream (continuous query) +# stream 1 + +# in retrieve blocking model, only in 50% query threads will be used in query processing in dnode +# retrieveBlockingModel 0 + +# the maximum allowed query buffer size in MB during query processing for each data node +# -1 no limit (default) +# 0 no query allowed, queries are disabled +# queryBufferSize -1 + diff --git a/packaging/cfg/powerd.service b/packaging/cfg/powerd.service new file mode 100644 index 0000000000000000000000000000000000000000..5aaad07ee8e992e74d6bdbdd36fafbe2236ab658 --- /dev/null +++ b/packaging/cfg/powerd.service @@ -0,0 +1,21 @@ +[Unit] +Description=Power server service +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/bin/powerd +ExecStartPre=/usr/local/power/bin/startPre.sh +TimeoutStopSec=1000000s +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +TimeoutStartSec=0 +StandardOutput=null +Restart=always +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/packaging/cfg/prodb.cfg b/packaging/cfg/prodb.cfg new file mode 100644 index 0000000000000000000000000000000000000000..f84ea63bd6791b050c67befbc0c16ecb0ee553f1 --- /dev/null +++ b/packaging/cfg/prodb.cfg @@ -0,0 +1,286 @@ +######################################################## +# # +# ProDB Configuration # +# Any questions, please email support@hanatech.com.cn # +# # +######################################################## + +# first fully qualified domain name (FQDN) for ProDB system +# firstEp hostname:6030 + +# local fully qualified domain name (FQDN) +# fqdn hostname + +# first port number for the connection (12 continuous UDP/TCP port number are used) +# serverPort 6030 + +# log file's directory +# logDir /var/log/ProDB + +# data file's directory +# dataDir /var/lib/ProDB + +# temporary file's directory +# tempDir /tmp/ + +# the arbitrator's fully qualified domain name (FQDN) for ProDB system, for cluster only +# arbitrator arbitrator_hostname:6042 + +# number of threads per CPU core +# 
numOfThreadsPerCore 1.0 + +# number of threads to commit cache data +# numOfCommitThreads 4 + +# the proportion of total CPU cores available for query processing +# 2.0: the query threads will be set to double of the CPU cores. +# 1.0: all CPU cores are available for query processing [default]. +# 0.5: only half of the CPU cores are available for query. +# 0.0: only one core available. +# ratioOfQueryCores 1.0 + +# the last_row/first/last aggregator will not change the original column name in the result fields +keepColumnName 1 + +# number of management nodes in the system +# numOfMnodes 3 + +# enable/disable backuping vnode directory when removing vnode +# vnodeBak 1 + +# enable/disable installation / usage report +# telemetryReporting 1 + +# enable/disable load balancing +# balance 1 + +# role for dnode. 0 - any, 1 - mnode, 2 - dnode +# role 0 + +# max timer control blocks +# maxTmrCtrl 512 + +# time interval of system monitor, seconds +# monitorInterval 30 + +# number of seconds allowed for a dnode to be offline, for cluster only +# offlineThreshold 864000 + +# RPC re-try timer, millisecond +# rpcTimer 300 + +# RPC maximum time for ack, seconds. +# rpcMaxTime 600 + +# time interval of dnode status reporting to mnode, seconds, for cluster only +# statusInterval 1 + +# time interval of heart beat from shell to dnode, seconds +# shellActivityTimer 3 + +# minimum sliding window time, milli-second +# minSlidingTime 10 + +# minimum time window, milli-second +# minIntervalTime 10 + +# maximum delay before launching a stream computation, milli-second +# maxStreamCompDelay 20000 + +# maximum delay before launching a stream computation for the first time, milli-second +# maxFirstStreamCompDelay 10000 + +# retry delay when a stream computation fails, milli-second +# retryStreamCompDelay 10 + +# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9 +# streamCompDelayRatio 0.1 + +# max number of vgroups per db, 0 means configured automatically +# maxVgroupsPerDb 0 + +# max number of tables per vnode +# maxTablesPerVnode 1000000 + +# cache block size (Mbyte) +# cache 16 + +# number of cache blocks per vnode +# blocks 6 + +# number of days per DB file +# days 10 + +# number of days to keep DB file +# keep 3650 + +# minimum rows of records in file block +# minRows 100 + +# maximum rows of records in file block +# maxRows 4096 + +# the number of acknowledgments required for successful data writing +# quorum 1 + +# enable/disable compression +# comp 2 + +# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync +# walLevel 1 + +# if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away +# fsync 3000 + +# number of replications, for cluster only +# replica 1 + +# the compressed rpc message, option: +# -1 (no compression) +# 0 (all message compressed), +# > 0 (rpc message body which larger than this value will be compressed) +# compressMsgSize -1 + +# max length of an SQL +# maxSQLLength 65480 + +# max length of WildCards +# maxWildCardsLength 100 + +# the maximum number of records allowed for super table time sorting +# maxNumOfOrderedRes 100000 + +# system time zone +# timezone Asia/Shanghai (CST, +0800) +# system time zone (for windows 10) +# timezone UTC-8 + +# system locale +# locale en_US.UTF-8 + +# default system charset +# charset UTF-8 + +# max number of connections allowed in dnode +# maxShellConns 5000 + +# max number of connections allowed in client +# 
maxConnections 5000 + +# stop writing logs when the disk size of the log folder is less than this value +# minimalLogDirGB 0.1 + +# stop writing temporary files when the disk size of the tmp folder is less than this value +# minimalTmpDirGB 0.1 + +# if disk free space is less than this value, prodbs service exit directly within startup process +# minimalDataDirGB 0.1 + +# One mnode is equal to the number of vnode consumed +# mnodeEqualVnodeNum 4 + +# enbale/disable http service +# http 1 + +# enable/disable system monitor +# monitor 1 + +# enable/disable recording the SQL statements via restful interface +# httpEnableRecordSql 0 + +# number of threads used to process http requests +# httpMaxThreads 2 + +# maximum number of rows returned by the restful interface +# restfulRowLimit 10240 + +# The following parameter is used to limit the maximum number of lines in log files. +# max number of lines per log filters +# numOfLogLines 10000000 + +# enable/disable async log +# asyncLog 1 + +# time of keeping log files, days +# logKeepDays 0 + + +# The following parameters are used for debug purpose only. +# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR +# 131: output warning and error +# 135: output debug, warning and error +# 143: output trace, debug, warning and error to log +# 199: output debug, warning and error to both screen and file +# 207: output trace, debug, warning and error to both screen and file + +# debug flag for all log type, take effect when non-zero value +# debugFlag 0 + +# debug flag for meta management messages +# mDebugFlag 135 + +# debug flag for dnode messages +# dDebugFlag 135 + +# debug flag for sync module +# sDebugFlag 135 + +# debug flag for WAL +# wDebugFlag 135 + +# debug flag for SDB +# sdbDebugFlag 135 + +# debug flag for RPC +# rpcDebugFlag 131 + +# debug flag for TAOS TIMER +# tmrDebugFlag 131 + +# debug flag for ProDB client +# cDebugFlag 131 + +# debug flag for JNI +# jniDebugFlag 131 + +# debug flag for storage +# uDebugFlag 131 + +# debug flag for http server +# httpDebugFlag 131 + +# debug flag for monitor +# monDebugFlag 131 + +# debug flag for query +# qDebugFlag 131 + +# debug flag for vnode +# vDebugFlag 131 + +# debug flag for TSDB +# tsdbDebugFlag 131 + +# debug flag for continue query +# cqDebugFlag 131 + +# enable/disable recording the SQL in prodb client +# enableRecordSql 0 + +# generate core file when service crash +# enableCoreFile 1 + +# maximum display width of binary and nchar fields in the shell. 
The parts exceeding this limit will be hidden +# maxBinaryDisplayWidth 30 + +# enable/disable stream (continuous query) +# stream 1 + +# in retrieve blocking model, only in 50% query threads will be used in query processing in dnode +# retrieveBlockingModel 0 + +# the maximum allowed query buffer size in MB during query processing for each data node +# -1 no limit (default) +# 0 no query allowed, queries are disabled +# queryBufferSize -1 + diff --git a/packaging/cfg/prodbs.service b/packaging/cfg/prodbs.service new file mode 100644 index 0000000000000000000000000000000000000000..4d5108989474a1a3e9c3c7a11c6b1136fb16c67c --- /dev/null +++ b/packaging/cfg/prodbs.service @@ -0,0 +1,21 @@ +[Unit] +Description=ProDB server service +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/bin/prodbs +ExecStartPre=/usr/local/ProDB/bin/startPre.sh +TimeoutStopSec=1000000s +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +TimeoutStartSec=0 +StandardOutput=null +Restart=always +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index 59f87b0a1f8d3aa192383457a85e1d53b1a3bf54..46e27b98e9339cb0cb0cb0d8fe0d038bde926148 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -43,7 +43,7 @@ keepColumnName 1 # number of management nodes in the system -# numOfMnodes 3 +# numOfMnodes 1 # enable/disable backuping vnode directory when removing vnode # vnodeBak 1 diff --git a/packaging/cfg/taosd.service b/packaging/cfg/taosd.service index 452488b4e951e36c043c823e17cca5ab7dbfd21b..fff4b74e62a6da8f2bda9a6306a79132d7585e42 100644 --- a/packaging/cfg/taosd.service +++ b/packaging/cfg/taosd.service @@ -1,7 +1,7 @@ [Unit] Description=TDengine server service -After=network-online.target taosadapter.service -Wants=network-online.target taosadapter.service +After=network-online.target +Wants=network-online.target [Service] Type=simple diff --git a/packaging/cfg/tq.cfg b/packaging/cfg/tq.cfg new file mode 100644 index 0000000000000000000000000000000000000000..284335a4ad9d5b60cc36a0d722e8c6994f20a812 --- /dev/null +++ b/packaging/cfg/tq.cfg @@ -0,0 +1,286 @@ +######################################################## +# # +# TQueue Configuration # +# Any questions, please email support@taosdata.com # +# # +######################################################## + +# first fully qualified domain name (FQDN) for TQueue system +# firstEp hostname:6030 + +# local fully qualified domain name (FQDN) +# fqdn hostname + +# first port number for the connection (12 continuous UDP/TCP port number are used) +# serverPort 6030 + +# log file's directory +# logDir /var/log/tq + +# data file's directory +# dataDir /var/lib/tq + +# temporary file's directory +# tempDir /tmp/ + +# the arbitrator's fully qualified domain name (FQDN) for TQueue system, for cluster only +# arbitrator arbitrator_hostname:6042 + +# number of threads per CPU core +# numOfThreadsPerCore 1.0 + +# number of threads to commit cache data +# numOfCommitThreads 4 + +# the proportion of total CPU cores available for query processing +# 2.0: the query threads will be set to double of the CPU cores. +# 1.0: all CPU cores are available for query processing [default]. +# 0.5: only half of the CPU cores are available for query. +# 0.0: only one core available. 
+# ratioOfQueryCores 1.0 + +# the last_row/first/last aggregator will not change the original column name in the result fields +keepColumnName 1 + +# number of management nodes in the system +# numOfMnodes 3 + +# enable/disable backuping vnode directory when removing vnode +# vnodeBak 1 + +# enable/disable installation / usage report +# telemetryReporting 1 + +# enable/disable load balancing +# balance 1 + +# role for dnode. 0 - any, 1 - mnode, 2 - dnode +# role 0 + +# max timer control blocks +# maxTmrCtrl 512 + +# time interval of system monitor, seconds +# monitorInterval 30 + +# number of seconds allowed for a dnode to be offline, for cluster only +# offlineThreshold 864000 + +# RPC re-try timer, millisecond +# rpcTimer 300 + +# RPC maximum time for ack, seconds. +# rpcMaxTime 600 + +# time interval of dnode status reporting to mnode, seconds, for cluster only +# statusInterval 1 + +# time interval of heart beat from shell to dnode, seconds +# shellActivityTimer 3 + +# minimum sliding window time, milli-second +# minSlidingTime 10 + +# minimum time window, milli-second +# minIntervalTime 10 + +# maximum delay before launching a stream computation, milli-second +# maxStreamCompDelay 20000 + +# maximum delay before launching a stream computation for the first time, milli-second +# maxFirstStreamCompDelay 10000 + +# retry delay when a stream computation fails, milli-second +# retryStreamCompDelay 10 + +# the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9 +# streamCompDelayRatio 0.1 + +# max number of vgroups per db, 0 means configured automatically +# maxVgroupsPerDb 0 + +# max number of tables per vnode +# maxTablesPerVnode 1000000 + +# cache block size (Mbyte) +# cache 16 + +# number of cache blocks per vnode +# blocks 6 + +# number of days per DB file +# days 10 + +# number of days to keep DB file +# keep 3650 + +# minimum rows of records in file block +# minRows 100 + +# maximum rows of records in file block +# maxRows 4096 + +# the number of acknowledgments required for successful data writing +# quorum 1 + +# enable/disable compression +# comp 2 + +# write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync +# walLevel 1 + +# if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away +# fsync 3000 + +# number of replications, for cluster only +# replica 1 + +# the compressed rpc message, option: +# -1 (no compression) +# 0 (all message compressed), +# > 0 (rpc message body which larger than this value will be compressed) +# compressMsgSize -1 + +# max length of an SQL +# maxSQLLength 65480 + +# max length of WildCards +# maxWildCardsLength 100 + +# the maximum number of records allowed for super table time sorting +# maxNumOfOrderedRes 100000 + +# system time zone +# timezone Asia/Shanghai (CST, +0800) +# system time zone (for windows 10) +# timezone UTC-8 + +# system locale +# locale en_US.UTF-8 + +# default system charset +# charset UTF-8 + +# max number of connections allowed in dnode +# maxShellConns 5000 + +# max number of connections allowed in client +# maxConnections 5000 + +# stop writing logs when the disk size of the log folder is less than this value +# minimalLogDirGB 0.1 + +# stop writing temporary files when the disk size of the tmp folder is less than this value +# minimalTmpDirGB 0.1 + +# if disk free space is less than this value, tqd service exit directly within startup process +# minimalDataDirGB 0.1 + +# One mnode is 
equal to the number of vnode consumed +# mnodeEqualVnodeNum 4 + +# enbale/disable http service +# http 1 + +# enable/disable system monitor +# monitor 1 + +# enable/disable recording the SQL statements via restful interface +# httpEnableRecordSql 0 + +# number of threads used to process http requests +# httpMaxThreads 2 + +# maximum number of rows returned by the restful interface +# restfulRowLimit 10240 + +# The following parameter is used to limit the maximum number of lines in log files. +# max number of lines per log filters +# numOfLogLines 10000000 + +# enable/disable async log +# asyncLog 1 + +# time of keeping log files, days +# logKeepDays 0 + + +# The following parameters are used for debug purpose only. +# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR +# 131: output warning and error +# 135: output debug, warning and error +# 143: output trace, debug, warning and error to log +# 199: output debug, warning and error to both screen and file +# 207: output trace, debug, warning and error to both screen and file + +# debug flag for all log type, take effect when non-zero value +# debugFlag 0 + +# debug flag for meta management messages +# mDebugFlag 135 + +# debug flag for dnode messages +# dDebugFlag 135 + +# debug flag for sync module +# sDebugFlag 135 + +# debug flag for WAL +# wDebugFlag 135 + +# debug flag for SDB +# sdbDebugFlag 135 + +# debug flag for RPC +# rpcDebugFlag 131 + +# debug flag for TAOS TIMER +# tmrDebugFlag 131 + +# debug flag for TQueue client +# cDebugFlag 131 + +# debug flag for JNI +# jniDebugFlag 131 + +# debug flag for storage +# uDebugFlag 131 + +# debug flag for http server +# httpDebugFlag 131 + +# debug flag for monitor +# monDebugFlag 131 + +# debug flag for query +# qDebugFlag 131 + +# debug flag for vnode +# vDebugFlag 131 + +# debug flag for TSDB +# tsdbDebugFlag 131 + +# debug flag for continue query +# cqDebugFlag 131 + +# enable/disable recording the SQL in tq client +# enableRecordSql 0 + +# generate core file when service crash +# enableCoreFile 1 + +# maximum display width of binary and nchar fields in the shell. 
The parts exceeding this limit will be hidden +# maxBinaryDisplayWidth 30 + +# enable/disable stream (continuous query) +# stream 1 + +# in retrieve blocking model, only in 50% query threads will be used in query processing in dnode +# retrieveBlockingModel 0 + +# the maximum allowed query buffer size in MB during query processing for each data node +# -1 no limit (default) +# 0 no query allowed, queries are disabled +# queryBufferSize -1 + diff --git a/packaging/cfg/tqd.service b/packaging/cfg/tqd.service new file mode 100644 index 0000000000000000000000000000000000000000..805a019f12a1eb26f16ddb3c2be0ae49e9f9b0e0 --- /dev/null +++ b/packaging/cfg/tqd.service @@ -0,0 +1,21 @@ +[Unit] +Description=TQ server service +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/bin/tqd +ExecStartPre=/usr/local/tq/bin/startPre.sh +TimeoutStopSec=1000000s +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +TimeoutStartSec=0 +StandardOutput=null +Restart=always +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/packaging/deb/powerd b/packaging/deb/powerd deleted file mode 100644 index bb77aab1660545c62e5db27b8a37d5d5937f623f..0000000000000000000000000000000000000000 --- a/packaging/deb/powerd +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash -# -# Modified from original source: Elastic Search -# https://github.com/elasticsearch/elasticsearch -# Thank you to the Elastic Search authors -# -# chkconfig: 2345 99 01 -# -### BEGIN INIT INFO -# Provides: PowerDB -# Required-Start: $local_fs $network $syslog -# Required-Stop: $local_fs $network $syslog -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Starts PowerDB powerd -# Description: Starts PowerDB powerd, a time-series database engine -### END INIT INFO - -set -e - -PATH="/bin:/usr/bin:/sbin:/usr/sbin" -NAME="PowerDB" -USER="root" -GROUP="root" -DAEMON="/usr/local/power/bin/powerd" -DAEMON_OPTS="" -PID_FILE="/var/run/$NAME.pid" -APPARGS="" - -# Maximum number of open files -MAX_OPEN_FILES=65535 - -. /lib/lsb/init-functions - -case "$1" in - start) - - log_action_begin_msg "Starting PowerDB..." - if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then - - touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE" - - if [ -n "$MAX_OPEN_FILES" ]; then - ulimit -n $MAX_OPEN_FILES - fi - - start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS - - log_end_msg $? - fi - ;; - - stop) - log_action_begin_msg "Stopping PowerDB..." - set +e - if [ -f "$PID_FILE" ]; then - start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null - if [ $? -eq 1 ]; then - log_action_cont_msg "TSD is not running but pid file exists, cleaning up" - elif [ $? 
-eq 3 ]; then - PID="`cat $PID_FILE`" - log_failure_msg "Failed to stop PowerDB (pid $PID)" - exit 1 - fi - rm -f "$PID_FILE" - else - log_action_cont_msg "PowerDB was not running" - fi - log_action_end_msg 0 - set -e - ;; - - restart|force-reload) - if [ -f "$PID_FILE" ]; then - $0 stop - sleep 1 - fi - $0 start - ;; - status) - status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME" - ;; - *) - exit 1 - ;; -esac - -exit 0 diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile index a54e9ca499330855b22daf523286ea5bbc509bb8..4f847f949a25e157261cc42b20ece0c9072e328f 100644 --- a/packaging/docker/Dockerfile +++ b/packaging/docker/Dockerfile @@ -11,12 +11,16 @@ RUN tar -zxf ${pkgFile} WORKDIR /root/${dirName}/ RUN /bin/bash install.sh -e no -RUN apt-get clean && apt-get update && apt-get install -y locales && locale-gen en_US.UTF-8 +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get clean && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8 ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \ LC_CTYPE=en_US.UTF-8 \ LANG=en_US.UTF-8 \ LC_ALL=en_US.UTF-8 -EXPOSE 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 -CMD ["run_taosd.sh"] -VOLUME [ "/var/lib/taos", "/var/log/taos","/etc/taos/" ] +EXPOSE 6030-6049 +EXPOSE 6030-6039/udp +COPY ./bin/* /usr/bin/ +ENTRYPOINT ["/usr/bin/entrypoint.sh"] +CMD ["taosd"] +VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ] diff --git a/packaging/docker/README.md b/packaging/docker/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e41182f471050af6b4d47b696eb237e319b2dd80 --- /dev/null +++ b/packaging/docker/README.md @@ -0,0 +1,664 @@ +# TDengine Docker Image Quick Reference + +## What is TDengine? + +TDengine is an open-sourced big data platform under [GNU AGPL v3.0](http://www.gnu.org/licenses/agpl-3.0.html), designed and optimized for the Internet of Things (IoT), Connected Cars, Industrial IoT, and IT Infrastructure and Application Monitoring. Besides the 10x faster time-series database, it provides caching, stream computing, message queuing and other functionalities to reduce the complexity and cost of development and operation. + +- **10x Faster on Insert/Query Speeds**: Through the innovative design on storage, on a single-core machine, over 20K requests can be processed, millions of data points can be ingested, and over 10 million data points can be retrieved in a second. It is 10 times faster than other databases. + +- **1/5 Hardware/Cloud Service Costs**: Compared with typical big data solutions, less than 1/5 of computing resources are required. Via column-based storage and tuned compression algorithms for different data types, less than 1/10 of storage space is needed. + +- **Full Stack for Time-Series Data**: By integrating a database with message queuing, caching, and stream computing features together, it is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software. It makes the system architecture much simpler and more robust. + +- **Powerful Data Analysis**: Whether it is 10 years or one minute ago, data can be queried just by specifying the time range. Data can be aggregated over time, multiple time streams or both. Ad Hoc queries or analyses can be executed via TDengine shell, Python, R or Matlab. + +- **Seamless Integration with Other Tools**: Telegraf, Grafana, Matlab, R, and other tools can be integrated with TDengine without a line of code. MQTT, OPC, Hadoop, Spark, and many others will be integrated soon. 
+
+- **Zero Management, No Learning Curve**: It takes only seconds to download, install, and run it successfully; there are no other dependencies. Automatic partitioning on tables or DBs. Standard SQL is used, with C/C++, Python, JDBC, Go and RESTful connectors.
+
+## How to use this image
+
+### Start a TDengine instance with RESTful API exposed
+
+You can simply use `docker run` to start a TDengine instance and connect to it with RESTful connectors (e.g. [JDBC-RESTful](https://www.taosdata.com/cn/documentation/connector/java)).
+
+```bash
+docker run -d --name tdengine -p 6041:6041 tdengine/tdengine
+```
+
+This command starts a docker container named `tdengine` with the TDengine server running, and maps the container's HTTP port 6041 to the host's port 6041. If you have `curl` on your host, you can list the databases with the command:
+
+```bash
+curl -u root:taosdata -d "show databases" localhost:6041/rest/sql
+```
+
+You can execute the `taos` shell command in the container:
+
+```bash
+$ docker exec -it tdengine taos
+
+Welcome to the TDengine shell from Linux, Client Version:2.4.0.0
+Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
+
+taos> show databases;
+ name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
+====================================================================================================================================================================================================================================================================================
+ log | 2022-01-17 13:57:22.270 | 10 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
+Query OK, 1 row(s) in set (0.002843s)
+```
+
+Since TDengine uses the container hostname to establish connections, it is a bit more complex to use the taos shell and native connectors (such as JDBC-JNI) with a TDengine container instance. This is the recommended way to expose ports and use TDengine with docker in simple cases. If you want to use the taos shell or taosc/connectors smoothly outside the `tdengine` container, see the following use cases and pick the one that matches your needs.
+
+### Start with host network
+
+```bash
+docker run -d --name tdengine --network host tdengine/tdengine
+```
+
+Starting the container with the `host` network uses the host's hostname as the FQDN instead of the container ID. It is much like starting natively with `systemd` on the host. After installing the client, you can use the `taos` shell as usual on the host.
+
+```bash
+$ taos
+
+Welcome to the TDengine shell from Linux, Client Version:2.4.0.0
+Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
+
+taos> show dnodes;
+ id | end_point | vnodes | cores | status | role | create_time | offline reason |
+======================================================================================================================================
+ 1 | host:6030 | 1 | 8 | ready | any | 2022-01-17 22:10:32.619 | |
+Query OK, 1 row(s) in set (0.003233s)
+```
+
+### Start with exposed ports and specified hostname
+
+Setting the FQDN explicitly helps when TDengine is used from other environments or applications. We provide the environment variable `TAOS_FQDN` or the `fqdn` config option to explicitly set the hostname used by TDengine container instance(s).
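The docker-compose examples later in this README suggest that the image's entrypoint maps `TAOS_`-prefixed environment variables onto their `taos.cfg` counterparts (`TAOS_FIRST_EP`, `TAOS_NUM_OF_MNODES`, `TAOS_REPLICA`, and so on). A minimal sketch under that assumption, combining two such variables in one `docker run`; the example that follows then uses `TAOS_FQDN` in exactly this way:

```bash
# Assumed convention: TAOS_<OPTION> environment variables map onto the
# corresponding taos.cfg options (here fqdn and firstEp), as the
# docker-compose examples later in this document suggest.
docker run -d --name tdengine \
  -e TAOS_FQDN=tdengine \
  -e TAOS_FIRST_EP=tdengine:6030 \
  tdengine/tdengine
```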
+
+Use the `TAOS_FQDN` variable within the `docker run` command:
+
+```bash
+docker run -d \
+   --name tdengine \
+   -e TAOS_FQDN=tdengine \
+   -p 6030-6049:6030-6049 \
+   -p 6030-6049:6030-6049/udp \
+   tdengine/tdengine
+```
+
+This command starts a docker container with the TDengine server running and maps the container's TCP ports 6030 to 6049 to the host's TCP ports 6030 to 6049, and the container's UDP port range 6030-6039 to the host's UDP ports 6030-6039. If the host is already running a TDengine server and occupying the same port(s), you need to map the container's ports to a different unused port segment. (Please see TDengine 2.0 Port Description for details). In order to support TDengine clients accessing TDengine server services, both TCP and UDP ports need to be exposed by default (unless `rpcForceTcp` is set to `1`).
+
+If you want to use the taos shell or native connectors ([JDBC-JNI](https://www.taosdata.com/cn/documentation/connector/java), or [driver-go](https://github.com/taosdata/driver-go)), you need to make sure `TAOS_FQDN` is resolvable via `/etc/hosts` or a custom DNS service.
+
+If you set `TAOS_FQDN` to the host's hostname, it will work just like using the `host` network in the previous use case. Otherwise, as with `-e TAOS_FQDN=tdengine`, you can add the hostname record `tdengine` to `/etc/hosts` (use `127.0.0.1` on the host itself; if the TDengine client/application runs on another host, set it to the right IP of the host, e.g. `192.168.10.1`, which you can check with `hostname -i` or `ip route list default`) to make the TDengine endpoint resolvable:
+
+```bash
+echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts
+```
+
+Then you can use `taos` with the host `tdengine`:
+
+```bash
+taos -h tdengine
+```
+
+Or develop/test applications with native connectors, as in Python:
+
+```python
+import taos
+conn = taos.connect(host = "tdengine")
+res = conn.query("show databases")
+for row in res.fetch_all_into_dict():
+    print(row)
+```
+
+See the results:
+
+```bash
+Python 3.8.10 (default, Nov 26 2021, 20:14:08)
+[GCC 9.3.0] on linux
+Type "help", "copyright", "credits" or "license" for more information.
+>>> import taos;
+>>> conn = taos.connect(host = "tdengine")
+>>> res = conn.query("show databases")
+>>> for row in res.fetch_all_into_dict():
+...     print(row)
+...
+{'name': 'log', 'created_time': datetime.datetime(2022, 1, 17, 22, 56, 2, 490000), 'ntables': 11, 'vgroups': 1, 'replica': 1, 'quorum': 1, 'days': 10, 'keep': '30', 'cache(MB)': 1, 'blocks': 3, 'minrows': 100, 'maxrows': 4096, 'wallevel': 1, 'fsync': 3000, 'comp': 2, 'cachelast': 0, 'precision': 'us', 'update': 0, 'status': 'ready'}
+```
+
+### Start with specific network
+
+Alternatively, you can use TDengine natively on a specific docker network.
+
+First, create a network for the TDengine server and client/application:
+
+```bash
+docker network create td-net
+```
+
+Start a TDengine instance with the service name as its FQDN (explicitly set with `TAOS_FQDN`):
+
+```bash
+docker run -d --name tdengine --network td-net \
+   -e TAOS_FQDN=tdengine \
+   tdengine/tdengine
+```
+
+Start a TDengine client in another container on the same network:
+
+```bash
+docker run --rm -it --network td-net -e TAOS_FIRST_EP=tdengine tdengine/tdengine taos
+# or
+docker run --rm -it --network td-net tdengine/tdengine taos -h tdengine
+```
+
+When you build your application image with docker, you should add the TDengine client in the dockerfile. For example, based on the `ubuntu:20.04` image, install the client like this:
+
+```dockerfile
+FROM ubuntu:20.04
+RUN apt-get update && apt-get install -y wget
+ENV TDENGINE_VERSION=2.4.0.0
+RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+   && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+   && cd TDengine-client-${TDENGINE_VERSION} \
+   && ./install_client.sh \
+   && cd ../ \
+   && rm -rf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz TDengine-client-${TDENGINE_VERSION}
+## add your application next, eg. go, build it in builder stage, copy the binary to the runtime
+#COPY --from=builder /path/to/build/app /usr/bin/
+#CMD ["app"]
+```
+
+Here is a Go example app:
+
+```go
+/*
+ * In this test program, we'll create a database and insert 4 records then select out.
+ */
+package main
+
+import (
+    "database/sql"
+    "flag"
+    "fmt"
+    "time"
+
+    _ "github.com/taosdata/driver-go/v2/taosSql"
+)
+
+type config struct {
+    hostName   string
+    serverPort string
+    user       string
+    password   string
+}
+
+var configPara config
+var taosDriverName = "taosSql"
+var url string
+
+func init() {
+    flag.StringVar(&configPara.hostName, "h", "", "The host to connect to TDengine server.")
+    flag.StringVar(&configPara.serverPort, "p", "", "The TCP/IP port number to use for the connection to TDengine server.")
+    flag.StringVar(&configPara.user, "u", "root", "The TDengine user name to use when connecting to the server.")
+    flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.")
+    flag.Parse()
+}
+
+func printAllArgs() {
+    fmt.Printf("============= args parse result: =============\n")
+    fmt.Printf("hostName: %v\n", configPara.hostName)
+    fmt.Printf("serverPort: %v\n", configPara.serverPort)
+    fmt.Printf("usr: %v\n", configPara.user)
+    fmt.Printf("password: %v\n", configPara.password)
+    fmt.Printf("================================================\n")
+}
+
+func main() {
+    printAllArgs()
+
+    url = "root:taosdata@/tcp(" + configPara.hostName + ":" + configPara.serverPort + ")/"
+
+    taos, err := sql.Open(taosDriverName, url)
+    checkErr(err, "open database error")
+    defer taos.Close()
+
+    taos.Exec("create database if not exists test")
+    taos.Exec("use test")
+    taos.Exec("create table if not exists tb1 (ts timestamp, a int)")
+    _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)")
+    checkErr(err, "failed to insert")
+    rows, err := taos.Query("select * from tb1")
+    checkErr(err, "failed to select")
+
+    defer rows.Close()
+    for rows.Next() {
+        var r struct {
+            ts time.Time
+            a  int
+        }
+        err := rows.Scan(&r.ts, &r.a)
+        if err != nil {
+            fmt.Println("scan error:\n", err)
+            return
+        }
+        fmt.Println(r.ts, r.a)
+    }
+}
+
+func checkErr(err error, prompt string) {
+    if err != nil {
+        // Printf (not Println) so the %s verb is actually formatted
+        fmt.Printf("ERROR: %s\n", prompt)
+        panic(err)
+    }
+}
+```
+
+The full version of the dockerfile could be:
+
+```dockerfile
+FROM golang:1.17.6-buster as builder
+ENV TDENGINE_VERSION=2.4.0.0
+RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+   && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+   && cd TDengine-client-${TDENGINE_VERSION} \
+   && ./install_client.sh \
+   && cd ../ \
+   && rm -rf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz TDengine-client-${TDENGINE_VERSION}
+WORKDIR /usr/src/app/
+ENV GOPROXY="https://goproxy.io,direct"
+COPY ./main.go ./go.mod ./go.sum /usr/src/app/
+RUN go env && go mod tidy && go build
+
+FROM ubuntu:20.04
+RUN apt-get update && apt-get install -y wget
+ENV TDENGINE_VERSION=2.4.0.0
+RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+   && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+   && cd TDengine-client-${TDENGINE_VERSION} \
+   && ./install_client.sh \
+   && cd ../ \
+   && rm -rf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz TDengine-client-${TDENGINE_VERSION}
+
+## add your application next, eg. go, build it in builder stage, copy the binary to the runtime
+COPY --from=builder /usr/src/app/app /usr/bin/
+CMD ["app"]
+```
+
+Suppose you have `main.go`, `go.mod`, `go.sum`, and `app.dockerfile`; build the app and run it on the network `td-net`:
+
+```bash
+$ docker build -t app -f app.dockerfile .
+$ docker run --rm --network td-net app -h tdengine -p 6030
+============= args parse result: =============
+hostName: tdengine
+serverPort: 6030
+usr: root
+password: taosdata
+================================================
+2022-01-17 15:56:55.48 +0000 UTC 0
+2022-01-17 15:56:56.48 +0000 UTC 1
+2022-01-17 15:56:57.48 +0000 UTC 2
+2022-01-17 15:56:58.48 +0000 UTC 3
+2022-01-17 15:58:01.842 +0000 UTC 0
+2022-01-17 15:58:02.842 +0000 UTC 1
+2022-01-17 15:58:03.842 +0000 UTC 2
+2022-01-17 15:58:04.842 +0000 UTC 3
+2022-01-18 01:43:48.029 +0000 UTC 0
+2022-01-18 01:43:49.029 +0000 UTC 1
+2022-01-18 01:43:50.029 +0000 UTC 2
+2022-01-18 01:43:51.029 +0000 UTC 3
+```
+
+By now you should be familiar with developing and testing with TDengine; let's look at some more complex cases.
+
+### Start with docker-compose with multiple nodes(instances)
+
+Starting a 2-replica, 2-mnode, 2-dnode, 1-arbitrator TDengine cluster with `docker-compose` is quite simple. Save the file as `docker-compose.yml`:
+
+```yaml
+version: "3"
+services:
+  arbitrator:
+    image: tdengine/tdengine:$VERSION
+    command: tarbitrator
+  td-1:
+    image: tdengine/tdengine:$VERSION
+    environment:
+      TAOS_FQDN: "td-1"
+      TAOS_FIRST_EP: "td-1"
+      TAOS_NUM_OF_MNODES: "2"
+      TAOS_REPLICA: "2"
+      TAOS_ARBITRATOR: arbitrator:6042
+    volumes:
+      - taosdata-td1:/var/lib/taos/
+      - taoslog-td1:/var/log/taos/
+  td-2:
+    image: tdengine/tdengine:$VERSION
+    environment:
+      TAOS_FQDN: "td-2"
+      TAOS_FIRST_EP: "td-1"
+      TAOS_NUM_OF_MNODES: "2"
+      TAOS_REPLICA: "2"
+      TAOS_ARBITRATOR: arbitrator:6042
+    volumes:
+      - taosdata-td2:/var/lib/taos/
+      - taoslog-td2:/var/log/taos/
+volumes:
+  taosdata-td1:
+  taoslog-td1:
+  taosdata-td2:
+  taoslog-td2:
+```
+
+You may notice that:
+
+- We use the `VERSION` environment variable to set the `tdengine` image tag version once.
+- **`TAOS_FIRST_EP`** **MUST** be set to join the newly created instances into an existing TDengine cluster. If you want more instances, use `TAOS_SECOND_EP` in case of HA (High Availability) concerns.
+- `TAOS_NUM_OF_MNODES` sets the number of mnodes for the cluster.
+- `TAOS_REPLICA` sets the default number of database replicas; `2` means there are one master and one slave copy of the data. The `replica` option must satisfy `1 <= replica <= 3` and must not exceed the number of dnodes.
+- `TAOS_ARBITRATOR` sets the arbitrator endpoint of the cluster, used for failover/election. Using an arbitrator is recommended in a two-node cluster.
+- Starting an arbitrator service is trivial: just set the service command to `tarbitrator` (the binary name of the arbitrator daemon) in the docker-compose service options: `command: tarbitrator`.
+
+Now run `docker-compose up -d` with the version specified:
+
+```bash
+$ VERSION=2.4.0.0 docker-compose up -d
+Creating network "test_default" with the default driver
+Creating volume "test_taosdata-td1" with default driver
+Creating volume "test_taoslog-td1" with default driver
+Creating volume "test_taosdata-td2" with default driver
+Creating volume "test_taoslog-td2" with default driver
+Creating test_td-1_1       ... done
+Creating test_arbitrator_1 ... done
+Creating test_td-2_1       ... done
+```
+
+Check the status:
+
+```bash
+$ docker-compose ps
+       Name                     Command              State   Ports
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+test_arbitrator_1   /usr/bin/entrypoint.sh tar ...   Up      6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
+test_td-1_1         /usr/bin/entrypoint.sh taosd     Up      6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
+test_td-2_1         /usr/bin/entrypoint.sh taosd     Up      6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
+```
+
+Check the dnodes with the taos shell:
+
+```bash
+$ docker-compose exec td-1 taos -s "show dnodes"
+
+Welcome to the TDengine shell from Linux, Client Version:2.4.0.0
+Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
+
+taos> show dnodes
+   id   |           end_point            | vnodes | cores  |   status   | role  |       create_time       |      offline reason      |
+======================================================================================================================================
+      1 | td-1:6030                      |      1 |      8 | ready      | any   | 2022-01-18 02:47:42.871 |                          |
+      2 | td-2:6030                      |      0 |      8 | ready      | any   | 2022-01-18 02:47:43.518 |                          |
+      0 | arbitrator:6042                |      0 |      0 | ready      | arb   | 2022-01-18 02:47:43.633 | -                        |
+Query OK, 3 row(s) in set (0.000811s)
+```
+
+### Start a TDengine cluster with a scaled taosadapter service
+
+In the previous use case, you saw how to start other services shipped with TDengine (`taosd` is the default command). There's another important service you should know about:
+
+> **taosAdapter** is TDengine's companion tool and serves as a bridge/adapter between the TDengine cluster and applications. It provides an easy-to-use and efficient way to ingest data directly from data collection agents (such as Telegraf, StatsD, collectd). It also provides an InfluxDB/OpenTSDB-compatible data ingestion interface, allowing InfluxDB/OpenTSDB applications to migrate to TDengine seamlessly.
+
+`taosadapter` runs inside the `tdengine` image by default; you can disable it with `TAOS_DISABLE_ADAPTER=true`. Running `taosadapter` in a separate container works the same way as `arbitrator`:
+
+```yaml
+services:
+  # ...
+  adapter:
+    image: tdengine/tdengine:$VERSION
+    command: taosadapter
+```
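+
+Conversely, if you want a container that runs only `taosd`, the built-in adapter can be switched off. A one-line sketch using the `TAOS_DISABLE_ADAPTER` switch mentioned above:
+
+```bash
+# run taosd alone, with the bundled taosadapter disabled
+docker run -d --name taosd-only -e TAOS_DISABLE_ADAPTER=true tdengine/tdengine
+```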
+
+`taosadapter` can be scaled with docker-compose, so that you can manage the `taosadapter` nodes easily. Here is an example that runs 4 `taosadapter` instances in a TDengine cluster (much like the previous use cases):
+
+```yaml
+version: "3"
+
+networks:
+  inter:
+  api:
+
+services:
+  arbitrator:
+    image: tdengine/tdengine:$VERSION
+    command: tarbitrator
+    networks:
+      - inter
+  td-1:
+    image: tdengine/tdengine:$VERSION
+    networks:
+      - inter
+    environment:
+      TAOS_FQDN: "td-1"
+      TAOS_FIRST_EP: "td-1"
+      TAOS_NUM_OF_MNODES: "2"
+      TAOS_REPLICA: "2"
+      TAOS_ARBITRATOR: arbitrator:6042
+    volumes:
+      - taosdata-td1:/var/lib/taos/
+      - taoslog-td1:/var/log/taos/
+  td-2:
+    image: tdengine/tdengine:$VERSION
+    networks:
+      - inter
+    environment:
+      TAOS_FQDN: "td-2"
+      TAOS_FIRST_EP: "td-1"
+      TAOS_NUM_OF_MNODES: "2"
+      TAOS_REPLICA: "2"
+      TAOS_ARBITRATOR: arbitrator:6042
+    volumes:
+      - taosdata-td2:/var/lib/taos/
+      - taoslog-td2:/var/log/taos/
+  adapter:
+    image: tdengine/tdengine:$VERSION
+    command: taosadapter
+    networks:
+      - inter
+    environment:
+      TAOS_FIRST_EP: "td-1"
+      TAOS_SECOND_EP: "td-2"
+    deploy:
+      replicas: 4
+  nginx:
+    image: nginx
+    depends_on:
+      - adapter
+    networks:
+      - inter
+      - api
+    ports:
+      - 6041:6041
+      - 6044:6044/udp
+    command: [
+        "sh",
+        "-c",
+        "while true;
+        do curl -s http://adapter:6041/-/ping >/dev/null && break;
+        done;
+        printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
+        > /etc/nginx/conf.d/rest.conf;
+        printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
+        >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
+        nginx -g 'daemon off;'",
+      ]
+volumes:
+  taosdata-td1:
+  taoslog-td1:
+  taosdata-td2:
+  taoslog-td2:
+```
+
+Start the cluster:
+
+```bash
+$ VERSION=2.4.0.0 docker-compose up -d
+Creating network "docker_inter" with the default driver
+Creating network "docker_api" with the default driver
+Creating volume "docker_taosdata-td1" with default driver
+Creating volume "docker_taoslog-td1" with default driver
+Creating volume "docker_taosdata-td2" with default driver
+Creating volume "docker_taoslog-td2" with default driver
+Creating docker_td-2_1       ... done
+Creating docker_arbitrator_1 ... done
+Creating docker_td-1_1       ... done
+Creating docker_adapter_1    ... done
+Creating docker_adapter_2    ... done
+Creating docker_adapter_3    ... done
+```
+
+This starts a TDengine cluster with two dnodes and four taosadapter instances, exposing ports 6041/tcp and 6044/udp to the host.
+
+`6041` is the RESTful API endpoint port; you can verify that the RESTful interface provided by taosAdapter is working using the `curl` command.
+
+```bash
+$ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
+{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2022-01-18 04:37:42.902",16,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1}
+```
+
+If you run curl in batch (here we use [hyperfine](https://github.com/sharkdp/hyperfine), a command-line benchmarking tool), the requests are balanced across the 4 adapter instances.
+
+```bash
+hyperfine -m10 'curl -u root:taosdata localhost:6041/rest/sql -d "describe log.log"'
+```
+
+View the logs with `docker-compose logs`:
+
+```bash
+$ docker-compose logs adapter
+# some logs skipped
+adapter_2  | 01/18 04:57:44.616529 00000039 TAOS_ADAPTER info "| 200 | 162.185µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18
+adapter_1  | 01/18 04:57:44.627695 00000039 TAOS_ADAPTER info "| 200 | 145.485µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=17
+adapter_3  | 01/18 04:57:44.639165 00000040 TAOS_ADAPTER info "| 200 | 146.913µs | 172.21.0.9 | POST | /rest/sql " sessionID=17 model=web
+adapter_4  | 01/18 04:57:44.650829 00000039 TAOS_ADAPTER info "| 200 | 153.201µs | 172.21.0.9 | POST | /rest/sql " sessionID=17 model=web
+adapter_2  | 01/18 04:57:44.662422 00000039 TAOS_ADAPTER info "| 200 | 211.393µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=19
+adapter_1  | 01/18 04:57:44.673426 00000039 TAOS_ADAPTER info "| 200 | 154.714µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18
+adapter_3  | 01/18 04:57:44.684788 00000040 TAOS_ADAPTER info "| 200 | 131.876µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18
+adapter_4  | 01/18 04:57:44.696261 00000039 TAOS_ADAPTER info "| 200 | 162.173µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18
+adapter_2  | 01/18 04:57:44.707414 00000039 TAOS_ADAPTER info "| 200 | 164.419µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20
+adapter_1  | 01/18 04:57:44.720842 00000039 TAOS_ADAPTER info "| 200 | 179.374µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=19
+adapter_3  | 01/18 04:57:44.732184 00000040 TAOS_ADAPTER info "| 200 | 141.174µs | 172.21.0.9 | POST | /rest/sql " sessionID=19 model=web
+adapter_4  | 01/18 04:57:44.744024 00000039 TAOS_ADAPTER info "| 200 | 159.774µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=19
+adapter_2  | 01/18 04:57:44.773732 00000039 TAOS_ADAPTER info "| 200 | 178.993µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=21
+adapter_1  | 01/18 04:57:44.796518 00000039 TAOS_ADAPTER info "| 200 | 238.24µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20
+adapter_3  | 01/18 04:57:44.810744 00000040 TAOS_ADAPTER info "| 200 | 176.133µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20
+adapter_4  | 01/18 04:57:44.826395 00000039 TAOS_ADAPTER info "| 200 | 149.215µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20
+```
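+
+The round-robin behavior comes from docker-compose registering all `adapter` replicas under a single DNS name, which nginx uses in its `proxy_pass`. A quick way to observe this, assuming the network is named `docker_inter` as in the `up` output above and that the `busybox` image is available:
+
+```bash
+# each replica's IP address shows up behind the one service name
+docker run --rm --network docker_inter busybox nslookup adapter
+```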
+
+`6044/udp` is the [StatsD](https://github.com/statsd/statsd)-compatible port; you can verify this feature with the `nc` command (usually provided by the `netcat` package).
+
+```bash
+echo "foo:1|c" | nc -u -w0 127.0.0.1 6044
+```
+
+Check the result in the `taos` shell with `docker-compose exec`:
+
+```bash
+$ docker-compose exec td-1 taos
+
+Welcome to the TDengine shell from Linux, Client Version:2.4.0.0
+Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
+
+taos> show databases;
+              name              |      created_time       | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update |   status   |
+====================================================================================================================================================================================================================================================================================
+ log                            | 2022-01-18 04:37:42.902 |      17 |       1 |       1 |      1 |   10 |   30 |         1 |      3 |     100 |    4096 |        1 |  3000 |    2 |         0 | us        |      0 | ready      |
+ statsd                         | 2022-01-18 04:45:02.563 |       1 |       1 |       2 |      1 |   10 | 3650 |        16 |      6 |     100 |    4096 |        1 |  3000 |    2 |         0 | ns        |      2 | ready      |
+Query OK, 2 row(s) in set (0.001838s)
+
+taos> select * from statsd.foo;
+              ts               | value | metric_type |
+=======================================================================================
+ 2022-01-18 04:45:02.563422822 |     1 | counter     |
+Query OK, 1 row(s) in set (0.003854s)
+```
+
+Use `docker-compose up -d --scale adapter=1` to reduce the adapter instances to 1.
+
+### Deploy TDengine cluster in Docker Swarm with `docker-compose.yml`
+
+If you use docker swarm mode, the arbitrator/taosd/taosadapter services will be scheduled onto different hosts automatically. If you have no experience with k8s/kubernetes, this is the most convenient way to scale out a TDengine cluster across multiple hosts/servers.
+
+Use the `docker-compose.yml` file from the previous use case, and deploy it with `docker stack` (or `docker deploy`):
+
+```bash
+$ VERSION=2.4.0 docker stack deploy -c docker-compose.yml taos
+Creating network taos_inter
+Creating network taos_api
+Creating service taos_arbitrator
+Creating service taos_td-1
+Creating service taos_td-2
+Creating service taos_adapter
+Creating service taos_nginx
+```
+
+Now you've created a TDengine cluster with multiple host servers.
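+
+You can verify the deployment end to end the same way as before. A quick sketch, assuming the nginx service publishes port 6041 via the swarm ingress (as in the compose file above), so it is reachable on any node:
+
+```bash
+# query the cluster through the published REST port (default credentials)
+curl -u root:taosdata localhost:6041/rest/sql -d "show dnodes"
+```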
+
+Use `docker service` or `docker stack` to manage the cluster:
+
+```bash
+$ docker stack ps taos
+ID             NAME                IMAGE                     NODE     DESIRED STATE   CURRENT STATE                ERROR   PORTS
+79ni8temw59n   taos_nginx.1        nginx:latest              TM1701   Running         Running about a minute ago
+3e94u72msiyg   taos_adapter.1      tdengine/tdengine:2.4.0   TM1702   Running         Running 56 seconds ago
+100amjkwzsc6   taos_td-2.1         tdengine/tdengine:2.4.0   TM1703   Running         Running about a minute ago
+pkjehr2vvaaa   taos_td-1.1         tdengine/tdengine:2.4.0   TM1704   Running         Running 2 minutes ago
+tpzvgpsr1qkt   taos_arbitrator.1   tdengine/tdengine:2.4.0   TM1705   Running         Running 2 minutes ago
+rvss3g5yg6fa   taos_adapter.2      tdengine/tdengine:2.4.0   TM1706   Running         Running 56 seconds ago
+i2augxamfllf   taos_adapter.3      tdengine/tdengine:2.4.0   TM1707   Running         Running 56 seconds ago
+lmjyhzccpvpg   taos_adapter.4      tdengine/tdengine:2.4.0   TM1708   Running         Running 56 seconds ago
+$ docker service ls
+ID             NAME              MODE         REPLICAS   IMAGE                     PORTS
+561t4lu6nfw6   taos_adapter      replicated   4/4        tdengine/tdengine:2.4.0
+3hk5ct3q90sm   taos_arbitrator   replicated   1/1        tdengine/tdengine:2.4.0
+d8qr52envqzu   taos_nginx        replicated   1/1        nginx:latest              *:6041->6041/tcp, *:6044->6044/udp
+2isssfvjk747   taos_td-1         replicated   1/1        tdengine/tdengine:2.4.0
+9pzw7u02ichv   taos_td-2         replicated   1/1        tdengine/tdengine:2.4.0
+```
+
+This shows that there are two dnodes, one arbitrator, four taosadapter instances, and one nginx reverse-proxy service in the cluster.
+
+You can scale the taosadapter replicas down to `1` with `docker service`:
+
+```bash
+$ docker service scale taos_adapter=1
+taos_adapter scaled to 1
+overall progress: 1 out of 1 tasks
+1/1: running   [==================================================>]
+verify: Service converged
+
+$ docker service ls -f name=taos_adapter
+ID             NAME           MODE         REPLICAS   IMAGE                     PORTS
+561t4lu6nfw6   taos_adapter   replicated   1/1        tdengine/tdengine:2.4.0
+```
+
+Now only 1 taosadapter instance remains in the cluster.
+
+When you want to remove the cluster, just type:
+
+```bash
+docker stack rm taos
+```
+
+### Environment Variables
+
+When you start the `tdengine` image, you can adjust the configuration of TDengine by passing environment variables on the `docker run` command line or in the docker-compose file. Any configuration parameter accepted by taosd or taosadapter can be passed as an environment variable.
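+
+For example, a minimal sketch assuming the `TAOS_` prefix convention used throughout this guide, where a `TAOS_SNAKE_CASE` variable maps to the corresponding camelCase entry in `taos.cfg`:
+
+```bash
+# TAOS_NUM_OF_MNODES becomes the numOfMnodes option in taos.cfg
+docker run -d --name tdengine \
+    -e TAOS_FQDN=tdengine \
+    -e TAOS_NUM_OF_MNODES=1 \
+    tdengine/tdengine
+```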
diff --git a/packaging/docker/bin/entrypoint.sh b/packaging/docker/bin/entrypoint.sh new file mode 100755 index 0000000000000000000000000000000000000000..5fb441004d8b454de1039eb3f4b23eb51f32be64 --- /dev/null +++ b/packaging/docker/bin/entrypoint.sh @@ -0,0 +1,83 @@ +#!/bin/sh +set -e +# for TZ awareness +if [ "$TZ" != "" ]; then + ln -sf /usr/share/zoneinfo/$TZ /etc/localtime + echo $TZ >/etc/timezone +fi + +# option to disable taosadapter, default is no +DISABLE_ADAPTER=${TAOS_DISABLE_ADAPTER:-0} +unset TAOS_DISABLE_ADAPTER + +# to get mnodeEpSet from data dir +DATA_DIR=${TAOS_DATA_DIR:-/var/lib/taos} + +# append env to custom taos.cfg +CFG_DIR=/tmp/taos +CFG_FILE=$CFG_DIR/taos.cfg + +mkdir -p $CFG_DIR >/dev/null 2>&1 + +[ -f /etc/taos/taos.cfg ] && cat /etc/taos/taos.cfg | grep -E -v "^#|^\s*$" >$CFG_FILE +env-to-cfg >>$CFG_FILE + +FQDN=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep fqdn | tail -n1 | sed -E 's/.*fqdn\s+//') + +# ensure the fqdn is resolved as localhost +grep "$FQDN" /etc/hosts >/dev/null || echo "127.0.0.1 $FQDN" >>/etc/hosts + +# parse first ep host and port +FIRST_EP_HOST=${TAOS_FIRST_EP%:*} +FIRST_EP_PORT=${TAOS_FIRST_EP#*:} + +# in case of custom server port +SERVER_PORT=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep serverPort | tail -n1 | sed -E 's/.*serverPort\s+//') +SERVER_PORT=${SERVER_PORT:-6030} + +# for other binaries like interpreters +if echo $1 | grep -E "taosd$" - >/dev/null; then + true # will run taosd +else + cp -f $CFG_FILE /etc/taos/taos.cfg || true + $@ + exit $? +fi + +set +e +ulimit -c unlimited +# set core files pattern, maybe failed +sysctl -w kernel.core_pattern=/corefile/core-$FQDN-%e-%p >/dev/null >&1 +set -e + +if [ "$DISABLE_ADAPTER" = "0" ]; then + which taosadapter >/dev/null && taosadapter & + # wait for 6041 port ready + for _ in $(seq 1 20); do + nc -z localhost 6041 && break + sleep 0.5 + done +fi + +# if has mnode ep set or the host is first ep or not for cluster, just start. +if [ -f "$DATA_DIR/dnode/mnodeEpSet.json" ] || + [ "$TAOS_FQDN" = "$FIRST_EP_HOST" ]; then + $@ -c $CFG_DIR +# others will first wait the first ep ready. +else + if [ "$TAOS_FIRST_EP" = "" ]; then + echo "run TDengine with single node." + $@ -c $CFG_DIR + exit $? + fi + while true; do + es=0 + taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -n startup >/dev/null || es=$? 
+ if [ "$es" -eq 0 ]; then + taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -s "create dnode \"$FQDN:$SERVER_PORT\";" + break + fi + sleep 1s + done + $@ -c $CFG_DIR +fi diff --git a/packaging/docker/bin/env-to-cfg b/packaging/docker/bin/env-to-cfg new file mode 100755 index 0000000000000000000000000000000000000000..07be63e0a9aba74e271ccc20758cd2ab09fb44ed --- /dev/null +++ b/packaging/docker/bin/env-to-cfg @@ -0,0 +1,13 @@ +#!/bin/sh +set -e +self=$0 + +snake_to_camel_case() { + echo $1 | awk -F _ '{printf "%s", $1; for(i=2; i<=NF; i++) printf "%s", toupper(substr($i,1,1)) substr($i,2); print"";}' +} + +if echo $1 | grep -E "^$" - >/dev/null; then + export |grep -E 'TAOS_.*' -o| sed 's/TAOS_//' |tr A-Z a-z | awk -F"=" '{print "name=$(""'$self' " $1"); echo $name "$2}' |sh +else + snake_to_camel_case $1 +fi diff --git a/packaging/docker/docker-compose.yml b/packaging/docker/docker-compose.yml new file mode 100644 index 0000000000000000000000000000000000000000..301b41e7d43c2a894d866c1f0d45cf8d13328585 --- /dev/null +++ b/packaging/docker/docker-compose.yml @@ -0,0 +1,77 @@ +version: "3" + +networks: + inter: + api: + +services: + arbitrator: + image: tdengine/tdengine:$VERSION + command: tarbitrator + networks: + - inter + td-1: + image: tdengine/tdengine:$VERSION + networks: + - inter + environment: + TAOS_FQDN: "td-1" + TAOS_FIRST_EP: "td-1" + TAOS_NUM_OF_MNODES: "2" + TAOS_REPLICA: "2" + TAOS_ARBITRATOR: arbitrator:6042 + volumes: + - taosdata-td1:/var/lib/taos/ + - taoslog-td1:/var/log/taos/ + td-2: + image: tdengine/tdengine:$VERSION + networks: + - inter + environment: + TAOS_FQDN: "td-2" + TAOS_FIRST_EP: "td-1" + TAOS_NUM_OF_MNODES: "2" + TAOS_REPLICA: "2" + TAOS_ARBITRATOR: arbitrator:6042 + volumes: + - taosdata-td2:/var/lib/taos/ + - taoslog-td2:/var/log/taos/ + adapter: + image: tdengine/tdengine:$VERSION + command: taosadapter + networks: + - inter + environment: + TAOS_FIRST_EP: "td-1" + TOAS_SECOND_EP: "td-2" + deploy: + replicas: 4 + update_config: + parallelism: 4 + nginx: + image: nginx + depends_on: + - adapter + networks: + - inter + - api + ports: + - 6041:6041 + - 6044:6044/udp + command: [ + "sh", + "-c", + "while true; + do curl -s http://adapter:6041/-/ping >/dev/null && break; + done; + printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}' + > /etc/nginx/conf.d/rest.conf; + printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}' + >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf; + nginx -g 'daemon off;'", + ] +volumes: + taosdata-td1: + taoslog-td1: + taosdata-td2: + taoslog-td2: diff --git a/packaging/docker/dockerManifest.sh b/packaging/docker/dockerManifest.sh index 9f60b840d68577b751314e7ddecc9da98c20f8d6..71788423f6e58b2788346ef2804cd4d03ee54b02 100755 --- a/packaging/docker/dockerManifest.sh +++ b/packaging/docker/dockerManifest.sh @@ -44,30 +44,26 @@ echo "version=${version}" #docker manifest rm tdengine/tdengine #docker manifest rm tdengine/tdengine:${version} if [ "$verType" == "beta" ]; then - docker manifest inspect tdengine/tdengine-beta:latest - docker manifest inspect tdengine/tdengine-beta:${version} docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version} docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest - sleep 30 docker manifest rm tdengine/tdengine-beta:${version} docker manifest 
rm tdengine/tdengine-beta:latest docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version} docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest + docker manifest inspect tdengine/tdengine:latest + docker manifest inspect tdengine/tdengine:${version} docker login -u tdengine -p ${passWord} #replace the docker registry username and password docker manifest push tdengine/tdengine-beta:${version} docker manifest push tdengine/tdengine-beta:latest elif [ "$verType" == "stable" ]; then - docker manifest inspect tdengine/tdengine:latest - docker manifest inspect tdengine/tdengine:${version} docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version} docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest - sleep 30 docker manifest rm tdengine/tdengine:latest docker manifest rm tdengine/tdengine:${version} - docker manifest inspect tdengine/tdengine:latest - docker manifest inspect tdengine/tdengine:${version} docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version} docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest + docker manifest inspect tdengine/tdengine:latest + docker manifest inspect tdengine/tdengine:${version} docker login -u tdengine -p ${passWord} #replace the docker registry username and password docker manifest push tdengine/tdengine:${version} docker manifest push tdengine/tdengine:latest diff --git a/packaging/release.bat b/packaging/release.bat new file mode 100644 index 0000000000000000000000000000000000000000..c1cf7875a505852ce3f8c0b78029fedf481aed8f --- /dev/null +++ b/packaging/release.bat @@ -0,0 +1,62 @@ +@echo off + +set internal_dir=%~dp0\..\..\ +set community_dir=%~dp0\.. +cd %community_dir% +git checkout -- . +cd %community_dir%\packaging + +:: %1 name %2 version +if !%1==! GOTO USAGE +if !%2==! 
GOTO USAGE +if %1 == taos GOTO TAOS +if %1 == power GOTO POWER +if %1 == tq GOTO TQ +if %1 == pro GOTO PRO +if %1 == kh GOTO KH +if %1 == jh GOTO JH +GOTO USAGE + +:TAOS +goto RELEASE + +:POWER +call sed_power.bat %community_dir% +goto RELEASE + +:TQ +call sed_tq.bat %community_dir% +goto RELEASE + +:PRO +call sed_pro.bat %community_dir% +goto RELEASE + +:KH +call sed_kh.bat %community_dir% +goto RELEASE + +:JH +call sed_jh.bat %community_dir% +goto RELEASE + +:RELEASE +echo release windows-client-64 for %1, version: %2 +if not exist %internal_dir%\debug\ver-%2-64bit-%1 ( + md %internal_dir%\debug\ver-%2-64bit-%1 +) else ( + rd /S /Q %internal_dir%\debug\ver-%2-64bit-%1 + md %internal_dir%\debug\ver-%2-64bit-%1 +) +cd %internal_dir%\debug\ver-%2-64bit-%1 +call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64 +cmake ../../ -G "NMake Makefiles" -DVERNUMBER=%2 -DCPUTYPE=x64 +set CL=/MP4 +nmake install +goto EXIT0 + +:USAGE +echo Usage: release.bat $productName $version +goto EXIT0 + +:EXIT0 \ No newline at end of file diff --git a/packaging/release.sh b/packaging/release.sh index e24493bd0a834e79faadffd468e574f2554fbac1..46f42736d75cfef0ce9c265d0c006166086cc031 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -4,6 +4,13 @@ set -e #set -x +scriptDir=$(dirname $(readlink -f $0)) + +source $scriptDir/sed_power.sh +source $scriptDir/sed_tq.sh +source $scriptDir/sed_pro.sh +source $scriptDir/sed_kh.sh +source $scriptDir/sed_jh.sh # release.sh -v [cluster | edge] # -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] @@ -17,83 +24,82 @@ set -e # -H [ false | true] # set parameters by default value -verMode=edge # [cluster, edge] -verType=stable # [stable, beta] -cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...] -osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] -pagMode=full # [full | lite] -soMode=dynamic # [static | dynamic] -dbName=taos # [taos | power | tq | pro | kh | jh] -allocator=glibc # [glibc | jemalloc] +verMode=edge # [cluster, edge] +verType=stable # [stable, beta] +cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...] +osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] +pagMode=full # [full | lite] +soMode=dynamic # [static | dynamic] +dbName=taos # [taos | power | tq | pro | kh | jh] +allocator=glibc # [glibc | jemalloc] verNumber="" verNumberComp="1.0.0.0" httpdBuild=false -while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg -do +while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg; do case $arg in - v) - #echo "verMode=$OPTARG" - verMode=$( echo $OPTARG ) - ;; - V) - #echo "verType=$OPTARG" - verType=$(echo $OPTARG) - ;; - c) - #echo "cpuType=$OPTARG" - cpuType=$(echo $OPTARG) - ;; - l) - #echo "pagMode=$OPTARG" - pagMode=$(echo $OPTARG) - ;; - s) - #echo "soMode=$OPTARG" - soMode=$(echo $OPTARG) - ;; - d) - #echo "dbName=$OPTARG" - dbName=$(echo $OPTARG) - ;; - a) - #echo "allocator=$OPTARG" - allocator=$(echo $OPTARG) - ;; - n) - #echo "verNumber=$OPTARG" - verNumber=$(echo $OPTARG) - ;; - m) - #echo "verNumberComp=$OPTARG" - verNumberComp=$(echo $OPTARG) - ;; - o) - #echo "osType=$OPTARG" - osType=$(echo $OPTARG) - ;; - H) - #echo "httpdBuild=$OPTARG" - httpdBuild=$(echo $OPTARG) - ;; - h) - echo "Usage: `basename $0` -v [cluster | edge] " - echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] " - echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] 
" - echo " -V [stable | beta] " - echo " -l [full | lite] " - echo " -a [glibc | jemalloc] " - echo " -s [static | dynamic] " - echo " -d [taos | power | tq | pro | kh | jh] " - echo " -n [version number] " - echo " -m [compatible version number] " - echo " -H [false | true] " - exit 0 - ;; - ?) #unknow option - echo "unkonw argument" - exit 1 - ;; + v) + #echo "verMode=$OPTARG" + verMode=$(echo $OPTARG) + ;; + V) + #echo "verType=$OPTARG" + verType=$(echo $OPTARG) + ;; + c) + #echo "cpuType=$OPTARG" + cpuType=$(echo $OPTARG) + ;; + l) + #echo "pagMode=$OPTARG" + pagMode=$(echo $OPTARG) + ;; + s) + #echo "soMode=$OPTARG" + soMode=$(echo $OPTARG) + ;; + d) + #echo "dbName=$OPTARG" + dbName=$(echo $OPTARG) + ;; + a) + #echo "allocator=$OPTARG" + allocator=$(echo $OPTARG) + ;; + n) + #echo "verNumber=$OPTARG" + verNumber=$(echo $OPTARG) + ;; + m) + #echo "verNumberComp=$OPTARG" + verNumberComp=$(echo $OPTARG) + ;; + o) + #echo "osType=$OPTARG" + osType=$(echo $OPTARG) + ;; + H) + #echo "httpdBuild=$OPTARG" + httpdBuild=$(echo $OPTARG) + ;; + h) + echo "Usage: $(basename $0) -v [cluster | edge] " + echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] " + echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] " + echo " -V [stable | beta] " + echo " -l [full | lite] " + echo " -a [glibc | jemalloc] " + echo " -s [static | dynamic] " + echo " -d [taos | power | tq | pro | kh | jh] " + echo " -n [version number] " + echo " -m [compatible version number] " + echo " -H [false | true] " + exit 0 + ;; + ?) #unknow option + echo "unkonw argument" + exit 1 + ;; esac done @@ -105,7 +111,7 @@ if [ "$osType" != "Darwin" ]; then script_dir="$(dirname $(readlink -f $0))" top_dir="$(readlink -f ${script_dir}/..)" else - script_dir=`dirname $0` + script_dir=$(dirname $0) cd ${script_dir} script_dir="$(pwd)" top_dir=${script_dir}/.. @@ -126,7 +132,7 @@ function is_valid_version() { return 1 } -function vercomp () { +function vercomp() { if [[ $1 == $2 ]]; then echo 0 exit 0 @@ -136,11 +142,11 @@ function vercomp () { local i ver1=($1) ver2=($2) # fill empty fields in ver1 with zeros - for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do + for ((i = ${#ver1[@]}; i < ${#ver2[@]}; i++)); do ver1[i]=0 done - for ((i=0; i<${#ver1[@]}; i++)); do + for ((i = 0; i < ${#ver1[@]}; i++)); do if [[ -z ${ver2[i]} ]]; then # fill empty fields in ver2 with zeros ver2[i]=0 @@ -158,7 +164,7 @@ function vercomp () { } # 1. check version information -if ( ( ! is_valid_version $verNumber ) || ( ! is_valid_version $verNumberComp ) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]] ); then +if ( (! is_valid_version $verNumber) || (! 
is_valid_version $verNumberComp) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then echo "please enter correct version" exit 0 fi @@ -194,301 +200,40 @@ fi cd ${compile_dir} if [[ "$allocator" == "jemalloc" ]]; then - allocator_macro="-DJEMALLOC_ENABLED=true" + allocator_macro="-DJEMALLOC_ENABLED=true" else - allocator_macro="" -fi - -# for powerdb -if [[ "$dbName" == "power" ]]; then - # cmake/install.inc - sed -i "s/C:\/TDengine/C:\/PowerDB/g" ${top_dir}/cmake/install.inc - sed -i "s/taos\.exe/power\.exe/g" ${top_dir}/cmake/install.inc - sed -i "s/taosdemo\.exe/powerdemo\.exe/g" ${top_dir}/cmake/install.inc - # src/kit/shell/inc/shell.h - sed -i "s/taos_history/power_history/g" ${top_dir}/src/kit/shell/inc/shell.h - # src/inc/taosdef.h - sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/src/inc/taosdef.h - # src/util/src/tconfig.c - sed -i "s/taos config/power config/g" ${top_dir}/src/util/src/tconfig.c - sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/src/util/src/tconfig.c - sed -i "s/etc\/taos/etc\/power/g" ${top_dir}/src/util/src/tconfig.c - # src/kit/taosdemo/taosdemo.c - sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c - # src/util/src/tlog.c - sed -i "s/log\/taos/log\/power/g" ${top_dir}/src/util/src/tlog.c - # src/dnode/src/dnodeSystem.c - sed -i "s/TDengine/Power/g" ${top_dir}/src/dnode/src/dnodeSystem.c - sed -i "s/TDengine/Power/g" ${top_dir}/src/dnode/src/dnodeMain.c - sed -i "s/taosdlog/powerdlog/g" ${top_dir}/src/dnode/src/dnodeMain.c - # src/client/src/tscSystem.c - sed -i "s/taoslog/powerlog/g" ${top_dir}/src/client/src/tscSystem.c - # src/util/src/tnote.c - sed -i "s/taosinfo/powerinfo/g" ${top_dir}/src/util/src/tnote.c - # src/dnode/CMakeLists.txt - sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt - # src/kit/taosdump/taosdump.c - sed -i "s/TDengine/Power/g" ${top_dir}/src/kit/taosdump/taosdump.c - sed -i "s/Default is taosdata/Default is power/g" ${top_dir}/src/kit/taosdump/taosdump.c - sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/src/kit/taosdump/taosdump.c - sed -i "s/taos\/taos\.cfg/power\/power\.cfg/g" ${top_dir}/src/kit/taosdump/taosdump.c - # src/os/src/linux/linuxEnv.c - sed -i "s/etc\/taos/etc\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c - sed -i "s/lib\/taos/lib\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c - sed -i "s/log\/taos/log\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c - # src/os/src/windows/wEnv.c - sed -i "s/TDengine/PowerDB/g" ${top_dir}/src/os/src/windows/wEnv.c - # src/kit/shell/src/shellEngine.c - sed -i "s/TDengine shell/PowerDB shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/2020 by TAOS Data, Inc/2020 by PowerDB, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/\"taos> \"/\"power> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/prompt_size = 6/prompt_size = 7/g" ${top_dir}/src/kit/shell/src/shellEngine.c -fi - -# for tq -if [[ "$dbName" == "tq" ]]; then - # cmake/install.inc - sed -i "s/C:\/TDengine/C:\/TQueue/g" ${top_dir}/cmake/install.inc - sed -i "s/taos\.exe/tq\.exe/g" ${top_dir}/cmake/install.inc - sed -i "s/taosdemo\.exe/tqdemo\.exe/g" ${top_dir}/cmake/install.inc - # src/kit/shell/inc/shell.h - sed -i "s/taos_history/tq_history/g" ${top_dir}/src/kit/shell/inc/shell.h - # src/inc/taosdef.h - sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/src/inc/taosdef.h - # src/util/src/tconfig.c - sed -i "s/taos config/tq config/g" 
${top_dir}/src/util/src/tconfig.c - sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/src/util/src/tconfig.c - sed -i "s/etc\/taos/etc\/tq/g" ${top_dir}/src/util/src/tconfig.c - # src/kit/taosdemo/taosdemo.c - sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c - # src/util/src/tlog.c - sed -i "s/log\/taos/log\/tq/g" ${top_dir}/src/util/src/tlog.c - # src/dnode/src/dnodeSystem.c - sed -i "s/TDengine/TQueue/g" ${top_dir}/src/dnode/src/dnodeSystem.c - sed -i "s/TDengine/TQueue/g" ${top_dir}/src/dnode/src/dnodeMain.c - sed -i "s/taosdlog/tqdlog/g" ${top_dir}/src/dnode/src/dnodeMain.c - # src/client/src/tscSystem.c - sed -i "s/taoslog/tqlog/g" ${top_dir}/src/client/src/tscSystem.c - # src/util/src/tnote.c - sed -i "s/taosinfo/tqinfo/g" ${top_dir}/src/util/src/tnote.c - # src/dnode/CMakeLists.txt - sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt - # src/kit/taosdump/taosdump.c - sed -i "s/TDengine/TQueue/g" ${top_dir}/src/kit/taosdump/taosdump.c - sed -i "s/Default is taosdata/Default is tqueue/g" ${top_dir}/src/kit/taosdump/taosdump.c - sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/src/kit/taosdump/taosdump.c - sed -i "s/taos\/taos\.cfg/tq\/tq\.cfg/g" ${top_dir}/src/kit/taosdump/taosdump.c - # src/os/src/linux/linuxEnv.c - sed -i "s/etc\/taos/etc\/tq/g" ${top_dir}/src/os/src/linux/linuxEnv.c - sed -i "s/lib\/taos/lib\/tq/g" ${top_dir}/src/os/src/linux/linuxEnv.c - sed -i "s/log\/taos/log\/tq/g" ${top_dir}/src/os/src/linux/linuxEnv.c - # src/os/src/windows/wEnv.c - sed -i "s/TDengine/TQ/g" ${top_dir}/src/os/src/windows/wEnv.c - # src/kit/shell/src/shellEngine.c - sed -i "s/TDengine shell/TQ shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/2020 by TAOS Data, Inc/2020 by TQ, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/\"taos> \"/\"tq> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/prompt_size = 6/prompt_size = 4/g" ${top_dir}/src/kit/shell/src/shellEngine.c -fi - -# for prodb -if [[ "$dbName" == "pro" ]]; then - # cmake/install.inc - sed -i "s/C:\/TDengine/C:\/ProDB/g" ${top_dir}/cmake/install.inc - sed -i "s/taos\.exe/prodbc\.exe/g" ${top_dir}/cmake/install.inc - sed -i "s/taosdemo\.exe/prodemo\.exe/g" ${top_dir}/cmake/install.inc - # src/kit/shell/inc/shell.h - sed -i "s/taos_history/prodb_history/g" ${top_dir}/src/kit/shell/inc/shell.h - # src/inc/taosdef.h - sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/src/inc/taosdef.h - # src/util/src/tconfig.c - sed -i "s/taos config/prodb config/g" ${top_dir}/src/util/src/tconfig.c - sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/src/util/src/tconfig.c - sed -i "s/etc\/taos/etc\/ProDB/g" ${top_dir}/src/util/src/tconfig.c - # src/kit/taosdemo/taosdemo.c - sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c - sed -i "s/support@taosdata.com/support@hanatech.com.cn/g" ${top_dir}/src/kit/taosdemo/taosdemo.c - # src/util/src/tlog.c - sed -i "s/log\/taos/log\/ProDB/g" ${top_dir}/src/util/src/tlog.c - # src/dnode/src/dnodeSystem.c - sed -i "s/TDengine/ProDB/g" ${top_dir}/src/dnode/src/dnodeSystem.c - sed -i "s/TDengine/ProDB/g" ${top_dir}/src/dnode/src/dnodeMain.c - sed -i "s/taosdlog/prodlog/g" ${top_dir}/src/dnode/src/dnodeMain.c - # src/client/src/tscSystem.c - sed -i "s/taoslog/prolog/g" ${top_dir}/src/client/src/tscSystem.c - # src/util/src/tnote.c - sed -i "s/taosinfo/proinfo/g" ${top_dir}/src/util/src/tnote.c - # src/dnode/CMakeLists.txt - sed -i "s/taos\.cfg/prodb\.cfg/g" 
${top_dir}/src/dnode/CMakeLists.txt - # src/kit/taosdump/taosdump.c - sed -i "s/Default is taosdata/Default is prodb/g" ${top_dir}/src/kit/taosdump/taosdump.c - sed -i "s/taos\/taos\.cfg/ProDB\/prodb\.cfg/g" ${top_dir}/src/kit/taosdump/taosdump.c - sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/src/kit/taosdump/taosdump.c - sed -i "s/TDengine/ProDB/g" ${top_dir}/src/kit/taosdump/taosdump.c - # src/os/src/linux/linuxEnv.c - sed -i "s/etc\/taos/etc\/ProDB/g" ${top_dir}/src/os/src/linux/linuxEnv.c - sed -i "s/lib\/taos/lib\/ProDB/g" ${top_dir}/src/os/src/linux/linuxEnv.c - sed -i "s/log\/taos/log\/ProDB/g" ${top_dir}/src/os/src/linux/linuxEnv.c - # src/os/src/windows/wEnv.c - sed -i "s/TDengine/ProDB/g" ${top_dir}/src/os/src/windows/wEnv.c - # src/kit/shell/src/shellEngine.c - sed -i "s/TDengine shell/ProDB shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/2020 by TAOS Data, Inc/2020 by Hanatech, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/\"taos> \"/\"ProDB> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/prompt_size = 6/prompt_size = 7/g" ${top_dir}/src/kit/shell/src/shellEngine.c -fi - -# for KingHistorian -if [[ "$dbName" == "kh" ]]; then - # cmake/install.inc - sed -i "s/C:\/TDengine/C:\/KingHistorian/g" ${top_dir}/cmake/install.inc - sed -i "s/taos\.exe/khclient\.exe/g" ${top_dir}/cmake/install.inc - sed -i "s/taosdemo\.exe/khdemo\.exe/g" ${top_dir}/cmake/install.inc - # src/kit/shell/inc/shell.h - sed -i "s/taos_history/kh_history/g" ${top_dir}/src/kit/shell/inc/shell.h - # src/inc/taosdef.h - sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/src/inc/taosdef.h - # src/util/src/tconfig.c - sed -i "s/taos config/kh config/g" ${top_dir}/src/util/src/tconfig.c - sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/src/util/src/tconfig.c - sed -i "s/etc\/taos/etc\/kinghistorian/g" ${top_dir}/src/util/src/tconfig.c - # src/kit/taosdemo/taosdemo.c - sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c - sed -i "s/support@taosdata.com/support@wellintech.com/g" ${top_dir}/src/kit/taosdemo/taosdemo.c - # src/util/src/tlog.c - sed -i "s/log\/taos/log\/kinghistorian/g" ${top_dir}/src/util/src/tlog.c - # src/dnode/src/dnodeSystem.c - sed -i "s/TDengine/KingHistorian/g" ${top_dir}/src/dnode/src/dnodeSystem.c - sed -i "s/TDengine/KingHistorian/g" ${top_dir}/src/dnode/src/dnodeMain.c - sed -i "s/taosdlog/khserverlog/g" ${top_dir}/src/dnode/src/dnodeMain.c - # src/client/src/tscSystem.c - sed -i "s/taoslog/khclientlog/g" ${top_dir}/src/client/src/tscSystem.c - # src/util/src/tnote.c - sed -i "s/taosinfo/khinfo/g" ${top_dir}/src/util/src/tnote.c - # src/dnode/CMakeLists.txt - sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt - # src/dnode/CMakeLists.txt - sed -i "s/Default is taosdata/Default is khroot/g" ${top_dir}/src/kit/taosdump/taosdump.c - sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/src/kit/taosdump/taosdump.c - sed -i "s/TDengine/KingHistorian/g" ${top_dir}/src/kit/taosdump/taosdump.c - sed -i "s/taos\/taos\.cfg/kinghistorian\/kinghistorian\.cfg/g" ${top_dir}/src/kit/taosdump/taosdump.c - # src/os/src/linux/linuxEnv.c - sed -i "s/etc\/taos/etc\/kinghistorian/g" ${top_dir}/src/os/src/linux/linuxEnv.c - sed -i "s/lib\/taos/lib\/kinghistorian/g" ${top_dir}/src/os/src/linux/linuxEnv.c - sed -i "s/log\/taos/log\/kinghistorian/g" ${top_dir}/src/os/src/linux/linuxEnv.c - # src/os/src/windows/wEnv.c - sed -i 
"s/TDengine/KingHistorian/g" ${top_dir}/src/os/src/windows/wEnv.c - # src/kit/shell/src/shellEngine.c - sed -i "s/TDengine shell/KingHistorian shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/2020 by TAOS Data, Inc/2021 by Wellintech, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/\"taos> \"/\"kh> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c - sed -i "s/prompt_size = 6/prompt_size = 4/g" ${top_dir}/src/kit/shell/src/shellEngine.c + allocator_macro="" fi -# for jinheng -if [[ "$dbName" == "jh" ]]; then - # Following files to change: - # * src/client/src/tscSystem.c - # * src/inc/taosdef.h - # * src/kit/shell/CMakeLists.txt - # * src/kit/shell/inc/shell.h - # * src/kit/shell/src/shellEngine.c - # * src/kit/shell/src/shellWindows.c - # * src/kit/taosdemo/taosdemo.c - # * src/kit/taosdump/taosdump.c - # * src/os/src/linux/linuxEnv.c - # * src/os/src/windows/wEnv.c - # * src/util/src/tconfig.c - # * src/util/src/tlog.c - - # src/dnode/src/dnodeSystem.c - sed -i "s/TDengine/jh_iot/g" ${top_dir}/src/dnode/src/dnodeSystem.c - # src/dnode/src/dnodeMain.c - sed -i "s/TDengine/jh_iot/g" ${top_dir}/src/dnode/src/dnodeMain.c - # TODO: src/dnode/CMakeLists.txt +if [[ "$dbName" != "taos" ]]; then + replace_community_$dbName fi if [[ "$httpdBuild" == "true" ]]; then - BUILD_HTTP=true + BUILD_HTTP=true else - BUILD_HTTP=false + BUILD_HTTP=false fi if [[ "$verMode" == "cluster" ]]; then - BUILD_HTTP=internal + BUILD_HTTP=internal fi if [[ "$pagMode" == "full" ]]; then - BUILD_TOOLS=true + BUILD_TOOLS=true else - BUILD_TOOLS=false + BUILD_TOOLS=false fi # check support cpu type -if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then +if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]]; then if [ "$verMode" != "cluster" ]; then # community-version compile - cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro} + cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro} else - # enterprise-version compile - if [[ "$dbName" == "power" ]]; then - # enterprise/src/kit/perfMonitor/perfMonitor.c - sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c - sed -i "s/TDengine/PowerDB/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c - # enterprise/src/plugins/admin/src/httpAdminHandle.c - sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c - # enterprise/src/plugins/grant/src/grantMain.c - sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c - # enterprise/src/plugins/module/src/moduleMain.c - sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c - fi - if [[ "$dbName" == "tq" ]]; then - # 
enterprise/src/kit/perfMonitor/perfMonitor.c - sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c - sed -i "s/TDengine/TQueue/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c - # enterprise/src/plugins/admin/src/httpAdminHandle.c - sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c - # enterprise/src/plugins/grant/src/grantMain.c - sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c - # enterprise/src/plugins/module/src/moduleMain.c - sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c - fi - if [[ "$dbName" == "pro" ]]; then - # enterprise/src/kit/perfMonitor/perfMonitor.c - sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c - sed -i "s/TDengine/ProDB/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c - # enterprise/src/plugins/admin/src/httpAdminHandle.c - sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c - # enterprise/src/plugins/grant/src/grantMain.c - sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c - # enterprise/src/plugins/module/src/moduleMain.c - sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c - fi - if [[ "$dbName" == "kh" ]]; then - # enterprise/src/kit/perfMonitor/perfMonitor.c - sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c - sed -i "s/TDengine/KingHistorian/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c - # enterprise/src/plugins/admin/src/httpAdminHandle.c - sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c - # enterprise/src/plugins/grant/src/grantMain.c - sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c - # enterprise/src/plugins/module/src/moduleMain.c - sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c - fi - if [[ "$dbName" == "jh" ]]; then - # enterprise/src/kit/perfMonitor/perfMonitor.c - sed -i "s/\"taosdata\"/\"jhdata\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c - sed -i "s/TDengine/jh_iot/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c - # enterprise/src/plugins/admin/src/httpAdminHandle.c - #sed -i "s/taos\.cfg/taos\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c - # enterprise/src/plugins/grant/src/grantMain.c - #sed -i "s/taos\.cfg/taos\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c - # enterprise/src/plugins/module/src/moduleMain.c - #sed -i "s/taos\.cfg/taos\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + + if [[ "$dbName" != "taos" ]]; then + replace_enterprise_$dbName fi cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro} @@ -498,13 +243,13 @@ else exit 1 fi -CORES=`grep -c ^processor /proc/cpuinfo` +CORES=$(grep -c ^processor /proc/cpuinfo) if [[ "$allocator" == "jemalloc" ]]; then - # jemalloc need compile first, so disable parallel build - make -j ${CORES} && ${csudo}make install + # jemalloc 
need compile first, so disable parallel build + make -j ${CORES} && ${csudo}make install else - make -j ${CORES} && ${csudo}make install + make -j ${CORES} && ${csudo}make install fi cd ${curr_dir} @@ -525,16 +270,16 @@ if [ "$osType" != "Darwin" ]; then ${csudo}./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} if [[ "$pagMode" == "full" ]]; then - if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then - cd ${top_dir}/src/kit/taos-tools/packaging/deb - [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0" - - taos_tools_ver=$(git describe --tags|sed -e 's/ver-//g'|awk -F '-' '{print $1}') - ${csudo}./make-taos-tools-deb.sh ${top_dir} \ - ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType} - fi + if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then + cd ${top_dir}/src/kit/taos-tools/packaging/deb + [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0" + + taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}') + ${csudo}./make-taos-tools-deb.sh ${top_dir} \ + ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType} + fi fi - else + else echo "==========dpkg command not exist, so not release deb package!!!" fi ret='0' @@ -550,16 +295,16 @@ if [ "$osType" != "Darwin" ]; then ${csudo}./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} if [[ "$pagMode" == "full" ]]; then - if [ -d ${top_dir}/src/kit/taos-tools/packaging/rpm ]; then - cd ${top_dir}/src/kit/taos-tools/packaging/rpm - [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0" - - taos_tools_ver=$(git describe --tags|sed -e 's/ver-//g'|awk -F '-' '{print $1}'|sed -e 's/-/_/g') - ${csudo}./make-taos-tools-rpm.sh ${top_dir} \ - ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType} - fi + if [ -d ${top_dir}/src/kit/taos-tools/packaging/rpm ]; then + cd ${top_dir}/src/kit/taos-tools/packaging/rpm + [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0" + + taos_tools_ver=$(git describe --tags | sed -e 's/ver-//g' | awk -F '-' '{print $1}' | sed -e 's/-/_/g') + ${csudo}./make-taos-tools-rpm.sh ${top_dir} \ + ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType} + fi fi - else + else echo "==========rpmbuild command not exist, so not release rpm package!!!" 
fi fi @@ -567,31 +312,9 @@ if [ "$osType" != "Darwin" ]; then echo "====do tar.gz package for all systems====" cd ${script_dir}/tools - if [[ "$dbName" == "taos" ]]; then - ${csudo}./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} - ${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} - ${csudo}./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} - elif [[ "$dbName" == "tq" ]]; then - ${csudo}./makepkg_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} - ${csudo}./makeclient_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} - ${csudo}./makearbi_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} - elif [[ "$dbName" == "pro" ]]; then - ${csudo}./makepkg_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} - ${csudo}./makeclient_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} - ${csudo}./makearbi_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} - elif [[ "$dbName" == "kh" ]]; then - ${csudo}./makepkg_kh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} - ${csudo}./makeclient_kh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} - ${csudo}./makearbi_kh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} - elif [[ "$dbName" == "jh" ]]; then - ${csudo}./makepkg_jh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} - ${csudo}./makeclient_jh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} - ${csudo}./makearbi_jh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} - else - ${csudo}./makepkg_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} - ${csudo}./makeclient_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} - ${csudo}./makearbi_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} - fi + ${csudo}./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} + ${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} + ${csudo}./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} else # only make client for Darwin cd ${script_dir}/tools diff --git a/packaging/rpm/powerd b/packaging/rpm/powerd deleted file mode 100644 index bf7f19aea2d0e82e6ff46667a421fece44d149bf..0000000000000000000000000000000000000000 --- a/packaging/rpm/powerd +++ /dev/null @@ -1,145 +0,0 @@ -#!/bin/bash -# -# power This shell script takes care of starting and stopping PowerDB. 
-# -# chkconfig: 2345 99 01 -# description: PowerDB is a districuted, scalable, high-performance Time Series Database -# (TSDB). More than just a pure database, PowerDB also provides the ability -# to do stream computing, aggregation etc. -# -# -### BEGIN INIT INFO -# Provides: powerd -# Required-Start: $network $local_fs $remote_fs -# Required-Stop: $network $local_fs $remote_fs -# Short-Description: start and stop powerd -# Description: PowerDB is a districuted, scalable, high-performance Time Series Database -# (TSDB). More than just a pure database, PowerDB also provides the ability -# to do stream computing, aggregation etc. -### END INIT INFO - -# Source init functions -. /etc/init.d/functions - -# Maximum number of open files -MAX_OPEN_FILES=65535 - -# Default program options -NAME=powerd -PROG=/usr/local/power/bin/powerd -USER=root -GROUP=root - -# Default directories -LOCK_DIR=/var/lock/subsys -PID_DIR=/var/run/$NAME - -# Set file names -LOCK_FILE=$LOCK_DIR/$NAME -PID_FILE=$PID_DIR/$NAME.pid - -[ -e $PID_DIR ] || mkdir -p $PID_DIR - -PROG_OPTS="" - -start() { - echo -n "Starting ${NAME}: " - # check identity - curid="`id -u -n`" - if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then - echo "Must be run as root or $USER, but was run as $curid" - return 1 - fi - # Sets the maximum number of open file descriptors allowed. - ulimit -n $MAX_OPEN_FILES - curulimit="`ulimit -n`" - if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then - echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit" - return 1 - fi - - if [ "`id -u -n`" == root ] ; then - # Changes the owner of the lock, and the pid files to allow - # non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py. - touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE - touch $PID_FILE && chown $USER:$GROUP $PID_FILE - daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &" - else - # Don't have to change user. - daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &" - fi - retval=$? - sleep 2 - echo - [ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE) - return $retval -} - -stop() { - echo -n "Stopping ${NAME}: " - killproc -p $PID_FILE $NAME - retval=$? - echo - # Non-root users don't have enough permission to remove pid and lock files. - # So, the opentsdb_restart.py cannot get rid of the files, and the command - # "service opentsdb status" will complain about the existing pid file. - # Makes the pid file empty. - echo > $PID_FILE - [ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE) - return $retval -} - -restart() { - stop - start -} - -reload() { - restart -} - -force_reload() { - restart -} - -rh_status() { - # run checks to determine if the service is running or use generic status - status -p $PID_FILE -l $LOCK_FILE $NAME -} - -rh_status_q() { - rh_status >/dev/null 2>&1 -} - -case "$1" in - start) - rh_status_q && exit 0 - $1 - ;; - stop) - rh_status_q || exit 0 - $1 - ;; - restart) - $1 - ;; - reload) - rh_status_q || exit 7 - $1 - ;; - force-reload) - force_reload - ;; - status) - rh_status - ;; - condrestart|try-restart) - rh_status_q || exit 0 - restart - ;; - *) - echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" - exit 2 -esac - -exit $? 
diff --git a/packaging/sed_jh.bat b/packaging/sed_jh.bat new file mode 100644 index 0000000000000000000000000000000000000000..f7ce46562c913f4a6043c872fdec0104d0153d46 --- /dev/null +++ b/packaging/sed_jh.bat @@ -0,0 +1,76 @@ +set sed="C:\Program Files\Git\usr\bin\sed.exe" +set community_dir=%1 + +::cmake\install.inc +%sed% -i "s/C:\/TDengine/C:\/jh_iot/g" %community_dir%\cmake\install.inc +%sed% -i "s/taos\.cfg/jh_taos\.cfg/g" %community_dir%\cmake\install.inc +%sed% -i "s/taos\.exe/jh_taos\.exe/g" %community_dir%\cmake\install.inc +%sed% -i "s/taosdemo\.exe/jhdemo\.exe/g" %community_dir%\cmake\install.inc +%sed% -i "/src\/connector/d" %community_dir%\cmake\install.inc +%sed% -i "/tests\/examples/d" %community_dir%\cmake\install.inc +::src\kit\shell\CMakeLists.txt +%sed% -i "s/OUTPUT_NAME taos/OUTPUT_NAME jh_taos/g" %community_dir%\src\kit\shell\CMakeLists.txt +::src\kit\shell\inc\shell.h +%sed% -i "s/taos_history/jh_taos_history/g" %community_dir%\src\kit\shell\inc\shell.h +::src\inc\taosdef.h +%sed% -i "s/\"taosdata\"/\"jhdata\"/g" %community_dir%\src\inc\taosdef.h +::src\util\src\tconfig.c +%sed% -i "s/taos\.cfg/jh_taos\.cfg/g" %community_dir%\src\util\src\tconfig.c +%sed% -i "s/etc\/taos/etc\/jh_taos/g" %community_dir%\src\util\src\tconfig.c +::src\kit\taosdemo\CMakeLists.txt +%sed% -i "s/ADD_EXECUTABLE(taosdemo/ADD_EXECUTABLE(jhdemo/g" %community_dir%\src\kit\taosdemo\CMakeLists.txt +%sed% -i "s/TARGET_LINK_LIBRARIES(taosdemo/TARGET_LINK_LIBRARIES(jhdemo/g" %community_dir%\src\kit\taosdemo\CMakeLists.txt +::src\kit\taosdemo\taosdemo.c +%sed% -i "s/taosdemo --help/jhdemo --help/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/taosdemo --usage/jhdemo --usage/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/Usage: taosdemo/Usage: jhdemo/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/taosdemo is simulating/jhdemo is simulating/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/taosdemo version/jhdemo version/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/\"taosdata\"/\"jhdata\"/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/support@taosdata\.com/jhkj@njsteel\.com\.cn/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/taosc, rest, and stmt/jh_taos, rest, and stmt/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/taosdemo uses/jhdemo uses/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/use 'taosc'/use 'jh_taos'/g" %community_dir%\src\kit\taosdemo\taosdemo.c +::src\util\src\tlog.c +%sed% -i "s/log\/taos/log\/jh_taos/g" %community_dir%\src\util\src\tlog.c +::src\dnode\src\dnodeSystem.c +%sed% -i "s/TDengine/jh_iot/g" %community_dir%\src\dnode\src\dnodeSystem.c +::src\dnode\src\dnodeMain.c +%sed% -i "s/TDengine/jh_iot/g" %community_dir%\src\dnode\src\dnodeMain.c +%sed% -i "s/taosdlog/jh_taosdlog/g" %community_dir%\src\dnode\src\dnodeMain.c +::src\client\src\tscSystem.c +%sed% -i "s/taoslog/jh_taoslog/g" %community_dir%\src\client\src\tscSystem.c +::src\util\src\tnote.c +%sed% -i "s/taosinfo/jh_taosinfo/g" %community_dir%\src\util\src\tnote.c +::src\dnode\CMakeLists.txt +%sed% -i "s/taos\.cfg/jh_taos\.cfg/g" %community_dir%\src\dnode\CMakeLists.txt +::src\kit\taosdump\taosdump.c +%sed% -i "s/support@taosdata\.com/jhkj@njsteel\.com\.cn/g" %community_dir%\src\kit\taosdump\taosdump.c +%sed% -i "s/Default is taosdata/Default is jhdata/g" %community_dir%\src\kit\taosdump\taosdump.c +%sed% -i "s/\"taosdata\"/\"jhdata\"/g" %community_dir%\src\kit\taosdump\taosdump.c +%sed% -i "s/TDengine/jh_iot/g" 
%community_dir%\src\kit\taosdump\taosdump.c +%sed% -i "s/taos\/taos\.cfg/jh_taos\/jh_taos\.cfg/g" %community_dir%\src\kit\taosdump\taosdump.c +::src\os\src\linux\linuxEnv.c +%sed% -i "s/etc\/taos/etc\/jh_taos/g" %community_dir%\src\os\src\linux\linuxEnv.c +%sed% -i "s/lib\/taos/lib\/jh_taos/g" %community_dir%\src\os\src\linux\linuxEnv.c +%sed% -i "s/log\/taos/log\/jh_taos/g" %community_dir%\src\os\src\linux\linuxEnv.c +::src\kit\shell\src\shellDarwin.c +%sed% -i "s/TDengine shell/jh_iot shell/g" %community_dir%\src\kit\shell\src\shellDarwin.c +%sed% -i "s/2020 by TAOS Data/2021 by Jinheng Technology/g" %community_dir%\src\kit\shell\src\shellDarwin.c +::src\kit\shell\src\shellLinux.c +%sed% -i "s/support@taosdata\.com/jhkj@njsteel\.com\.cn/g" %community_dir%\src\kit\shell\src\shellLinux.c +%sed% -i "s/TDengine shell/jh_iot shell/g" %community_dir%\src\kit\shell\src\shellLinux.c +%sed% -i "s/2020 by TAOS Data/2021 by Jinheng Technology/g" %community_dir%\src\kit\shell\src\shellLinux.c +::src\os\src\windows\wEnv.c +%sed% -i "s/TDengine/jh_iot/g" %community_dir%\src\os\src\windows\wEnv.c +::src\kit\shell\src\shellEngine.c +%sed% -i "s/TDengine shell/jh_iot shell/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/2020 by TAOS Data, Inc/2021 by Jinheng Technology, Inc/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/taos connect failed/jh_taos connect failed/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/\"taos^> \"/\"jh_taos^> \"/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/\" -^> \"/\" -^> \"/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/prompt_size = 6/prompt_size = 9/g" %community_dir%\src\kit\shell\src\shellEngine.c +::src\rpc\src\rpcMain.c +%sed% -i "s/taos connections/jh_taos connections/g" %community_dir%\src\rpc\src\rpcMain.c +::src\plugins\monitor\src\monMain.c +%sed% -i "s/taosd is quiting/jh_taosd is quiting/g" %community_dir%\src\plugins\monitor\src\monMain.c diff --git a/packaging/sed_jh.sh b/packaging/sed_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..0c288bee76c0745f5d3cf3b23d4aa103c1897c22 --- /dev/null +++ b/packaging/sed_jh.sh @@ -0,0 +1,162 @@ +#!/bin/bash + +function replace_community_jh() { + # cmake/install.inc + sed -i "s/C:\/TDengine/C:\/jh_iot/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.cfg/jh_taos\.cfg/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.exe/jh_taos\.exe/g" ${top_dir}/cmake/install.inc + # src/kit/shell/CMakeLists.txt + sed -i "s/OUTPUT_NAME taos/OUTPUT_NAME jh_taos/g" ${top_dir}/src/kit/shell/CMakeLists.txt + # src/kit/shell/inc/shell.h + sed -i "s/taos_history/jh_taos_history/g" ${top_dir}/src/kit/shell/inc/shell.h + # src/inc/taosdef.h + sed -i "s/\"taosdata\"/\"jhdata\"/g" ${top_dir}/src/inc/taosdef.h + # src/util/src/tconfig.c + sed -i "s/taos\.cfg/jh_taos\.cfg/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/etc\/taos/etc\/jh_taos/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/taos config/jh_taos config/g" ${top_dir}/src/util/src/tconfig.c + # src/util/src/tlog.c + sed -i "s/log\/taos/log\/jh_taos/g" ${top_dir}/src/util/src/tlog.c + # src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/jh_taos/g" ${top_dir}/src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/jh_taos/g" ${top_dir}/src/dnode/src/dnodeMain.c + sed -i "s/taosdlog/jh_taosdlog/g" ${top_dir}/src/dnode/src/dnodeMain.c + # src/client/src/tscSystem.c + sed -i "s/taoslog/jh_taoslog/g" ${top_dir}/src/client/src/tscSystem.c + # src/util/src/tnote.c + sed -i "s/taosinfo/jh_taosinfo/g" 
${top_dir}/src/util/src/tnote.c + # src/dnode/CMakeLists.txt + sed -i "s/taos\.cfg/jh_taos\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt + echo "SET_TARGET_PROPERTIES(taosd PROPERTIES OUTPUT_NAME jh_taosd)" >>${top_dir}/src/dnode/CMakeLists.txt + # src/os/src/linux/linuxEnv.c + sed -i "s/etc\/taos/etc\/jh_taos/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/lib\/taos/lib\/jh_taos/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/log\/taos/log\/jh_taos/g" ${top_dir}/src/os/src/linux/linuxEnv.c + # src/kit/shell/src/shellDarwin.c + sed -i "s/TDengine shell/jh_iot shell/g" ${top_dir}/src/kit/shell/src/shellDarwin.c + sed -i "s/2020 by TAOS Data/2021 by Jinheng Technology/g" ${top_dir}/src/kit/shell/src/shellDarwin.c + # src/kit/shell/src/shellLinux.c + sed -i "s/support@taosdata\.com/jhkj@njsteel\.com\.cn/g" ${top_dir}/src/kit/shell/src/shellLinux.c + sed -i "s/TDengine shell/jh_iot shell/g" ${top_dir}/src/kit/shell/src/shellLinux.c + sed -i "s/2020 by TAOS Data/2021 by Jinheng Technology/g" ${top_dir}/src/kit/shell/src/shellLinux.c + # src/os/src/windows/wEnv.c + sed -i "s/C:\/TDengine/C:\/jh_iot/g" ${top_dir}/src/os/src/windows/wEnv.c + # src/kit/shell/src/shellEngine.c + sed -i "s/TDengine shell/jh_iot shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/2020 by TAOS Data, Inc/2021 by Jinheng Technology, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/taos connect failed/jh_taos connect failed/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\"taos> \"/\"jh_taos> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/prompt_size = 6/prompt_size = 9/g" ${top_dir}/src/kit/shell/src/shellEngine.c + # src/rpc/src/rpcMain.c + sed -i "s/taos connections/jh_taos connections/g" ${top_dir}/src/rpc/src/rpcMain.c + # src/plugins/monitor/src/monMain.c + sed -i "s/taosd is quiting/jh_taosd is quiting/g" ${top_dir}/src/plugins/monitor/src/monMain.c + + # packaging/tools/makepkg.sh + sed -i "s/productName=\"TDengine\"/productName=\"jh_iot\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/serverName=\"taosd\"/serverName=\"jh_taosd\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/clientName=\"taos\"/clientName=\"jh_taos\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"jh_taos\.cfg\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"jh_taos\.tar\.gz\"/g" ${top_dir}/packaging/tools/makepkg.sh + # packaging/tools/remove.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/jh_taos\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/serverName=\"taosd\"/serverName=\"jh_taosd\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/clientName=\"taos\"/clientName=\"jh_taos\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmjh\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/productName=\"TDengine\"/productName=\"jh_iot\"/g" ${top_dir}/packaging/tools/remove.sh + # packaging/tools/startPre.sh + sed -i "s/serverName=\"taosd\"/serverName=\"jh_taosd\"/g" ${top_dir}/packaging/tools/startPre.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/jh_taos\"/g" ${top_dir}/packaging/tools/startPre.sh + # packaging/tools/run_taosd.sh + sed -i "s/taosd/jh_taosd/g" ${top_dir}/packaging/tools/run_taosd.sh + # packaging/tools/install.sh + sed -i "s/clientName=\"taos\"/clientName=\"jh_taos\"/g" 
${top_dir}/packaging/tools/install.sh + sed -i "s/serverName=\"taosd\"/serverName=\"jh_taosd\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"jh_taos\.cfg\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/productName=\"TDengine\"/productName=\"jh_iot\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/emailName=\"taosdata\.com\"/emailName=\"jhict\.com\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmjh\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/historyFile=\"taos_history\"/historyFile=\"jh_taos_history\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"jh_taos\.tar\.gz\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/dataDir=\"\/var\/lib\/taos\"/dataDir=\"\/var\/lib\/jh_taos\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/jh_taos\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/configDir=\"\/etc\/taos\"/configDir=\"\/etc\/jh_taos\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/jh_taos\"/g" ${top_dir}/packaging/tools/install.sh + + # packaging/tools/makeclient.sh + sed -i "s/productName=\"TDengine\"/productName=\"jh_iot\"/g" ${top_dir}/packaging/tools/makeclient.sh + sed -i "s/clientName=\"taos\"/clientName=\"jh_taos\"/g" ${top_dir}/packaging/tools/makeclient.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"jh_taos\.cfg\"/g" ${top_dir}/packaging/tools/makeclient.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"jh_taos\.tar\.gz\"/g" ${top_dir}/packaging/tools/makeclient.sh + # packaging/tools/remove_client.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/jh_taos\"/g" ${top_dir}/packaging/tools/remove_client.sh + sed -i "s/clientName=\"taos\"/clientName=\"jh_taos\"/g" ${top_dir}/packaging/tools/remove_client.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmjh\"/g" ${top_dir}/packaging/tools/remove_client.sh + # packaging/tools/install_client.sh + sed -i "s/dataDir=\"\/var\/lib\/taos\"/dataDir=\"\/var\/lib\/jh_iot\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/jh_taos\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/productName=\"TDengine\"/productName=\"jh_iot\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/jh_taos\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/configDir=\"\/etc\/taos\"/configDir=\"\/etc\/jh_taos\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/serverName=\"taosd\"/serverName=\"jh_taosd\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/clientName=\"taos\"/clientName=\"jh_taos\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmjh\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"jh_taos\.cfg\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"jh_taos\.tar\.gz\"/g" ${top_dir}/packaging/tools/install_client.sh + + # packaging/tools/makearbi.sh + sed -i "s/productName=\"TDengine\"/productName=\"jh_iot\"/g" ${top_dir}/packaging/tools/makearbi.sh + # packaging/tools/remove_arbi.sh + sed -i "s/TDengine/jh_iot/g" ${top_dir}/packaging/tools/remove_arbi.sh + # packaging/tools/install_arbi.sh + sed -i
"s/TDengine/jh_iot/g" ${top_dir}/packaging/tools/install_arbi.sh + sed -i "s/taosdata\.com/jhict\.com/g" ${top_dir}/packaging/tools/install_arbi.sh + + # packaging/tools/make_install.sh + sed -i "s/clientName=\"taos\"/clientName=\"jh_taos\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/serverName=\"taosd\"/serverName=\"jh_taosd\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/jh_taos\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/dataDir=\"\/var\/lib\/taos\"/dataDir=\"\/var\/lib\/jh_taos\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/configDir=\"\/etc\/taos\"/configDir=\"\/etc\/jh_taos\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"jh_taos\.cfg\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/jh_taos\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/productName=\"TDengine\"/productName=\"jh_iot\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/emailName=\"taosdata\.com\"/emailName=\"jhict\.com\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmjh\"/g" ${top_dir}/packaging/tools/make_install.sh + + # packaging/rpm/taosd + sed -i "s/TDengine/jh_iot/g" ${top_dir}/packaging/rpm/taosd + sed -i "s/usr\/local\/taos/usr\/local\/jh_taos/g" ${top_dir}/packaging/rpm/taosd + sed -i "s/taosd/jh_taosd/g" ${top_dir}/packaging/rpm/taosd + # packaging/deb/taosd + sed -i "s/TDengine/jh_iot/g" ${top_dir}/packaging/deb/taosd + sed -i "s/usr\/local\/taos/usr\/local\/jh_taos/g" ${top_dir}/packaging/deb/taosd + sed -i "s/taosd/jh_taosd/g" ${top_dir}/packaging/deb/taosd + +} + +function replace_enterprise_jh() { + # enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/\"taosdata\"/\"jhdata\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/TDengine/jh_iot/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + # enterprise/src/plugins/admin/src/httpAdminHandle.c + sed -i "s/taos\.cfg/jh_taos\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c + # enterprise/src/plugins/grant/src/grantMain.c + sed -i "s/taos\.cfg/jh_taos\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c + # enterprise/src/plugins/module/src/moduleMain.c + sed -i "s/taos\.cfg/jh_taos\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + + # enterprise/src/plugins/web + sed -i -e "s/www\.taosdata\.com/www\.jhict\.com\.cn/g" $(grep -r "www.taosdata.com" ${top_dir}/../enterprise/src/plugins/web | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e "s/2017, TAOS Data/2021, Jinheng Technology/g" $(grep -r "TAOS Data" ${top_dir}/../enterprise/src/plugins/web | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e "s/taosd/jh_taosd/g" $(grep -r "taosd" ${top_dir}/../enterprise/src/plugins/web | grep -E "*\.js\s*.*" | sed -r -e "s/(.*\.js):\s*(.*)/\1/g" | sort | uniq) + # enterprise/src/plugins/web/admin/monitor.html + sed -i -e "s/taosd<\/th>/jh_taosd<\/th>/g" ${top_dir}/../enterprise/src/plugins/web/admin/monitor.html + sed -i -e "s/data:\['taosd', 'system'\],/data:\['jh_taosd', 'system'\],/g" ${top_dir}/../enterprise/src/plugins/web/admin/monitor.html + sed -i -e "s/name: 'taosd',/name: 'jh_taosd',/g" ${top_dir}/../enterprise/src/plugins/web/admin/monitor.html + # enterprise/src/plugins/web/admin/*.html + sed -i "s/TDengine/jh_iot/g" 
${top_dir}/../enterprise/src/plugins/web/admin/*.html + # enterprise/src/plugins/web/admin/js/*.js + sed -i "s/TDengine/jh_iot/g" ${top_dir}/../enterprise/src/plugins/web/admin/js/*.js +} diff --git a/packaging/sed_kh.bat b/packaging/sed_kh.bat new file mode 100644 index 0000000000000000000000000000000000000000..975bdbbcc03d78f21b8b7532031d60f97a687d0a --- /dev/null +++ b/packaging/sed_kh.bat @@ -0,0 +1,76 @@ +set sed="C:\Program Files\Git\usr\bin\sed.exe" +set community_dir=%1 + +::cmake\install.inc +%sed% -i "s/C:\/TDengine/C:\/KingHistorian/g" %community_dir%\cmake\install.inc +%sed% -i "s/taos\.cfg/kinghistorian\.cfg/g" %community_dir%\cmake\install.inc +%sed% -i "s/taos\.exe/khclient\.exe/g" %community_dir%\cmake\install.inc +%sed% -i "s/taosdemo\.exe/khdemo\.exe/g" %community_dir%\cmake\install.inc +%sed% -i "/src\/connector/d" %community_dir%\cmake\install.inc +%sed% -i "/tests\/examples/d" %community_dir%\cmake\install.inc +::src\kit\shell\CMakeLists.txt +%sed% -i "s/OUTPUT_NAME taos/OUTPUT_NAME khclient/g" %community_dir%\src\kit\shell\CMakeLists.txt +::src\kit\shell\inc\shell.h +%sed% -i "s/taos_history/kh_history/g" %community_dir%\src\kit\shell\inc\shell.h +::src\inc\taosdef.h +%sed% -i "s/\"taosdata\"/\"khroot\"/g" %community_dir%\src\inc\taosdef.h +::src\util\src\tconfig.c +%sed% -i "s/taos\.cfg/kinghistorian\.cfg/g" %community_dir%\src\util\src\tconfig.c +%sed% -i "s/etc\/taos/etc\/kinghistorian/g" %community_dir%\src\util\src\tconfig.c +::src\kit\taosdemo\CMakeLists.txt +%sed% -i "s/ADD_EXECUTABLE(taosdemo/ADD_EXECUTABLE(khdemo/g" %community_dir%\src\kit\taosdemo\CMakeLists.txt +%sed% -i "s/TARGET_LINK_LIBRARIES(taosdemo/TARGET_LINK_LIBRARIES(khdemo/g" %community_dir%\src\kit\taosdemo\CMakeLists.txt +::src\kit\taosdemo\taosdemo.c +%sed% -i "s/taosdemo --help/khdemo --help/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/taosdemo --usage/khdemo --usage/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/Usage: taosdemo/Usage: khdemo/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/taosdemo is simulating/khdemo is simulating/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/taosdemo version/khdemo version/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/\"taosdata\"/\"khroot\"/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/support@taosdata\.com/support@wellintech\.com/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/taosc, rest, and stmt/khclient, rest, and stmt/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/taosdemo uses/khdemo uses/g" %community_dir%\src\kit\taosdemo\taosdemo.c +%sed% -i "s/use 'taosc'/use 'khclient'/g" %community_dir%\src\kit\taosdemo\taosdemo.c +::src\util\src\tlog.c +%sed% -i "s/log\/taos/log\/kinghistorian/g" %community_dir%\src\util\src\tlog.c +::src\dnode\src\dnodeSystem.c +%sed% -i "s/TDengine/KingHistorian/g" %community_dir%\src\dnode\src\dnodeSystem.c +::src\dnode\src\dnodeMain.c +%sed% -i "s/TDengine/KingHistorian/g" %community_dir%\src\dnode\src\dnodeMain.c +%sed% -i "s/taosdlog/khserverlog/g" %community_dir%\src\dnode\src\dnodeMain.c +::src\client\src\tscSystem.c +%sed% -i "s/taoslog/khclientlog/g" %community_dir%\src\client\src\tscSystem.c +::src\util\src\tnote.c +%sed% -i "s/taosinfo/khinfo/g" %community_dir%\src\util\src\tnote.c +::src\dnode\CMakeLists.txt +%sed% -i "s/taos\.cfg/kinghistorian\.cfg/g" %community_dir%\src\dnode\CMakeLists.txt +::src\kit\taosdump\taosdump.c +%sed% -i "s/support@taosdata\.com/support@wellintech\.com/g" 
%community_dir%\src\kit\taosdump\taosdump.c +%sed% -i "s/Default is taosdata/Default is khroot/g" %community_dir%\src\kit\taosdump\taosdump.c +%sed% -i "s/\"taosdata\"/\"khroot\"/g" %community_dir%\src\kit\taosdump\taosdump.c +%sed% -i "s/TDengine/KingHistorian/g" %community_dir%\src\kit\taosdump\taosdump.c +%sed% -i "s/taos\/taos\.cfg/kinghistorian\/kinghistorian\.cfg/g" %community_dir%\src\kit\taosdump\taosdump.c +::src\os\src\linux\linuxEnv.c +%sed% -i "s/etc\/taos/etc\/kinghistorian/g" %community_dir%\src\os\src\linux\linuxEnv.c +%sed% -i "s/lib\/taos/lib\/kinghistorian/g" %community_dir%\src\os\src\linux\linuxEnv.c +%sed% -i "s/log\/taos/log\/kinghistorian/g" %community_dir%\src\os\src\linux\linuxEnv.c +::src\kit\shell\src\shellDarwin.c +%sed% -i "s/TDengine shell/KingHistorian shell/g" %community_dir%\src\kit\shell\src\shellDarwin.c +%sed% -i "s/2020 by TAOS Data/2021 by Wellintech/g" %community_dir%\src\kit\shell\src\shellDarwin.c +::src\kit\shell\src\shellLinux.c +%sed% -i "s/support@taosdata\.com/support@wellintech\.com/g" %community_dir%\src\kit\shell\src\shellLinux.c +%sed% -i "s/TDengine shell/KingHistorian shell/g" %community_dir%\src\kit\shell\src\shellLinux.c +%sed% -i "s/2020 by TAOS Data/2021 by Wellintech/g" %community_dir%\src\kit\shell\src\shellLinux.c +::src\os\src\windows\wEnv.c +%sed% -i "s/TDengine/KingHistorian/g" %community_dir%\src\os\src\windows\wEnv.c +::src\kit\shell\src\shellEngine.c +%sed% -i "s/TDengine shell/KingHistorian shell/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/2020 by TAOS Data, Inc/2021 by Wellintech, Inc/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/taos connect failed/kh connect failed/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/\"taos^> \"/\"khclient^> \"/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/\" -^> \"/\" -^> \"/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/prompt_size = 6/prompt_size = 10/g" %community_dir%\src\kit\shell\src\shellEngine.c +::src\rpc\src\rpcMain.c +%sed% -i "s/taos connections/kh connections/g" %community_dir%\src\rpc\src\rpcMain.c +::src\plugins\monitor\src\monMain.c +%sed% -i "s/taosd is quiting/khserver is quiting/g" %community_dir%\src\plugins\monitor\src\monMain.c \ No newline at end of file diff --git a/packaging/sed_kh.sh b/packaging/sed_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..3041dc9ffa82a0e9fa0e1a2a5dd859c80a6c311c --- /dev/null +++ b/packaging/sed_kh.sh @@ -0,0 +1,162 @@ +#!/bin/bash + +function replace_community_kh() { + # cmake/install.inc + sed -i "s/C:\/TDengine/C:\/KingHistorian/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.exe/khclient\.exe/g" ${top_dir}/cmake/install.inc + # src/kit/shell/CMakeLists.txt + sed -i "s/OUTPUT_NAME taos/OUTPUT_NAME khclient/g" ${top_dir}/src/kit/shell/CMakeLists.txt + # src/kit/shell/inc/shell.h + sed -i "s/taos_history/kh_history/g" ${top_dir}/src/kit/shell/inc/shell.h + # src/inc/taosdef.h + sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/src/inc/taosdef.h + # src/util/src/tconfig.c + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/etc\/taos/etc\/kinghistorian/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/taos config/kinghistorian config/g" ${top_dir}/src/util/src/tconfig.c + # src/util/src/tlog.c + sed -i "s/log\/taos/log\/kinghistorian/g" ${top_dir}/src/util/src/tlog.c + # src/dnode/src/dnodeSystem.c + sed -i 
"s/TDengine/KingHistorian/g" ${top_dir}/src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/src/dnode/src/dnodeMain.c + sed -i "s/taosdlog/khserverlog/g" ${top_dir}/src/dnode/src/dnodeMain.c + # src/client/src/tscSystem.c + sed -i "s/taoslog/khclientlog/g" ${top_dir}/src/client/src/tscSystem.c + # src/util/src/tnote.c + sed -i "s/taosinfo/khinfo/g" ${top_dir}/src/util/src/tnote.c + # src/dnode/CMakeLists.txt + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt + echo "SET_TARGET_PROPERTIES(taosd PROPERTIES OUTPUT_NAME khserver)" >>${top_dir}/src/dnode/CMakeLists.txt + # src/os/src/linux/linuxEnv.c + sed -i "s/etc\/taos/etc\/kinghistorian/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/lib\/taos/lib\/kinghistorian/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/log\/taos/log\/kinghistorian/g" ${top_dir}/src/os/src/linux/linuxEnv.c + # src/kit/shell/src/shellDarwin.c + sed -i "s/TDengine shell/KingHistorian shell/g" ${top_dir}/src/kit/shell/src/shellDarwin.c + sed -i "s/2020 by TAOS Data/2021 by Wellintech/g" ${top_dir}/src/kit/shell/src/shellDarwin.c + # src/kit/shell/src/shellLinux.c + sed -i "s/support@taosdata\.com/support@wellintech\.com/g" ${top_dir}/src/kit/shell/src/shellLinux.c + sed -i "s/TDengine shell/KingHistorian shell/g" ${top_dir}/src/kit/shell/src/shellLinux.c + sed -i "s/2020 by TAOS Data/2021 by Wellintech/g" ${top_dir}/src/kit/shell/src/shellLinux.c + # src/os/src/windows/wEnv.c + sed -i "s/C:\/TDengine/C:\/KingHistorian/g" ${top_dir}/src/os/src/windows/wEnv.c + # src/kit/shell/src/shellEngine.c + sed -i "s/TDengine shell/KingHistorian shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/2020 by TAOS Data, Inc/2021 by Wellintech, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/taos connect failed/khclient connect failed/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\"taos> \"/\"khclient> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/prompt_size = 6/prompt_size = 10/g" ${top_dir}/src/kit/shell/src/shellEngine.c + # src/rpc/src/rpcMain.c + sed -i "s/taos connections/kh connections/g" ${top_dir}/src/rpc/src/rpcMain.c + # src/plugins/monitor/src/monMain.c + sed -i "s/taosd is quiting/khserver is quiting/g" ${top_dir}/src/plugins/monitor/src/monMain.c + + # packaging/tools/makepkg.sh + sed -i "s/productName=\"TDengine\"/productName=\"KingHistorian\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/serverName=\"taosd\"/serverName=\"khserver\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/clientName=\"taos\"/clientName=\"khclient\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"kinghistorian\.cfg\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"kinghistorian\.tar\.gz\"/g" ${top_dir}/packaging/tools/makepkg.sh + # packaging/tools/remove.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/kinghistorian\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/serverName=\"taosd\"/serverName=\"khserver\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/clientName=\"taos\"/clientName=\"khclient\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmkh\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/productName=\"TDengine\"/productName=\"KingHistorian\"/g" ${top_dir}/packaging/tools/remove.sh + # packaging/tools/startPre.sh + 
sed -i "s/serverName=\"taosd\"/serverName=\"khserver\"/g" ${top_dir}/packaging/tools/startPre.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/kinghistorian\"/g" ${top_dir}/packaging/tools/startPre.sh + # packaging/tools/run_taosd.sh + sed -i "s/taosd/khserver/g" ${top_dir}/packaging/tools/run_taosd.sh + # packaging/tools/install.sh + sed -i "s/clientName=\"taos\"/clientName=\"khclient\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/serverName=\"taosd\"/serverName=\"khserver\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"kinghistorian\.cfg\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/productName=\"TDengine\"/productName=\"KingHistorian\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/emailName=\"taosdata\.com\"/emailName=\"\wellintech\.com\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmkh\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/historyFile=\"taos_history\"/historyFile=\"kh_history\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"kinghistorian\.tar\.gz\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/dataDir=\"\/var\/lib\/taos\"/dataDir=\"\/var\/lib\/kinghistorian\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/kinghistorian\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/configDir=\"\/etc\/taos\"/configDir=\"\/etc\/kinghistorian\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/kinghistorian\"/g" ${top_dir}/packaging/tools/install.sh + + # packaging/tools/makeclient.sh + sed -i "s/productName=\"TDengine\"/productName=\"KingHistorian\"/g" ${top_dir}/packaging/tools/makeclient.sh + sed -i "s/clientName=\"taos\"/clientName=\"khclient\"/g" ${top_dir}/packaging/tools/makeclient.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"kinghistorian\.cfg\"/g" ${top_dir}/packaging/tools/makeclient.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"kinghistorian\.tar\.gz\"/g" ${top_dir}/packaging/tools/makeclient.sh + # packaging/tools/remove_client.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/kinghistorian\"/g" ${top_dir}/packaging/tools/remove_client.sh + sed -i "s/clientName=\"taos\"/clientName=\"khclient\"/g" ${top_dir}/packaging/tools/remove_client.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmkh\"/g" ${top_dir}/packaging/tools/remove_client.sh + # packaging/tools/install_client.sh + sed -i "s/dataDir=\"\/var\/lib\/taos\"/dataDir=\"\/var\/lib\/kinghistorian\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/kinghistorian\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/productName=\"TDengine\"/productName=\"KingHistorian\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/kinghistorian\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/configDir=\"\/etc\/taos\"/configDir=\"\/etc\/kinghistorian\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/serverName=\"taosd\"/serverName=\"khserver\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/clientName=\"taos\"/clientName=\"khclient\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmkh\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i 
"s/configFile=\"taos\.cfg\"/configFile=\"kinghistorian\.cfg\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"kinghistorian\.tar\.gz\"/g" ${top_dir}/packaging/tools/install_client.sh + + # packaging/tools/makearbi.sh + sed -i "s/productName=\"TDengine\"/productName=\"KingHistorian\"/g" ${top_dir}/packaging/tools/makearbi.sh + # packaging/tools/remove_arbi.sh + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/packaging/tools/remove_arbi.sh + # packaging/tools/install_arbi.sh + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/packaging/tools/install_arbi.sh + sed -i "s/taosdata\.com/wellintech\.com/g" ${top_dir}/packaging/tools/install_arbi.sh + + # packaging/tools/make_install.sh + sed -i "s/clientName=\"taos\"/clientName=\"khclient\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/serverName=\"taosd\"/serverName=\"khserver\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/kinghistorian\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/dataDir=\"\/var\/lib\/taos\"/dataDir=\"\/var\/lib\/kinghistorian\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/configDir=\"\/etc\/taos\"/configDir=\"\/etc\/kinghistorian\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"kinghistorian\.cfg\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/kinghistorian\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/productName=\"TDengine\"/productName=\"KingHistorian\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/emailName=\"taosdata\.com\"/emailName=\"wellintech\.com\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmkh\"/g" ${top_dir}/packaging/tools/make_install.sh + + # packaging/rpm/taosd + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/packaging/rpm/taosd + sed -i "s/usr\/local\/taos/usr\/local\/kinghistorian/g" ${top_dir}/packaging/rpm/taosd + sed -i "s/taosd/khserver/g" ${top_dir}/packaging/rpm/taosd + # packaging/deb/taosd + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/packaging/deb/taosd + sed -i "s/usr\/local\/taos/usr\/local\/kinghistorian/g" ${top_dir}/packaging/deb/taosd + sed -i "s/taosd/khserver/g" ${top_dir}/packaging/deb/taosd +} + +function replace_enterprise_kh() { + # enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + # enterprise/src/plugins/admin/src/httpAdminHandle.c + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c + # enterprise/src/plugins/grant/src/grantMain.c + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c + # enterprise/src/plugins/module/src/moduleMain.c + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + + # enterprise/src/plugins/web + sed -i -e "s/www\.taosdata\.com/www\.kingview\.com/g" $(grep -r "www.taosdata.com" ${top_dir}/../enterprise/src/plugins/web | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e "s/2017, TAOS Data/2021, Wellintech/g" $(grep -r "TAOS Data" ${top_dir}/../enterprise/src/plugins/web | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e "s/taosd/khserver/g" $(grep -r "taosd" 
${top_dir}/../enterprise/src/plugins/web | grep -E "*\.js\s*.*" | sed -r -e "s/(.*\.js):\s*(.*)/\1/g" | sort | uniq) + # enterprise/src/plugins/web/admin/monitor.html + sed -i -e "s/taosd<\/th>/khserver<\/th>/g" ${top_dir}/../enterprise/src/plugins/web/admin/monitor.html + sed -i -e "s/data:\['taosd', 'system'\],/data:\['khserver', 'system'\],/g" ${top_dir}/../enterprise/src/plugins/web/admin/monitor.html + sed -i -e "s/name: 'taosd',/name: 'khserver',/g" ${top_dir}/../enterprise/src/plugins/web/admin/monitor.html + # enterprise/src/plugins/web/admin/*.html + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/../enterprise/src/plugins/web/admin/*.html + # enterprise/src/plugins/web/admin/js/*.js + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/../enterprise/src/plugins/web/admin/js/*.js + +} diff --git a/packaging/sed_power.bat b/packaging/sed_power.bat new file mode 100644 index 0000000000000000000000000000000000000000..2b02504408e0f78335c1df1f15bf6fb25c97fc57 --- /dev/null +++ b/packaging/sed_power.bat @@ -0,0 +1,48 @@ +set sed="C:\Program Files\Git\usr\bin\sed.exe" +set community_dir=%1 + +::cmake\install.inc +%sed% -i "s/C:\/TDengine/C:\/Power/g" %community_dir%\cmake\install.inc +%sed% -i "s/taos\.cfg/power\.cfg/g" %community_dir%\cmake\install.inc +%sed% -i "s/taos\.exe/power\.exe/g" %community_dir%\cmake\install.inc +%sed% -i "/src\/connector/d" %community_dir%\cmake\install.inc +%sed% -i "/tests\/examples/d" %community_dir%\cmake\install.inc +::src\kit\shell\CMakeLists.txt +%sed% -i "s/OUTPUT_NAME taos/OUTPUT_NAME power/g" %community_dir%\src\kit\shell\CMakeLists.txt +::src\kit\shell\inc\shell.h +%sed% -i "s/taos_history/power_history/g" %community_dir%\src\kit\shell\inc\shell.h +::src\inc\taosdef.h +%sed% -i "s/\"taosdata\"/\"powerdb\"/g" %community_dir%\src\inc\taosdef.h +::src\util\src\tconfig.c +%sed% -i "s/taos\.cfg/power\.cfg/g" %community_dir%\src\util\src\tconfig.c +%sed% -i "s/etc\/taos/etc\/power/g" %community_dir%\src\util\src\tconfig.c +::src\util\src\tlog.c +%sed% -i "s/log\/taos/log\/power/g" %community_dir%\src\util\src\tlog.c +::src\dnode\src\dnodeSystem.c +%sed% -i "s/TDengine/Power/g" %community_dir%\src\dnode\src\dnodeSystem.c +::src\dnode\src\dnodeMain.c +%sed% -i "s/TDengine/Power/g" %community_dir%\src\dnode\src\dnodeMain.c +%sed% -i "s/taosdlog/powerdlog/g" %community_dir%\src\dnode\src\dnodeMain.c +::src\client\src\tscSystem.c +%sed% -i "s/taoslog/powerlog/g" %community_dir%\src\client\src\tscSystem.c +::src\util\src\tnote.c +%sed% -i "s/taosinfo/powerinfo/g" %community_dir%\src\util\src\tnote.c +::src\dnode\CMakeLists.txt +%sed% -i "s/taos\.cfg/power\.cfg/g" %community_dir%\src\dnode\CMakeLists.txt +::src\os\src\linux\linuxEnv.c +%sed% -i "s/etc\/taos/etc\/power/g" %community_dir%\src\os\src\linux\linuxEnv.c +%sed% -i "s/lib\/taos/lib\/power/g" %community_dir%\src\os\src\linux\linuxEnv.c +%sed% -i "s/log\/taos/log\/power/g" %community_dir%\src\os\src\linux\linuxEnv.c +::src\os\src\windows\wEnv.c +%sed% -i "s/TDengine/Power/g" %community_dir%\src\os\src\windows\wEnv.c +::src\kit\shell\src\shellEngine.c +%sed% -i "s/TDengine shell/Power shell/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/2020 by TAOS Data, Inc/2020 by PowerDB, Inc/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/taos connect failed/power connect failed/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/\"taos^> \"/\"power^> \"/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/\" -^> \"/\" -^> \"/g" 
%community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/prompt_size = 6/prompt_size = 7/g" %community_dir%\src\kit\shell\src\shellEngine.c +::src\rpc\src\rpcMain.c +%sed% -i "s/taos connections/power connections/g" %community_dir%\src\rpc\src\rpcMain.c +::src\plugins\monitor\src\monMain.c +%sed% -i "s/taosd is quiting/powerd is quiting/g" %community_dir%\src\plugins\monitor\src\monMain.c diff --git a/packaging/sed_power.sh b/packaging/sed_power.sh new file mode 100755 index 0000000000000000000000000000000000000000..8955476591410b6efac3aa410aab2cf257c1ac41 --- /dev/null +++ b/packaging/sed_power.sh @@ -0,0 +1,202 @@ +#!/bin/bash + +function replace_community_power() { + # cmake/install.inc + sed -i "s/C:\/TDengine/C:\/Power/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.exe/power\.exe/g" ${top_dir}/cmake/install.inc + sed -i "s/taosdemo\.exe/powerdemo\.exe/g" ${top_dir}/cmake/install.inc + # src/kit/shell/inc/shell.h + sed -i "s/taos_history/power_history/g" ${top_dir}/src/kit/shell/inc/shell.h + # src/inc/taosdef.h + sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/src/inc/taosdef.h + # src/util/src/tconfig.c + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/etc\/taos/etc\/power/g" ${top_dir}/src/util/src/tconfig.c + + # src/util/src/tlog.c + sed -i "s/log\/taos/log\/power/g" ${top_dir}/src/util/src/tlog.c + # src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/Power/g" ${top_dir}/src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/Power/g" ${top_dir}/src/dnode/src/dnodeMain.c + sed -i "s/taosdlog/powerdlog/g" ${top_dir}/src/dnode/src/dnodeMain.c + # src/client/src/tscSystem.c + sed -i "s/taoslog/powerlog/g" ${top_dir}/src/client/src/tscSystem.c + # src/util/src/tnote.c + sed -i "s/taosinfo/powerinfo/g" ${top_dir}/src/util/src/tnote.c + # src/dnode/CMakeLists.txt + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt + + # src/os/src/linux/linuxEnv.c + sed -i "s/etc\/taos/etc\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/lib\/taos/lib\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/log\/taos/log\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c + + # src/kit/shell/src/shellLinux.c + sed -i "s/TDengine shell/Power shell/g" ${top_dir}/src/kit/shell/src/shellLinux.c + + # src/os/src/windows/wEnv.c + sed -i "s/C:\/TDengine/C:\/Power/g" ${top_dir}/src/os/src/windows/wEnv.c + # src/kit/shell/src/shellEngine.c + sed -i "s/TDengine shell/PowerDB shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/2020 by TAOS Data, Inc/2020 by PowerDB, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c + + sed -i "s/\"taos> \"/\"power> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/prompt_size = 6/prompt_size = 7/g" ${top_dir}/src/kit/shell/src/shellEngine.c + # src/rpc/src/rpcMain.c + sed -i "s/taos connections/power connections/g" ${top_dir}/src/rpc/src/rpcMain.c + # src/plugins/monitor/src/monMain.c + sed -i "s/taosd is quiting/powerd is quiting/g" ${top_dir}/src/plugins/monitor/src/monMain.c + + ############ + # cmake/install.inc + sed -i "s/C:\/TDengine/C:\/Power/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.exe/power\.exe/g" ${top_dir}/cmake/install.inc + # src/kit/shell/CMakeLists.txt + sed -i "s/OUTPUT_NAME taos/OUTPUT_NAME power/g" ${top_dir}/src/kit/shell/CMakeLists.txt + # 
src/kit/shell/inc/shell.h + sed -i "s/taos_history/power_history/g" ${top_dir}/src/kit/shell/inc/shell.h + # src/inc/taosdef.h + sed -i "s/\"taosdata\"/\"power\"/g" ${top_dir}/src/inc/taosdef.h + # src/util/src/tconfig.c + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/etc\/taos/etc\/power/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/taos config/power config/g" ${top_dir}/src/util/src/tconfig.c + # src/util/src/tlog.c + sed -i "s/log\/taos/log\/power/g" ${top_dir}/src/util/src/tlog.c + # src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/Power/g" ${top_dir}/src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/Power/g" ${top_dir}/src/dnode/src/dnodeMain.c + sed -i "s/taosdlog/powerdlog/g" ${top_dir}/src/dnode/src/dnodeMain.c + # src/client/src/tscSystem.c + sed -i "s/taoslog/powerlog/g" ${top_dir}/src/client/src/tscSystem.c + # src/util/src/tnote.c + sed -i "s/taosinfo/powerinfo/g" ${top_dir}/src/util/src/tnote.c + # src/dnode/CMakeLists.txt + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt + echo "SET_TARGET_PROPERTIES(taosd PROPERTIES OUTPUT_NAME powerd)" >>${top_dir}/src/dnode/CMakeLists.txt + # src/os/src/linux/linuxEnv.c + sed -i "s/etc\/taos/etc\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/lib\/taos/lib\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/log\/taos/log\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c + # src/kit/shell/src/shellDarwin.c + sed -i "s/TDengine shell/Power shell/g" ${top_dir}/src/kit/shell/src/shellDarwin.c + # src/kit/shell/src/shellLinux.c + sed -i "s/TDengine shell/Power shell/g" ${top_dir}/src/kit/shell/src/shellLinux.c + # src/os/src/windows/wEnv.c + sed -i "s/C:\/TDengine/C:\/Power/g" ${top_dir}/src/os/src/windows/wEnv.c + # src/kit/shell/src/shellEngine.c + sed -i "s/TDengine shell/Power shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/taos connect failed/power connect failed/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\"taos> \"/\"power> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/prompt_size = 6/prompt_size = 7/g" ${top_dir}/src/kit/shell/src/shellEngine.c + # src/rpc/src/rpcMain.c + sed -i "s/taos connections/power connections/g" ${top_dir}/src/rpc/src/rpcMain.c + # src/plugins/monitor/src/monMain.c + sed -i "s/taosd is quiting/powerd is quiting/g" ${top_dir}/src/plugins/monitor/src/monMain.c + + # packaging/tools/makepkg.sh + sed -i "s/productName=\"TDengine\"/productName=\"Power\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/serverName=\"taosd\"/serverName=\"powerd\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/clientName=\"taos\"/clientName=\"power\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"power\.cfg\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"power\.tar\.gz\"/g" ${top_dir}/packaging/tools/makepkg.sh + # packaging/tools/remove.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/power\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/serverName=\"taosd\"/serverName=\"powerd\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/clientName=\"taos\"/clientName=\"power\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmpower\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/productName=\"TDengine\"/productName=\"Power\"/g" ${top_dir}/packaging/tools/remove.sh +
# packaging/tools/startPre.sh + sed -i "s/serverName=\"taosd\"/serverName=\"powerd\"/g" ${top_dir}/packaging/tools/startPre.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/Power\"/g" ${top_dir}/packaging/tools/startPre.sh + # packaging/tools/run_taosd.sh + sed -i "s/taosd/powerd/g" ${top_dir}/packaging/tools/run_taosd.sh + # packaging/tools/install.sh + sed -i "s/clientName=\"taos\"/clientName=\"power\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/serverName=\"taosd\"/serverName=\"powerd\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"power\.cfg\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/productName=\"TDengine\"/productName=\"Power\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmpower\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/historyFile=\"taos_history\"/historyFile=\"power_history\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"power\.tar\.gz\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/dataDir=\"\/var\/lib\/taos\"/dataDir=\"\/var\/lib\/power\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/power\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/configDir=\"\/etc\/taos\"/configDir=\"\/etc\/power\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/power\"/g" ${top_dir}/packaging/tools/install.sh + + # packaging/tools/makeclient.sh + sed -i "s/productName=\"TDengine\"/productName=\"Power\"/g" ${top_dir}/packaging/tools/makeclient.sh + sed -i "s/clientName=\"taos\"/clientName=\"power\"/g" ${top_dir}/packaging/tools/makeclient.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"power\.cfg\"/g" ${top_dir}/packaging/tools/makeclient.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"power\.tar\.gz\"/g" ${top_dir}/packaging/tools/makeclient.sh + # packaging/tools/remove_client.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/power\"/g" ${top_dir}/packaging/tools/remove_client.sh + sed -i "s/clientName=\"taos\"/clientName=\"power\"/g" ${top_dir}/packaging/tools/remove_client.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmpower\"/g" ${top_dir}/packaging/tools/remove_client.sh + # packaging/tools/install_client.sh + sed -i "s/dataDir=\"\/var\/lib\/taos\"/dataDir=\"\/var\/lib\/power\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/power\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/productName=\"TDengine\"/productName=\"Power\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/power\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/configDir=\"\/etc\/taos\"/configDir=\"\/etc\/power\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/serverName=\"taosd\"/serverName=\"powerd\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/clientName=\"taos\"/clientName=\"power\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmpower\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"power\.cfg\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"power\.tar\.gz\"/g" ${top_dir}/packaging/tools/install_client.sh + + # 
packaging/tools/makearbi.sh + sed -i "s/productName=\"TDengine\"/productName=\"Power\"/g" ${top_dir}/packaging/tools/makearbi.sh + # packaging/tools/remove_arbi.sh + sed -i "s/TDengine/Power/g" ${top_dir}/packaging/tools/remove_arbi.sh + # packaging/tools/install_arbi.sh + sed -i "s/TDengine/Power/g" ${top_dir}/packaging/tools/install_arbi.sh + + # packaging/tools/make_install.sh + sed -i "s/clientName=\"taos\"/clientName=\"power\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/serverName=\"taosd\"/serverName=\"powerd\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/power\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/dataDir=\"\/var\/lib\/taos\"/dataDir=\"\/var\/lib\/power\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/configDir=\"\/etc\/taos\"/configDir=\"\/etc\/power\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"power\.cfg\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/power\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/productName=\"TDengine\"/productName=\"Power\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmpower\"/g" ${top_dir}/packaging/tools/make_install.sh + + # packaging/rpm/taosd + sed -i "s/TDengine/Power/g" ${top_dir}/packaging/rpm/taosd + sed -i "s/usr\/local\/taos/usr\/local\/power/g" ${top_dir}/packaging/rpm/taosd + sed -i "s/taosd/powerd/g" ${top_dir}/packaging/rpm/taosd + # packaging/deb/taosd + sed -i "s/TDengine/Power/g" ${top_dir}/packaging/deb/taosd + sed -i "s/usr\/local\/taos/usr\/local\/power/g" ${top_dir}/packaging/deb/taosd + sed -i "s/taosd/powerd/g" ${top_dir}/packaging/deb/taosd +} + +function replace_enterprise_power() { + # enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/TDengine/Power/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + # enterprise/src/plugins/admin/src/httpAdminHandle.c + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c + # enterprise/src/plugins/grant/src/grantMain.c + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c + # enterprise/src/plugins/module/src/moduleMain.c + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + + # enterprise/src/plugins/web + sed -i -e "s/taosd/powerd/g" $(grep -r "taosd" ${top_dir}/../enterprise/src/plugins/web | grep -E "*\.js\s*.*" | sed -r -e "s/(.*\.js):\s*(.*)/\1/g" | sort | uniq) + # enterprise/src/plugins/web/admin/monitor.html + sed -i -e "s/taosd<\/th>/powerd<\/th>/g" ${top_dir}/../enterprise/src/plugins/web/admin/monitor.html + sed -i -e "s/data:\['taosd', 'system'\],/data:\['powerd', 'system'\],/g" ${top_dir}/../enterprise/src/plugins/web/admin/monitor.html + sed -i -e "s/name: 'taosd',/name: 'powerd',/g" ${top_dir}/../enterprise/src/plugins/web/admin/monitor.html + # enterprise/src/plugins/web/admin/*.html + sed -i "s/TDengine/Power/g" ${top_dir}/../enterprise/src/plugins/web/admin/*.html + # enterprise/src/plugins/web/admin/js/*.js + sed -i "s/TDengine/Power/g" ${top_dir}/../enterprise/src/plugins/web/admin/js/*.js + +} diff --git a/packaging/sed_pro.bat b/packaging/sed_pro.bat new file mode 100644 index 
0000000000000000000000000000000000000000..fe4447dc77670d12f7c11553e57c6161a7df640e --- /dev/null +++ b/packaging/sed_pro.bat @@ -0,0 +1,55 @@ +set sed="C:\Program Files\Git\usr\bin\sed.exe" +set community_dir=%1 + +::cmake\install.inc +%sed% -i "s/C:\/TDengine/C:\/ProDB/g" %community_dir%\cmake\install.inc +%sed% -i "s/taos\.cfg/prodb\.cfg/g" %community_dir%\cmake\install.inc +%sed% -i "s/taos\.exe/prodbc\.exe/g" %community_dir%\cmake\install.inc +%sed% -i "/src\/connector/d" %community_dir%\cmake\install.inc +%sed% -i "/tests\/examples/d" %community_dir%\cmake\install.inc +::src\kit\shell\CMakeLists.txt +%sed% -i "s/OUTPUT_NAME taos/OUTPUT_NAME prodbc/g" %community_dir%\src\kit\shell\CMakeLists.txt +::src\kit\shell\inc\shell.h +%sed% -i "s/taos_history/prodb_history/g" %community_dir%\src\kit\shell\inc\shell.h +::src\inc\taosdef.h +%sed% -i "s/\"taosdata\"/\"prodb\"/g" %community_dir%\src\inc\taosdef.h +::src\util\src\tconfig.c +%sed% -i "s/taos\.cfg/prodb\.cfg/g" %community_dir%\src\util\src\tconfig.c +%sed% -i "s/etc\/taos/etc\/ProDB/g" %community_dir%\src\util\src\tconfig.c +::src\util\src\tlog.c +%sed% -i "s/log\/taos/log\/ProDB/g" %community_dir%\src\util\src\tlog.c +::src\dnode\src\dnodeSystem.c +%sed% -i "s/TDengine/ProDB/g" %community_dir%\src\dnode\src\dnodeSystem.c +::src\dnode\src\dnodeMain.c +%sed% -i "s/TDengine/ProDB/g" %community_dir%\src\dnode\src\dnodeMain.c +%sed% -i "s/taosdlog/prodlog/g" %community_dir%\src\dnode\src\dnodeMain.c +::src\client\src\tscSystem.c +%sed% -i "s/taoslog/prolog/g" %community_dir%\src\client\src\tscSystem.c +::src\util\src\tnote.c +%sed% -i "s/taosinfo/proinfo/g" %community_dir%\src\util\src\tnote.c +::src\dnode\CMakeLists.txt +%sed% -i "s/taos\.cfg/prodb\.cfg/g" %community_dir%\src\dnode\CMakeLists.txt +::src\os\src\linux\linuxEnv.c +%sed% -i "s/etc\/taos/etc\/ProDB/g" %community_dir%\src\os\src\linux\linuxEnv.c +%sed% -i "s/lib\/taos/lib\/ProDB/g" %community_dir%\src\os\src\linux\linuxEnv.c +%sed% -i "s/log\/taos/log\/ProDB/g" %community_dir%\src\os\src\linux\linuxEnv.c +::src\kit\shell\src\shellDarwin.c +%sed% -i "s/TDengine shell/ProDB shell/g" %community_dir%\src\kit\shell\src\shellDarwin.c +%sed% -i "s/2020 by TAOS Data/2021 by HanaTech/g" %community_dir%\src\kit\shell\src\shellDarwin.c +::src\kit\shell\src\shellLinux.c +%sed% -i "s/support@taosdata\.com/support@hanatech\.com\.cn/g" %community_dir%\src\kit\shell\src\shellLinux.c +%sed% -i "s/TDengine shell/ProDB shell/g" %community_dir%\src\kit\shell\src\shellLinux.c +%sed% -i "s/2020 by TAOS Data/2021 by HanaTech/g" %community_dir%\src\kit\shell\src\shellLinux.c +::src\os\src\windows\wEnv.c +%sed% -i "s/TDengine/ProDB/g" %community_dir%\src\os\src\windows\wEnv.c +::src\kit\shell\src\shellEngine.c +%sed% -i "s/TDengine shell/ProDB shell/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/2020 by TAOS Data, Inc/2021 by HanaTech, Inc/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/taos connect failed/prodbc connect failed/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/\"taos^> \"/\"ProDB^> \"/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/\" -^> \"/\" -^> \"/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/prompt_size = 6/prompt_size = 7/g" %community_dir%\src\kit\shell\src\shellEngine.c +::src\rpc\src\rpcMain.c +%sed% -i "s/taos connections/prodbc connections/g" %community_dir%\src\rpc\src\rpcMain.c +::src\plugins\monitor\src\monMain.c +%sed% -i "s/taosd is quiting/prodbs is quiting/g" 
%community_dir%\src\plugins\monitor\src\monMain.c diff --git a/packaging/sed_pro.sh b/packaging/sed_pro.sh new file mode 100755 index 0000000000000000000000000000000000000000..e7fdaeda4c68f4dfc76d4d879f20f83c123238c1 --- /dev/null +++ b/packaging/sed_pro.sh @@ -0,0 +1,162 @@ +#!/bin/bash + +function replace_community_pro() { + # cmake/install.inc + sed -i "s/C:\/TDengine/C:\/ProDB/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.exe/prodbc\.exe/g" ${top_dir}/cmake/install.inc + # src/kit/shell/CMakeLists.txt + sed -i "s/OUTPUT_NAME taos/OUTPUT_NAME prodbc/g" ${top_dir}/src/kit/shell/CMakeLists.txt + # src/kit/shell/inc/shell.h + sed -i "s/taos_history/prodb_history/g" ${top_dir}/src/kit/shell/inc/shell.h + # src/inc/taosdef.h + sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/src/inc/taosdef.h + # src/util/src/tconfig.c + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/etc\/taos/etc\/ProDB/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/taos config/prodb config/g" ${top_dir}/src/util/src/tconfig.c + # src/util/src/tlog.c + sed -i "s/log\/taos/log\/ProDB/g" ${top_dir}/src/util/src/tlog.c + # src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/ProDB/g" ${top_dir}/src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/ProDB/g" ${top_dir}/src/dnode/src/dnodeMain.c + sed -i "s/taosdlog/prodlog/g" ${top_dir}/src/dnode/src/dnodeMain.c + # src/client/src/tscSystem.c + sed -i "s/taoslog/prolog/g" ${top_dir}/src/client/src/tscSystem.c + # src/util/src/tnote.c + sed -i "s/taosinfo/proinfo/g" ${top_dir}/src/util/src/tnote.c + # src/dnode/CMakeLists.txt + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt + echo "SET_TARGET_PROPERTIES(taosd PROPERTIES OUTPUT_NAME prodbs)" >>${top_dir}/src/dnode/CMakeLists.txt + # src/os/src/linux/linuxEnv.c + sed -i "s/etc\/taos/etc\/ProDB/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/lib\/taos/lib\/ProDB/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/log\/taos/log\/ProDB/g" ${top_dir}/src/os/src/linux/linuxEnv.c + # src/kit/shell/src/shellDarwin.c + sed -i "s/TDengine shell/ProDB shell/g" ${top_dir}/src/kit/shell/src/shellDarwin.c + sed -i "s/2020 by TAOS Data/2021 by HanaTech/g" ${top_dir}/src/kit/shell/src/shellDarwin.c + # src/kit/shell/src/shellLinux.c + sed -i "s/support@taosdata\.com/support@hanatech\.com\.cn/g" ${top_dir}/src/kit/shell/src/shellLinux.c + sed -i "s/TDengine shell/ProDB shell/g" ${top_dir}/src/kit/shell/src/shellLinux.c + sed -i "s/2020 by TAOS Data/2021 by HanaTech/g" ${top_dir}/src/kit/shell/src/shellLinux.c + # src/os/src/windows/wEnv.c + sed -i "s/C:\/TDengine/C:\/ProDB/g" ${top_dir}/src/os/src/windows/wEnv.c + # src/kit/shell/src/shellEngine.c + sed -i "s/TDengine shell/ProDB shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/2020 by TAOS Data, Inc/2021 by Hanatech, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/taos connect failed/prodbc connect failed/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\"taos> \"/\"ProDB> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/prompt_size = 6/prompt_size = 7/g" ${top_dir}/src/kit/shell/src/shellEngine.c + # src/rpc/src/rpcMain.c + sed -i "s/taos connections/prodbc connections/g" ${top_dir}/src/rpc/src/rpcMain.c + # src/plugins/monitor/src/monMain.c + sed -i "s/taosd is quiting/prodbs is quiting/g" ${top_dir}/src/plugins/monitor/src/monMain.c + + # 
packaging/tools/makepkg.sh + sed -i "s/productName=\"TDengine\"/productName=\"ProDB\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/serverName=\"taosd\"/serverName=\"prodbs\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/clientName=\"taos\"/clientName=\"prodbc\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"prodb\.cfg\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"prodb\.tar\.gz\"/g" ${top_dir}/packaging/tools/makepkg.sh + # packaging/tools/remove.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/ProDB\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/serverName=\"taosd\"/serverName=\"prodbs\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/clientName=\"taos\"/clientName=\"prodbc\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmpro\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/productName=\"TDengine\"/productName=\"ProDB\"/g" ${top_dir}/packaging/tools/remove.sh + # packaging/tools/startPre.sh + sed -i "s/serverName=\"taosd\"/serverName=\"prodbs\"/g" ${top_dir}/packaging/tools/startPre.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/ProDB\"/g" ${top_dir}/packaging/tools/startPre.sh + # packaging/tools/run_taosd.sh + sed -i "s/taosd/prodbs/g" ${top_dir}/packaging/tools/run_taosd.sh + # packaging/tools/install.sh + sed -i "s/clientName=\"taos\"/clientName=\"prodbc\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/serverName=\"taosd\"/serverName=\"prodbs\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"prodb\.cfg\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/productName=\"TDengine\"/productName=\"ProDB\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/emailName=\"taosdata\.com\"/emailName=\"hanatech\.com\.cn\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmpro\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/historyFile=\"taos_history\"/historyFile=\"prodb_history\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"prodb\.tar\.gz\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/dataDir=\"\/var\/lib\/taos\"/dataDir=\"\/var\/lib\/ProDB\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/ProDB\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/configDir=\"\/etc\/taos\"/configDir=\"\/etc\/ProDB\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/ProDB\"/g" ${top_dir}/packaging/tools/install.sh + + # packaging/tools/makeclient.sh + sed -i "s/productName=\"TDengine\"/productName=\"ProDB\"/g" ${top_dir}/packaging/tools/makeclient.sh + sed -i "s/clientName=\"taos\"/clientName=\"prodbc\"/g" ${top_dir}/packaging/tools/makeclient.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"prodb\.cfg\"/g" ${top_dir}/packaging/tools/makeclient.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"prodb\.tar\.gz\"/g" ${top_dir}/packaging/tools/makeclient.sh + # packaging/tools/remove_client.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/ProDB\"/g" ${top_dir}/packaging/tools/remove_client.sh + sed -i "s/clientName=\"taos\"/clientName=\"prodbc\"/g" ${top_dir}/packaging/tools/remove_client.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmpro\"/g"
${top_dir}/packaging/tools/remove_client.sh + # packaging/tools/install_client.sh + sed -i "s/dataDir=\"\/var\/lib\/taos\"/dataDir=\"\/var\/lib\/ProDB\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/ProDB\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/productName=\"TDengine\"/productName=\"ProDB\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/ProDB\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/configDir=\"\/etc\/taos\"/configDir=\"\/etc\/ProDB\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/serverName=\"taosd\"/serverName=\"prodbs\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/clientName=\"taos\"/clientName=\"prodbc\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmpro\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"prodb\.cfg\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"prodb\.tar\.gz\"/g" ${top_dir}/packaging/tools/install_client.sh + + # packaging/tools/makearbi.sh + sed -i "s/productName=\"TDengine\"/productName=\"ProDB\"/g" ${top_dir}/packaging/tools/makearbi.sh + # packaging/tools/remove_arbi.sh + sed -i "s/TDengine/ProDB/g" ${top_dir}/packaging/tools/remove_arbi.sh + # packaging/tools/install_arbi.sh + sed -i "s/TDengine/ProDB/g" ${top_dir}/packaging/tools/install_arbi.sh + sed -i "s/taosdata\.com/hanatech\.com\.cn/g" ${top_dir}/packaging/tools/install_arbi.sh + + # packaging/tools/make_install.sh + sed -i "s/clientName=\"taos\"/clientName=\"prodbc\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/serverName=\"taosd\"/serverName=\"prodbs\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/ProDB\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/dataDir=\"\/var\/lib\/taos\"/dataDir=\"\/var\/lib\/ProDB\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/configDir=\"\/etc\/taos\"/configDir=\"\/etc\/ProDB\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"prodb\.cfg\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/ProDB\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/productName=\"TDengine\"/productName=\"ProDB\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/emailName=\"taosdata\.com\"/emailName=\"hanatech\.com\.cn\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmpro\"/g" ${top_dir}/packaging/tools/make_install.sh + + # packaging/rpm/taosd + sed -i "s/TDengine/ProDB/g" ${top_dir}/packaging/rpm/taosd + sed -i "s/usr\/local\/taos/usr\/local\/ProDB/g" ${top_dir}/packaging/rpm/taosd + sed -i "s/taosd/prodbs/g" ${top_dir}/packaging/rpm/taosd + # packaging/deb/taosd + sed -i "s/TDengine/ProDB/g" ${top_dir}/packaging/deb/taosd + sed -i "s/usr\/local\/taos/usr\/local\/ProDB/g" ${top_dir}/packaging/deb/taosd + sed -i "s/taosd/prodbs/g" ${top_dir}/packaging/deb/taosd + +} + +function replace_enterprise_pro() { + # enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/TDengine/ProDB/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + # 
enterprise/src/plugins/admin/src/httpAdminHandle.c + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c + # enterprise/src/plugins/grant/src/grantMain.c + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c + # enterprise/src/plugins/module/src/moduleMain.c + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + + # enterprise/src/plugins/web + sed -i -e "s/www\.taosdata\.com/www\.hanatech\.com\.cn/g" $(grep -r "www.taosdata.com" ${top_dir}/../enterprise/src/plugins/web | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e "s/2017, TAOS Data/2021, Hanatech/g" $(grep -r "TAOS Data" ${top_dir}/../enterprise/src/plugins/web | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e "s/taosd/prodbs/g" $(grep -r "taosd" ${top_dir}/../enterprise/src/plugins/web | grep -E "*\.js\s*.*" | sed -r -e "s/(.*\.js):\s*(.*)/\1/g" | sort | uniq) + # enterprise/src/plugins/web/admin/monitor.html + sed -i -e "s/taosd<\/th>/prodbs<\/th>/g" ${top_dir}/../enterprise/src/plugins/web/admin/monitor.html + sed -i -e "s/data:\['taosd', 'system'\],/data:\['prodbs', 'system'\],/g" ${top_dir}/../enterprise/src/plugins/web/admin/monitor.html + sed -i -e "s/name: 'taosd',/name: 'prodbs',/g" ${top_dir}/../enterprise/src/plugins/web/admin/monitor.html + # enterprise/src/plugins/web/admin/*.html + sed -i "s/TDengine/ProDB/g" ${top_dir}/../enterprise/src/plugins/web/admin/*.html + # enterprise/src/plugins/web/admin/js/*.js + sed -i "s/TDengine/ProDB/g" ${top_dir}/../enterprise/src/plugins/web/admin/js/*.js +} diff --git a/packaging/sed_tq.bat b/packaging/sed_tq.bat new file mode 100644 index 0000000000000000000000000000000000000000..f8131eac3055e65dfe5289b58f2ac044cd79bd99 --- /dev/null +++ b/packaging/sed_tq.bat @@ -0,0 +1,46 @@ +set sed="C:\Program Files\Git\usr\bin\sed.exe" +set community_dir=%1 + +::cmake\install.inc +%sed% -i "s/C:\/TDengine/C:\/TQ/g" %community_dir%\cmake\install.inc +%sed% -i "s/taos\.cfg/tq\.cfg/g" %community_dir%\cmake\install.inc +%sed% -i "s/taos\.exe/tq\.exe/g" %community_dir%\cmake\install.inc +%sed% -i "/src\/connector/d" %community_dir%\cmake\install.inc +%sed% -i "/tests\/examples/d" %community_dir%\cmake\install.inc +::src\kit\shell\CMakeLists.txt +%sed% -i "s/OUTPUT_NAME taos/OUTPUT_NAME tq/g" %community_dir%\src\kit\shell\CMakeLists.txt +::src\kit\shell\inc\shell.h +%sed% -i "s/taos_history/tq_history/g" %community_dir%\src\kit\shell\inc\shell.h +::src\inc\taosdef.h +%sed% -i "s/\"taosdata\"/\"tqueue\"/g" %community_dir%\src\inc\taosdef.h +::src\util\src\tconfig.c +%sed% -i "s/taos\.cfg/tq\.cfg/g" %community_dir%\src\util\src\tconfig.c +%sed% -i "s/etc\/taos/etc\/tq/g" %community_dir%\src\util\src\tconfig.c +::src\util\src\tlog.c +%sed% -i "s/log\/taos/log\/tq/g" %community_dir%\src\util\src\tlog.c +::src\dnode\src\dnodeSystem.c +%sed% -i "s/TDengine/TQ/g" %community_dir%\src\dnode\src\dnodeSystem.c +::src\dnode\src\dnodeMain.c +%sed% -i "s/TDengine/TQ/g" %community_dir%\src\dnode\src\dnodeMain.c +%sed% -i "s/taosdlog/tqdlog/g" %community_dir%\src\dnode\src\dnodeMain.c +::src\client\src\tscSystem.c +%sed% -i "s/taoslog/tqlog/g" %community_dir%\src\client\src\tscSystem.c +::src\util\src\tnote.c +%sed% -i "s/taosinfo/tqinfo/g" %community_dir%\src\util\src\tnote.c +::src\dnode\CMakeLists.txt +%sed% -i "s/taos\.cfg/tq\.cfg/g" %community_dir%\src\dnode\CMakeLists.txt +::src\os\src\linux\linuxEnv.c +%sed% -i "s/etc\/taos/etc\/tq/g" %community_dir%\src\os\src\linux\linuxEnv.c 
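+:: Editorial note: a preview sketch (not part of the original script; it
+:: assumes the Git for Windows sed.exe configured at the top). Dropping the
+:: -i flag prints the rewritten file to stdout instead of editing in place:
+::   %sed% "s/log\/taos/log\/tq/g" %community_dir%\src\util\src\tlog.c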
+%sed% -i "s/lib\/taos/lib\/tq/g" %community_dir%\src\os\src\linux\linuxEnv.c +%sed% -i "s/log\/taos/log\/tq/g" %community_dir%\src\os\src\linux\linuxEnv.c +::src\os\src\windows\wEnv.c +%sed% -i "s/TDengine/TQ/g" %community_dir%\src\os\src\windows\wEnv.c +::src\kit\shell\src\shellEngine.c +%sed% -i "s/TDengine shell/TQ shell/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/\"taos^> \"/\"tq^> \"/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/\" -^> \"/\" -^> \"/g" %community_dir%\src\kit\shell\src\shellEngine.c +%sed% -i "s/prompt_size = 6/prompt_size = 4/g" %community_dir%\src\kit\shell\src\shellEngine.c +::src\rpc\src\rpcMain.c +%sed% -i "s/taos connections/tq connections/g" %community_dir%\src\rpc\src\rpcMain.c +::src\plugins\monitor\src\monMain.c +%sed% -i "s/taosd is quiting/tqd is quiting/g" %community_dir%\src\plugins\monitor\src\monMain.c diff --git a/packaging/sed_tq.sh b/packaging/sed_tq.sh new file mode 100755 index 0000000000000000000000000000000000000000..412abb1fa702839a8d9a789c7860155a120419c6 --- /dev/null +++ b/packaging/sed_tq.sh @@ -0,0 +1,152 @@ +#!/bin/bash + +function replace_community_tq() { + # cmake/install.inc + sed -i "s/C:\/TDengine/C:\/TQ/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.exe/tq\.exe/g" ${top_dir}/cmake/install.inc + # src/kit/shell/CMakeLists.txt + sed -i "s/OUTPUT_NAME taos/OUTPUT_NAME tq/g" ${top_dir}/src/kit/shell/CMakeLists.txt + # src/kit/shell/inc/shell.h + sed -i "s/taos_history/tq_history/g" ${top_dir}/src/kit/shell/inc/shell.h + # src/inc/taosdef.h + sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/src/inc/taosdef.h + # src/util/src/tconfig.c + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/etc\/taos/etc\/tq/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/taos config/tq config/g" ${top_dir}/src/util/src/tconfig.c + # src/util/src/tlog.c + sed -i "s/log\/taos/log\/tq/g" ${top_dir}/src/util/src/tlog.c + # src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/TQ/g" ${top_dir}/src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/TQ/g" ${top_dir}/src/dnode/src/dnodeMain.c + sed -i "s/taosdlog/tqdlog/g" ${top_dir}/src/dnode/src/dnodeMain.c + # src/client/src/tscSystem.c + sed -i "s/taoslog/tqlog/g" ${top_dir}/src/client/src/tscSystem.c + # src/util/src/tnote.c + sed -i "s/taosinfo/tqinfo/g" ${top_dir}/src/util/src/tnote.c + # src/dnode/CMakeLists.txt + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt + echo "SET_TARGET_PROPERTIES(taosd PROPERTIES OUTPUT_NAME tqd)" >>${top_dir}/src/dnode/CMakeLists.txt + # src/os/src/linux/linuxEnv.c + sed -i "s/etc\/taos/etc\/tq/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/lib\/taos/lib\/tq/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/log\/taos/log\/tq/g" ${top_dir}/src/os/src/linux/linuxEnv.c + # src/kit/shell/src/shellDarwin.c + sed -i "s/TDengine shell/TQ shell/g" ${top_dir}/src/kit/shell/src/shellDarwin.c + # src/kit/shell/src/shellLinux.c + sed -i "s/TDengine shell/TQ shell/g" ${top_dir}/src/kit/shell/src/shellLinux.c + # src/os/src/windows/wEnv.c + sed -i "s/C:\/TDengine/C:\/TQ/g" ${top_dir}/src/os/src/windows/wEnv.c + # src/kit/shell/src/shellEngine.c + sed -i "s/TDengine shell/TQ shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/taos connect failed/tq connect failed/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\"taos> \"/\"tq> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\" -> \"/\" -> \"/g" 
${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/prompt_size = 6/prompt_size = 4/g" ${top_dir}/src/kit/shell/src/shellEngine.c + # src/rpc/src/rpcMain.c + sed -i "s/taos connections/tq connections/g" ${top_dir}/src/rpc/src/rpcMain.c + # src/plugins/monitor/src/monMain.c + sed -i "s/taosd is quiting/tqd is quiting/g" ${top_dir}/src/plugins/monitor/src/monMain.c + + # packaging/tools/makepkg.sh + sed -i "s/productName=\"TDengine\"/productName=\"TQ\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/serverName=\"taosd\"/serverName=\"tqd\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/clientName=\"taos\"/clientName=\"tq\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"tq\.cfg\"/g" ${top_dir}/packaging/tools/makepkg.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"tq\.tar\.gz\"/g" ${top_dir}/packaging/tools/makepkg.sh + # packaging/tools/remove.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/tq\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/serverName=\"taosd\"/serverName=\"tqd\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/clientName=\"taos\"/clientName=\"tq\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmtq\"/g" ${top_dir}/packaging/tools/remove.sh + sed -i "s/productName=\"TDengine\"/productName=\"TQ\"/g" ${top_dir}/packaging/tools/remove.sh + # packaging/tools/startPre.sh + sed -i "s/serverName=\"taosd\"/serverName=\"tqd\"/g" ${top_dir}/packaging/tools/startPre.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/tq\"/g" ${top_dir}/packaging/tools/startPre.sh + # packaging/tools/run_taosd.sh + sed -i "s/taosd/tqd/g" ${top_dir}/packaging/tools/run_taosd.sh + # packaging/tools/install.sh + sed -i "s/clientName=\"taos\"/clientName=\"tq\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/serverName=\"taosd\"/serverName=\"tqd\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"tq\.cfg\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/productName=\"TDengine\"/productName=\"TQ\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmtq\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/historyFile=\"taos_history\"/historyFile=\"tq_history\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"tq\.tar\.gz\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/dataDir=\"\/var\/lib\/taos\"/dataDir=\"\/var\/lib\/tq\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/tq\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/configDir=\"\/etc\/taos\"/configDir=\"\/etc\/tq\"/g" ${top_dir}/packaging/tools/install.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/tq\"/g" ${top_dir}/packaging/tools/install.sh + + # packaging/tools/makeclient.sh + sed -i "s/productName=\"TDengine\"/productName=\"TQ\"/g" ${top_dir}/packaging/tools/makeclient.sh + sed -i "s/clientName=\"taos\"/clientName=\"tq\"/g" ${top_dir}/packaging/tools/makeclient.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"tq\.cfg\"/g" ${top_dir}/packaging/tools/makeclient.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"tq\.tar\.gz\"/g" ${top_dir}/packaging/tools/makeclient.sh + # packaging/tools/remove_client.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/tq\"/g" ${top_dir}/packaging/tools/remove_client.sh + sed -i 
"s/clientName=\"taos\"/clientName=\"tq\"/g" ${top_dir}/packaging/tools/remove_client.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmtq\"/g" ${top_dir}/packaging/tools/remove_client.sh + # packaging/tools/install_client.sh + sed -i "s/dataDir=\"\/var\/lib\/taos\"/dataDir=\"\/var\/lib\/tq\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/tq\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/productName=\"TDengine\"/productName=\"TQ\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/tq\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/configDir=\"\/etc\/taos\"/configDir=\"\/etc\/tq\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/serverName=\"taosd\"/serverName=\"tqd\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/clientName=\"taos\"/clientName=\"tq\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmtq\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"tq\.cfg\"/g" ${top_dir}/packaging/tools/install_client.sh + sed -i "s/tarName=\"taos\.tar\.gz\"/tarName=\"tq\.tar\.gz\"/g" ${top_dir}/packaging/tools/install_client.sh + + # packaging/tools/makearbi.sh + sed -i "s/productName=\"TDengine\"/productName=\"TQ\"/g" ${top_dir}/packaging/tools/makearbi.sh + # packaging/tools/remove_arbi.sh + sed -i "s/TDengine/TQ/g" ${top_dir}/packaging/tools/remove_arbi.sh + # packaging/tools/install_arbi.sh + sed -i "s/TDengine/TQ/g" ${top_dir}/packaging/tools/install_arbi.sh + + # packaging/tools/make_install.sh + sed -i "s/clientName=\"taos\"/clientName=\"tq\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/serverName=\"taosd\"/serverName=\"tqd\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/logDir=\"\/var\/log\/taos\"/logDir=\"\/var\/log\/tq\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/dataDir=\"\/var\/lib\/taos\"/dataDir=\"\/var\/lib\/tq\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/configDir=\"\/etc\/taos\"/configDir=\"\/etc\/tq\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/configFile=\"taos\.cfg\"/configFile=\"tq\.cfg\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/installDir=\"\/usr\/local\/taos\"/installDir=\"\/usr\/local\/tq\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/productName=\"TDengine\"/productName=\"TQ\"/g" ${top_dir}/packaging/tools/make_install.sh + sed -i "s/uninstallScript=\"rmtaos\"/uninstallScript=\"rmtq\"/g" ${top_dir}/packaging/tools/make_install.sh + + # packaging/rpm/taosd + sed -i "s/TDengine/TQ/g" ${top_dir}/packaging/rpm/taosd + sed -i "s/usr\/local\/taos/usr\/local\/tq/g" ${top_dir}/packaging/rpm/taosd + sed -i "s/taosd/tqd/g" ${top_dir}/packaging/rpm/taosd + # packaging/deb/taosd + sed -i "s/TDengine/TQ/g" ${top_dir}/packaging/deb/taosd + sed -i "s/usr\/local\/taos/usr\/local\/tq/g" ${top_dir}/packaging/deb/taosd + sed -i "s/taosd/tqd/g" ${top_dir}/packaging/deb/taosd +} + +function replace_enterprise_tq() { + # enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/TDengine/TQ/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + # enterprise/src/plugins/admin/src/httpAdminHandle.c + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c + # 
enterprise/src/plugins/grant/src/grantMain.c + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c + # enterprise/src/plugins/module/src/moduleMain.c + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + + # enterprise/src/plugins/web + sed -i -e "s/taosd/tqd/g" $(grep -r "taosd" ${top_dir}/../enterprise/src/plugins/web | grep -E "*\.js\s*.*" | sed -r -e "s/(.*\.js):\s*(.*)/\1/g" | sort | uniq) + # enterprise/src/plugins/web/admin/monitor.html + sed -i -e "s/taosd<\/th>/tqd<\/th>/g" ${top_dir}/../enterprise/src/plugins/web/admin/monitor.html + sed -i -e "s/data:\['taosd', 'system'\],/data:\['tqd', 'system'\],/g" ${top_dir}/../enterprise/src/plugins/web/admin/monitor.html + sed -i -e "s/name: 'taosd',/name: 'tqd',/g" ${top_dir}/../enterprise/src/plugins/web/admin/monitor.html + # enterprise/src/plugins/web/admin/*.html + sed -i "s/TDengine/TQ/g" ${top_dir}/../enterprise/src/plugins/web/admin/*.html + # enterprise/src/plugins/web/admin/js/*.js + sed -i "s/TDengine/TQ/g" ${top_dir}/../enterprise/src/plugins/web/admin/js/*.js +} diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index ed14e10ae96cf31e18c4a99b9fcee8c452a5ab3a..f8d4bf167d32302c89e7307b4a83fe5428f05913 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -15,13 +15,22 @@ serverFqdn="" # -----------------------Variables definition--------------------- script_dir=$(dirname $(readlink -f "$0")) # Dynamic directory -data_dir="/var/lib/taos" -log_dir="/var/log/taos" - -data_link_dir="/usr/local/taos/data" -log_link_dir="/usr/local/taos/log" - -cfg_install_dir="/etc/taos" +clientName="taos" +serverName="taosd" +configFile="taos.cfg" +productName="TDengine" +emailName="taosdata.com" +uninstallScript="rmtaos" +historyFile="taos_history" +tarName="taos.tar.gz" +dataDir="/var/lib/taos" +logDir="/var/log/taos" +configDir="/etc/taos" +installDir="/usr/local/taos" + +data_dir=${dataDir} +log_dir=${logDir} +cfg_install_dir=${configDir} bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" @@ -29,13 +38,10 @@ lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" #install main path -install_main_dir="/usr/local/taos" +install_main_dir=${installDir} # old bin dir -bin_dir="/usr/local/taos/bin" - -# v1.5 jar dir -#v15_java_app_dir="/usr/local/lib/taos" +bin_dir="${installDir}/bin" service_config_dir="/etc/systemd/system" nginx_port=6060 @@ -49,8 +55,8 @@ GREEN_UNDERLINE='\033[4;32m' NC='\033[0m' csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " +if command -v sudo >/dev/null; then + csudo="sudo " fi update_flag=0 @@ -58,52 +64,51 @@ prompt_force=0 initd_mod=0 service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else +if pidof systemd &>/dev/null; then + service_mod=0 +elif $(which service &>/dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &>/dev/null); then + initd_mod=1 + elif $(which insserv &>/dev/null); then + initd_mod=2 + elif $(which update-rc.d &>/dev/null); then + initd_mod=3 + else service_mod=2 + fi +else + service_mod=2 fi - # get the operating system type for using the corresponding init file # ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, 
..., no verification #osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) if [[ -e /etc/os-release ]]; then - osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: + osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) || : else osinfo="" fi #echo "osinfo: ${osinfo}" os_type=0 -if echo $osinfo | grep -qwi "ubuntu" ; then -# echo "This is ubuntu system" +if echo $osinfo | grep -qwi "ubuntu"; then + # echo "This is ubuntu system" os_type=1 -elif echo $osinfo | grep -qwi "debian" ; then -# echo "This is debian system" +elif echo $osinfo | grep -qwi "debian"; then + # echo "This is debian system" os_type=1 -elif echo $osinfo | grep -qwi "Kylin" ; then -# echo "This is Kylin system" +elif echo $osinfo | grep -qwi "Kylin"; then + # echo "This is Kylin system" os_type=1 -elif echo $osinfo | grep -qwi "centos" ; then -# echo "This is centos system" +elif echo $osinfo | grep -qwi "centos"; then + # echo "This is centos system" os_type=2 -elif echo $osinfo | grep -qwi "fedora" ; then -# echo "This is fedora system" +elif echo $osinfo | grep -qwi "fedora"; then + # echo "This is fedora system" os_type=2 -elif echo $osinfo | grep -qwi "Linx" ; then -# echo "This is Linx system" +elif echo $osinfo | grep -qwi "Linx"; then + # echo "This is Linx system" os_type=1 service_mod=0 initd_mod=0 @@ -112,43 +117,38 @@ else echo " osinfo: ${osinfo}" echo " This is an officially unverified linux system," echo " if there are any problems with the installation and operation, " - echo " please feel free to contact taosdata.com for support." + echo " please feel free to contact ${emailName} for support." os_type=1 fi - # ============================= get input parameters ================================================= - -# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...] - # set parameters by default value -interactiveFqdn=yes # [yes | no] -verType=server # [server | client] -initType=systemd # [systemd | service | ...] +interactiveFqdn=yes # [yes | no] +verType=server # [server | client] +initType=systemd # [systemd | service | ...] -while getopts "hv:e:i:" arg -do +while getopts "hv:e:i:" arg; do case $arg in - e) - #echo "interactiveFqdn=$OPTARG" - interactiveFqdn=$( echo $OPTARG ) - ;; - v) - #echo "verType=$OPTARG" - verType=$(echo $OPTARG) - ;; - i) - #echo "initType=$OPTARG" - initType=$(echo $OPTARG) - ;; - h) - echo "Usage: `basename $0` -v [server | client] -e [yes | no]" - exit 0 - ;; - ?) #unknow option - echo "unkonw argument" - exit 1 - ;; + e) + #echo "interactiveFqdn=$OPTARG" + interactiveFqdn=$(echo $OPTARG) + ;; + v) + #echo "verType=$OPTARG" + verType=$(echo $OPTARG) + ;; + i) + #echo "initType=$OPTARG" + initType=$(echo $OPTARG) + ;; + h) + echo "Usage: $(basename $0) -v [server | client] -e [yes | no]" + exit 0 + ;; + ?) 
#unknow option + echo "unkonw argument" + exit 1 + ;; esac done @@ -157,169 +157,161 @@ done function kill_process() { pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : + ${csudo}kill -9 $pid || : fi } function install_main_path() { - #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : - ${csudo}mkdir -p ${install_main_dir} - ${csudo}mkdir -p ${install_main_dir}/cfg - ${csudo}mkdir -p ${install_main_dir}/bin -# ${csudo}mkdir -p ${install_main_dir}/connector - ${csudo}mkdir -p ${install_main_dir}/driver - ${csudo}mkdir -p ${install_main_dir}/examples - ${csudo}mkdir -p ${install_main_dir}/include -# ${csudo}mkdir -p ${install_main_dir}/init.d - if [ "$verMode" == "cluster" ]; then - ${csudo}mkdir -p ${nginx_dir} - fi + #create install main dir and all sub dir + ${csudo}rm -rf ${install_main_dir} || : + ${csudo}mkdir -p ${install_main_dir} + ${csudo}mkdir -p ${install_main_dir}/cfg + ${csudo}mkdir -p ${install_main_dir}/bin + # ${csudo}mkdir -p ${install_main_dir}/connector + ${csudo}mkdir -p ${install_main_dir}/driver + ${csudo}mkdir -p ${install_main_dir}/examples + ${csudo}mkdir -p ${install_main_dir}/include + # ${csudo}mkdir -p ${install_main_dir}/init.d + if [ "$verMode" == "cluster" ]; then + ${csudo}mkdir -p ${nginx_dir} + fi - if [[ -e ${script_dir}/email ]]; then - ${csudo}cp ${script_dir}/email ${install_main_dir}/ ||: - fi + if [[ -e ${script_dir}/email ]]; then + ${csudo}cp ${script_dir}/email ${install_main_dir}/ || : + fi } function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/taos || : - ${csudo}rm -f ${bin_link_dir}/taosd || : - ${csudo}rm -f ${bin_link_dir}/taosadapter || : - ${csudo}rm -f ${bin_link_dir}/taosdemo || : - ${csudo}rm -f ${bin_link_dir}/taosdump || : - ${csudo}rm -f ${bin_link_dir}/rmtaos || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || : - - ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/taos ] && ${csudo}ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : - [ -x ${install_main_dir}/bin/taosd ] && ${csudo}ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : - [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || : - [ -x ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -s ${install_main_dir}/bin/taosBenchmark ${bin_link_dir}/taosdemo || : - [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : - [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : - [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : - [ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || : - [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : - - if [ "$verMode" == "cluster" ]; then - ${csudo}cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo}chmod 0555 ${nginx_dir}/* - ${csudo}mkdir -p ${nginx_dir}/logs - ${csudo}chmod 777 ${nginx_dir}/sbin/nginx - fi + # Remove links + ${csudo}rm -f ${bin_link_dir}/${clientName} || : + 
${csudo}rm -f ${bin_link_dir}/${serverName} || : + ${csudo}rm -f ${bin_link_dir}/taosadapter || : + ${csudo}rm -f ${bin_link_dir}/taosdemo || : + ${csudo}rm -f ${bin_link_dir}/taosdump || : + ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : + ${csudo}rm -f ${bin_link_dir}/tarbitrator || : + ${csudo}rm -f ${bin_link_dir}/set_core || : + ${csudo}rm -f ${bin_link_dir}/run_taosd_and_taosadapter.sh || : + + ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || : + [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || : + [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || : + [ -x ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -s ${install_main_dir}/bin/taosBenchmark ${bin_link_dir}/taosdemo || : + [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || : + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : + [ -x ${install_main_dir}/bin/run_taosd_and_taosadapter.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd_and_taosadapter.sh ${bin_link_dir}/run_taosd_and_taosadapter.sh || : + [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : + + if [ "$verMode" == "cluster" ]; then + ${csudo}cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo}chmod 0555 ${nginx_dir}/* + ${csudo}mkdir -p ${nginx_dir}/logs + ${csudo}chmod 777 ${nginx_dir}/sbin/nginx + fi } function install_lib() { - # Remove links - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - #${csudo}rm -rf ${v15_java_app_dir} || : - ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* - - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 - ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - - if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : - fi + # Remove links + ${csudo}rm -f ${lib_link_dir}/libtaos.* || : + ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : + ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* - #if [ "$verMode" == "cluster" ]; then - # # Compatible with version 1.5 - # ${csudo}mkdir -p ${v15_java_app_dir} - # ${csudo}ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar - # ${csudo}chmod 777 ${v15_java_app_dir} || : - #fi + ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - ${csudo}ldconfig + if [[ -d ${lib64_link_dir} && ! 
-e ${lib64_link_dir}/libtaos.so ]]; then + ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi + + ${csudo}ldconfig } function install_avro() { - if [ "$osType" != "Darwin" ]; then - avro_dir=${script_dir}/avro - if [ -f "${avro_dir}/lib/libavro.so.23.0.0" ] && [ -d /usr/local/$1 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/$1 - ${csudo}/usr/bin/install -c -m 755 ${avro_dir}/lib/libavro.so.23.0.0 /usr/local/$1 - ${csudo}ln -sf /usr/local/$1/libavro.so.23.0.0 /usr/local/$1/libavro.so.23 - ${csudo}ln -sf /usr/local/$1/libavro.so.23 /usr/local/$1/libavro.so - - ${csudo}/usr/bin/install -c -d /usr/local/$1 - [ -f ${avro_dir}/lib/libavro.a ] && - ${csudo}/usr/bin/install -c -m 755 ${avro_dir}/lib/libavro.a /usr/local/$1 - - if [ -d /etc/ld.so.conf.d ]; then - echo "/usr/local/$1" | ${csudo}tee /etc/ld.so.conf.d/libavro.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/libavro.conf" - ${csudo}ldconfig - else - echo "/etc/ld.so.conf.d not found!" - fi - fi + if [ "$osType" != "Darwin" ]; then + avro_dir=${script_dir}/avro + if [ -f "${avro_dir}/lib/libavro.so.23.0.0" ] && [ -d /usr/local/$1 ]; then + ${csudo}/usr/bin/install -c -d /usr/local/$1 + ${csudo}/usr/bin/install -c -m 755 ${avro_dir}/lib/libavro.so.23.0.0 /usr/local/$1 + ${csudo}ln -sf /usr/local/$1/libavro.so.23.0.0 /usr/local/$1/libavro.so.23 + ${csudo}ln -sf /usr/local/$1/libavro.so.23 /usr/local/$1/libavro.so + + ${csudo}/usr/bin/install -c -d /usr/local/$1 + [ -f ${avro_dir}/lib/libavro.a ] && + ${csudo}/usr/bin/install -c -m 755 ${avro_dir}/lib/libavro.a /usr/local/$1 + + if [ -d /etc/ld.so.conf.d ]; then + echo "/usr/local/$1" | ${csudo}tee /etc/ld.so.conf.d/libavro.conf >/dev/null || echo -e "failed to write /etc/ld.so.conf.d/libavro.conf" + ${csudo}ldconfig + else + echo "/etc/ld.so.conf.d not found!" 
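+      # (without ld.so.conf.d, the avro library installed under /usr/local/$1
+      # has to be made visible to the loader via LD_LIBRARY_PATH instead)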
+ fi fi + fi } function install_jemalloc() { - jemalloc_dir=${script_dir}/jemalloc + jemalloc_dir=${script_dir}/jemalloc - if [ -d ${jemalloc_dir} ]; then - ${csudo}/usr/bin/install -c -d /usr/local/bin + if [ -d ${jemalloc_dir} ]; then + ${csudo}/usr/bin/install -c -d /usr/local/bin - if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jeprof ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin - fi - if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then - ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib - ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so - ${csudo}/usr/bin/install -c -d /usr/local/lib - if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig - fi - fi - if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc - fi - if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3 - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 - fi + if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jeprof ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin + fi + if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then + ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then + ${csudo}/usr/bin/install -c -d /usr/local/lib + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib + ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so + ${csudo}/usr/bin/install -c -d /usr/local/lib + if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + 
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig + fi + fi + if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then + ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc + fi + if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then + ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3 + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 + fi - if [ -d /etc/ld.so.conf.d ]; then - echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" - ${csudo}ldconfig - else - echo "/etc/ld.so.conf.d not found!" - fi + if [ -d /etc/ld.so.conf.d ]; then + echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf >/dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" + ${csudo}ldconfig + else + echo "/etc/ld.so.conf.d not found!" fi + fi } function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h + ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : + ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* + ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h + ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } function add_newHostname_to_hosts() { @@ -329,18 +321,17 @@ function add_newHostname_to_hosts() { iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') arr=($iphost) IFS="$OLD_IFS" - for s in "${arr[@]}" - do + for s in "${arr[@]}"; do if [[ "$s" == "$localIp" ]]; then return fi done - ${csudo}echo "127.0.0.1 $1" >> /etc/hosts ||: + ${csudo}echo "127.0.0.1 $1" >>/etc/hosts || : } function set_hostname() { echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" - read newHostname + read newHostname while true; do if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then break @@ -349,12 +340,12 @@ function set_hostname() { fi done - ${csudo}hostname $newHostname ||: - retval=`echo $?` + ${csudo}hostname $newHostname || : + retval=$(echo $?) if [[ $retval != 0 ]]; then - echo - echo "set hostname fail!" - return + echo + echo "set hostname fail!" 
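+    # bail out: the fqdn entry in ${cfg_install_dir}/${configFile} further
+    # below is only rewritten once the hostname change has succeeded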
+ return fi #echo -e -n "$(hostnamectl status --static)" #echo -e -n "$(hostnamectl status --transient)" @@ -362,15 +353,15 @@ function set_hostname() { #ubuntu/centos /etc/hostname if [[ -e /etc/hostname ]]; then - ${csudo}echo $newHostname > /etc/hostname ||: + ${csudo}echo $newHostname >/etc/hostname || : fi #debian: #HOSTNAME=yourname if [[ -e /etc/sysconfig/network ]]; then - ${csudo}sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: + ${csudo}sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network || : fi - ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg + ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/${configFile} serverFqdn=$newHostname if [[ -e /etc/hosts ]]; then @@ -384,20 +375,19 @@ function is_correct_ipaddr() { IFS=" " arr=($iplist) IFS="$OLD_IFS" - for s in "${arr[@]}" - do - if [[ "$s" == "$newIp" ]]; then - return 0 - fi + for s in "${arr[@]}"; do + if [[ "$s" == "$newIp" ]]; then + return 0 + fi done return 1 } function set_ipAsFqdn() { - iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||: + iplist=$(ip address | grep inet | grep -v inet6 | grep -v 127.0.0.1 | awk '{print $2}' | awk -F "/" '{print $1}') || : if [ -z "$iplist" ]; then - iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||: + iplist=$(ifconfig | grep inet | grep -v inet6 | grep -v 127.0.0.1 | awk '{print $2}' | awk -F ":" '{print $2}') || : fi if [ -z "$iplist" ]; then @@ -405,7 +395,7 @@ function set_ipAsFqdn() { echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" localFqdn="127.0.0.1" # Write the local FQDN to configuration file - ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/${configFile} serverFqdn=$localFqdn echo return @@ -418,23 +408,23 @@ function set_ipAsFqdn() { echo echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" read localFqdn - while true; do - if [ ! -z "$localFqdn" ]; then - # Check if correct ip address - is_correct_ipaddr $localFqdn - retval=`echo $?` - if [[ $retval != 0 ]]; then - read -p "Please choose an IP from local IP list:" localFqdn - else - # Write the local FQDN to configuration file - ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg - serverFqdn=$localFqdn - break - fi - else + while true; do + if [ ! -z "$localFqdn" ]; then + # Check if correct ip address + is_correct_ipaddr $localFqdn + retval=$(echo $?) + if [[ $retval != 0 ]]; then read -p "Please choose an IP from local IP list:" localFqdn + else + # Write the local FQDN to configuration file + ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/${configFile} + serverFqdn=$localFqdn + break fi - done + else + read -p "Please choose an IP from local IP list:" localFqdn + fi + done } function local_fqdn_check() { @@ -442,672 +432,572 @@ function local_fqdn_check() { echo echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" echo - if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then + if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" echo - while true - do - read -r -p "Set hostname now? [Y/n] " input - if [ ! 
-n "$input" ]; then - set_hostname - break - else - case $input in - [yY][eE][sS]|[yY]) - set_hostname - break - ;; - - [nN][oO]|[nN]) - set_ipAsFqdn - break - ;; - - *) - echo "Invalid input..." - ;; - esac - fi + while true; do + read -r -p "Set hostname now? [Y/n] " input + if [ ! -n "$input" ]; then + set_hostname + break + else + case $input in + [yY][eE][sS] | [yY]) + set_hostname + break + ;; + + [nN][oO] | [nN]) + set_ipAsFqdn + break + ;; + + *) + echo "Invalid input..." + ;; + esac + fi done fi } function install_taosadapter_config() { - if [ ! -f "${cfg_install_dir}/taosadapter.toml" ]; then - ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taosadapter.toml ] && ${csudo}cp ${script_dir}/cfg/taosadapter.toml ${cfg_install_dir} - [ -f ${cfg_install_dir}/taosadapter.toml ] && ${csudo}chmod 644 ${cfg_install_dir}/taosadapter.toml - fi + if [ ! -f "${cfg_install_dir}/taosadapter.toml" ]; then + ${csudo}mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/taosadapter.toml ] && ${csudo}cp ${script_dir}/cfg/taosadapter.toml ${cfg_install_dir} + [ -f ${cfg_install_dir}/taosadapter.toml ] && ${csudo}chmod 644 ${cfg_install_dir}/taosadapter.toml + fi - [ -f ${script_dir}/cfg/taosadapter.toml ] && - ${csudo}cp -f ${script_dir}/cfg/taosadapter.toml ${cfg_install_dir}/taosadapter.toml.new + [ -f ${script_dir}/cfg/taosadapter.toml ] && + ${csudo}cp -f ${script_dir}/cfg/taosadapter.toml ${cfg_install_dir}/taosadapter.toml.new - [ -f ${cfg_install_dir}/taosadapter.toml ] && - ${csudo}ln -s ${cfg_install_dir}/taosadapter.toml ${install_main_dir}/cfg/taosadapter.toml + [ -f ${cfg_install_dir}/taosadapter.toml ] && + ${csudo}ln -s ${cfg_install_dir}/taosadapter.toml ${install_main_dir}/cfg/taosadapter.toml - [ ! -z $1 ] && return 0 || : # only install client + [ ! -z $1 ] && return 0 || : # only install client } function install_config() { - #${csudo}rm -f ${install_main_dir}/cfg/taos.cfg || : - - if [ ! -f "${cfg_install_dir}/taos.cfg" ]; then - ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo}cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} - ${csudo}chmod 644 ${cfg_install_dir}/* - fi + if [ ! -f "${cfg_install_dir}/${configFile}" ]; then + ${csudo}mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir} + ${csudo}chmod 644 ${cfg_install_dir}/* + fi - ${csudo}cp -f ${script_dir}/cfg/taos.cfg ${cfg_install_dir}/taos.cfg.new - ${csudo}ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo}cp -f ${script_dir}/cfg/${configFile} ${cfg_install_dir}/${configFile}.new + ${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg - [ ! -z $1 ] && return 0 || : # only install client + [ ! 
-z $1 ] && return 0 || : # only install client - if ((${update_flag}==1)); then - return 0 - fi + if ((${update_flag} == 1)); then + return 0 + fi - if [ "$interactiveFqdn" == "no" ]; then - return 0 - fi + if [ "$interactiveFqdn" == "no" ]; then + return 0 + fi - local_fqdn_check + local_fqdn_check - #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" - #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)" - #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)" - #FQDN_PATTERN=":[0-9]{1,5}$" + #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" + #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)" + #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)" + #FQDN_PATTERN=":[0-9]{1,5}$" - # first full-qualified domain name (FQDN) for TDengine cluster system - echo - echo -e -n "${GREEN}Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join${NC}" - echo - echo -e -n "${GREEN}OR leave it blank to build one${NC}:" - read firstEp - while true; do - if [ ! -z "$firstEp" ]; then - # check the format of the firstEp - #if [[ $firstEp == $FQDN_PATTERN ]]; then - # Write the first FQDN to configuration file - ${csudo}sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg - break - #else - # read -p "Please enter the correct FQDN:port: " firstEp - #fi - else - break - fi - done + # first full-qualified domain name (FQDN) for TDengine cluster system + echo + echo -e -n "${GREEN}Enter FQDN:port (like h1.${emailName}:6030) of an existing ${productName} cluster node to join${NC}" + echo + echo -e -n "${GREEN}OR leave it blank to build one${NC}:" + read firstEp + while true; do + if [ ! -z "$firstEp" ]; then + # check the format of the firstEp + #if [[ $firstEp == $FQDN_PATTERN ]]; then + # Write the first FQDN to configuration file + ${csudo}sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/${configFile} + break + #else + # read -p "Please enter the correct FQDN:port: " firstEp + #fi + else + break + fi + done - # user email - #EMAIL_PATTERN='^[A-Za-z0-9\u4e00-\u9fa5]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$' - #EMAIL_PATTERN='^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$' - #EMAIL_PATTERN="^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$" - echo - echo -e -n "${GREEN}Enter your email address for priority support or enter empty to skip${NC}: " - read emailAddr - while true; do - if [ ! -z "$emailAddr" ]; then - # check the format of the emailAddr - #if [[ "$emailAddr" =~ $EMAIL_PATTERN ]]; then - # Write the email address to temp file - email_file="${install_main_dir}/email" - ${csudo}bash -c "echo $emailAddr > ${email_file}" - break - #else - # read -p "Please enter the correct email address: " emailAddr - #fi - else - break - fi - done + # user email + #EMAIL_PATTERN='^[A-Za-z0-9\u4e00-\u9fa5]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$' + #EMAIL_PATTERN='^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$' + #EMAIL_PATTERN="^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$" + echo + echo -e -n "${GREEN}Enter your email address for priority support or enter empty to skip${NC}: " + read emailAddr + while true; do + if [ ! 
-z "$emailAddr" ]; then + # check the format of the emailAddr + #if [[ "$emailAddr" =~ $EMAIL_PATTERN ]]; then + # Write the email address to temp file + email_file="${install_main_dir}/email" + ${csudo}bash -c "echo $emailAddr > ${email_file}" + break + #else + # read -p "Please enter the correct email address: " emailAddr + #fi + else + break + fi + done } - function install_log() { - ${csudo}rm -rf ${log_dir} || : - ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} + ${csudo}rm -rf ${log_dir} || : + ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - ${csudo}ln -s ${log_dir} ${install_main_dir}/log + ${csudo}ln -s ${log_dir} ${install_main_dir}/log } function install_data() { - ${csudo}mkdir -p ${data_dir} + ${csudo}mkdir -p ${data_dir} - ${csudo}ln -s ${data_dir} ${install_main_dir}/data + ${csudo}ln -s ${data_dir} ${install_main_dir}/data } function install_connector() { - ${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/ + ${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/ } function install_examples() { - if [ -d ${script_dir}/examples ]; then - ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples - fi + if [ -d ${script_dir}/examples ]; then + ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi } function clean_service_on_sysvinit() { - #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - #${csudo}sed -i "\|${restart_config_str}|d" /etc/inittab || : + if pidof ${serverName} &>/dev/null; then + ${csudo}service ${serverName} stop || : + fi - if pidof taosd &> /dev/null; then - ${csudo}service taosd stop || : - fi + if pidof tarbitrator &>/dev/null; then + ${csudo}service tarbitratord stop || : + fi - if pidof tarbitrator &> /dev/null; then - ${csudo}service tarbitratord stop || : + if ((${initd_mod} == 1)); then + if [ -e ${service_config_dir}/${serverName} ]; then + ${csudo}chkconfig --del ${serverName} || : fi - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/taosd ]; then - ${csudo}chkconfig --del taosd || : - fi - - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/taosd ]; then - ${csudo}insserv -r taosd || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/taosd ]; then - ${csudo}update-rc.d -f taosd remove || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo}chkconfig --del tarbitratord || : + fi + elif ((${initd_mod} == 2)); then + if [ -e ${service_config_dir}/${serverName} ]; then + ${csudo}insserv -r ${serverName} || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo}insserv -r tarbitratord || : + fi + elif ((${initd_mod} == 3)); then + if [ -e ${service_config_dir}/${serverName} ]; then + ${csudo}update-rc.d -f ${serverName} remove || : fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo}update-rc.d -f tarbitratord remove || : + fi + fi - ${csudo}rm -f ${service_config_dir}/taosd || : - ${csudo}rm -f ${service_config_dir}/tarbitratord || : + ${csudo}rm -f ${service_config_dir}/${serverName} || : + ${csudo}rm -f ${service_config_dir}/tarbitratord || : - if $(which init &> /dev/null); then - ${csudo}init q || : - fi + if $(which init &>/dev/null); then + ${csudo}init q || : 
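+    # ("init q" asks a sysvinit-style init to re-read /etc/inittab so that
+    # the service entries removed above take effect immediately)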
+ fi } function install_service_on_sysvinit() { - clean_service_on_sysvinit - sleep 1 - - # Install taosd service - - if ((${os_type}==1)); then -# ${csudo}cp -f ${script_dir}/init.d/taosd.deb ${install_main_dir}/init.d/taosd - ${csudo}cp ${script_dir}/init.d/taosd.deb ${service_config_dir}/taosd && ${csudo}chmod a+x ${service_config_dir}/taosd -# ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - elif ((${os_type}==2)); then -# ${csudo}cp -f ${script_dir}/init.d/taosd.rpm ${install_main_dir}/init.d/taosd - ${csudo}cp ${script_dir}/init.d/taosd.rpm ${service_config_dir}/taosd && ${csudo}chmod a+x ${service_config_dir}/taosd -# ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - fi + clean_service_on_sysvinit + sleep 1 + + # Install server service + if ((${os_type} == 1)); then + # ${csudo}cp -f ${script_dir}/init.d/${serverName}.deb ${install_main_dir}/init.d/${serverName} + ${csudo}cp ${script_dir}/init.d/${serverName}.deb ${service_config_dir}/${serverName} && ${csudo}chmod a+x ${service_config_dir}/${serverName} + # ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord + ${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord + elif ((${os_type} == 2)); then + # ${csudo}cp -f ${script_dir}/init.d/${serverName}.rpm ${install_main_dir}/init.d/${serverName} + ${csudo}cp ${script_dir}/init.d/${serverName}.rpm ${service_config_dir}/${serverName} && ${csudo}chmod a+x ${service_config_dir}/${serverName} + # ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord + ${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord + fi - #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - #${csudo}grep -q -F "$restart_config_str" /etc/inittab || ${csudo}bash -c "echo '${restart_config_str}' >> /etc/inittab" - - if ((${initd_mod}==1)); then - ${csudo}chkconfig --add taosd || : - ${csudo}chkconfig --level 2345 taosd on || : - ${csudo}chkconfig --add tarbitratord || : - ${csudo}chkconfig --level 2345 tarbitratord on || : - elif ((${initd_mod}==2)); then - ${csudo}insserv taosd || : - ${csudo}insserv -d taosd || : - ${csudo}insserv tarbitratord || : - ${csudo}insserv -d tarbitratord || : - elif ((${initd_mod}==3)); then - ${csudo}update-rc.d taosd defaults || : - ${csudo}update-rc.d tarbitratord defaults || : - fi + if ((${initd_mod} == 1)); then + ${csudo}chkconfig --add ${serverName} || : + ${csudo}chkconfig --level 2345 ${serverName} on || : + ${csudo}chkconfig --add tarbitratord || : + ${csudo}chkconfig --level 2345 tarbitratord on || : + elif ((${initd_mod} == 2)); then + ${csudo}insserv ${serverName} || : + ${csudo}insserv -d ${serverName} || : + ${csudo}insserv tarbitratord || : + ${csudo}insserv -d tarbitratord || : + elif ((${initd_mod} == 3)); then + ${csudo}update-rc.d ${serverName} defaults || : + ${csudo}update-rc.d tarbitratord defaults || : + fi } function clean_service_on_systemd() { - taosd_service_config="${service_config_dir}/taosd.service" - if systemctl is-active 
--quiet taosd; then - echo "TDengine is running, stopping it..." - ${csudo}systemctl stop taosd &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable taosd &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${taosd_service_config} + taosd_service_config="${service_config_dir}/${serverName}.service" + if systemctl is-active --quiet ${serverName}; then + echo "${productName} is running, stopping it..." + ${csudo}systemctl stop ${serverName} &>/dev/null || echo &>/dev/null + fi + ${csudo}systemctl disable ${serverName} &>/dev/null || echo &>/dev/null + ${csudo}rm -f ${taosd_service_config} - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - if systemctl is-active --quiet tarbitratord; then - echo "tarbitrator is running, stopping it..." - ${csudo}systemctl stop tarbitratord &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable tarbitratord &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${tarbitratord_service_config} + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + if systemctl is-active --quiet tarbitratord; then + echo "tarbitrator is running, stopping it..." + ${csudo}systemctl stop tarbitratord &>/dev/null || echo &>/dev/null + fi + ${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null + ${csudo}rm -f ${tarbitratord_service_config} - if [ "$verMode" == "cluster" ]; then - nginx_service_config="${service_config_dir}/nginxd.service" - if systemctl is-active --quiet nginxd; then - echo "Nginx for TDengine is running, stopping it..." - ${csudo}systemctl stop nginxd &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable nginxd &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${nginx_service_config} + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + if systemctl is-active --quiet nginxd; then + echo "Nginx for ${productName} is running, stopping it..." 
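+      # same retirement sequence as for ${serverName} above: stop the running
+      # unit, disable it, then delete its unit file; systemd is reloaded when
+      # the replacement units are installed below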
+ ${csudo}systemctl stop nginxd &>/dev/null || echo &>/dev/null fi + ${csudo}systemctl disable nginxd &>/dev/null || echo &>/dev/null + ${csudo}rm -f ${nginx_service_config} + fi } -# taos:2345:respawn:/etc/init.d/taosd start - function install_service_on_systemd() { - clean_service_on_systemd + clean_service_on_systemd - [ -f ${script_dir}/cfg/taosd.service ] &&\ - ${csudo}cp ${script_dir}/cfg/taosd.service \ - ${service_config_dir}/ || : - ${csudo}systemctl daemon-reload + [ -f ${script_dir}/cfg/${serverName}.service ] && + ${csudo}cp ${script_dir}/cfg/${serverName}.service \ + ${service_config_dir}/ || : + ${csudo}systemctl daemon-reload - #taosd_service_config="${service_config_dir}/taosd.service" - #${csudo}bash -c "echo '[Unit]' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}" - #${csudo}bash -c "echo >> ${taosd_service_config}" - #${csudo}bash -c "echo '[Service]' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'Type=simple' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'StandardOutput=null' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'Restart=always' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'StartLimitBurst=3' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${taosd_service_config}" - ##${csudo}bash -c "echo 'StartLimitIntervalSec=60s' >> ${taosd_service_config}" - #${csudo}bash -c "echo >> ${taosd_service_config}" - #${csudo}bash -c "echo '[Install]' >> ${taosd_service_config}" - #${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" - ${csudo}systemctl enable taosd - - [ -f ${script_dir}/cfg/tarbitratord.service ] &&\ - ${csudo}cp ${script_dir}/cfg/tarbitratord.service \ - ${service_config_dir}/ || : - ${csudo}systemctl daemon-reload + ${csudo}systemctl enable ${serverName} - #tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - #${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" - #${csudo}bash -c 
"echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}" - #${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" - #${csudo}systemctl enable tarbitratord + [ -f ${script_dir}/cfg/tarbitratord.service ] && + ${csudo}cp ${script_dir}/cfg/tarbitratord.service \ + ${service_config_dir}/ || : + ${csudo}systemctl daemon-reload - if [ "$verMode" == "cluster" ]; then - [ -f ${script_dir}/cfg/nginxd.service ] &&\ - ${csudo}cp ${script_dir}/cfg/nginxd.service \ - ${service_config_dir}/ || : - ${csudo}systemctl daemon-reload - - #nginx_service_config="${service_config_dir}/nginxd.service" - #${csudo}bash -c "echo '[Unit]' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" - #${csudo}bash -c "echo >> ${nginx_service_config}" - #${csudo}bash -c "echo '[Service]' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'Type=forking' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'Restart=always' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" - #${csudo}bash -c "echo >> ${nginx_service_config}" - #${csudo}bash -c "echo '[Install]' >> ${nginx_service_config}" - #${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" - if ! ${csudo}systemctl enable nginxd &> /dev/null; then - ${csudo}systemctl daemon-reexec - ${csudo}systemctl enable nginxd - fi - ${csudo}systemctl start nginxd - fi + if ! 
${csudo}systemctl enable nginxd &>/dev/null; then
+ ${csudo}systemctl daemon-reexec
+ ${csudo}systemctl enable nginxd
+ fi
+ ${csudo}systemctl start nginxd
}

function install_taosadapter_service() {
- if ((${service_mod}==0)); then
- [ -f ${script_dir}/cfg/taosadapter.service ] &&\
- ${csudo}cp ${script_dir}/cfg/taosadapter.service \
- ${service_config_dir}/ || :
- ${csudo}systemctl daemon-reload
- fi
+ if ((${service_mod} == 0)); then
+ [ -f ${script_dir}/cfg/taosadapter.service ] &&
+ ${csudo}cp ${script_dir}/cfg/taosadapter.service \
+ ${service_config_dir}/ || :
+ ${csudo}systemctl daemon-reload
+ fi
}

function install_service() {
- if ((${service_mod}==0)); then
- install_service_on_systemd
- elif ((${service_mod}==1)); then
- install_service_on_sysvinit
- else
- # must manual stop taosd
- kill_process taosd
- fi
+ if ((${service_mod} == 0)); then
+ install_service_on_systemd
+ elif ((${service_mod} == 1)); then
+ install_service_on_sysvinit
+ else
+ kill_process ${serverName}
+ fi
}

-vercomp () {
- if [[ $1 == $2 ]]; then
- return 0
- fi
- local IFS=.
- local i ver1=($1) ver2=($2)
- # fill empty fields in ver1 with zeros
- for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
- ver1[i]=0
- done
-
- for ((i=0; i<${#ver1[@]}; i++)); do
- if [[ -z ${ver2[i]} ]]
- then
- # fill empty fields in ver2 with zeros
- ver2[i]=0
- fi
- if ((10#${ver1[i]} > 10#${ver2[i]}))
- then
- return 1
- fi
- if ((10#${ver1[i]} < 10#${ver2[i]}))
- then
- return 2
- fi
- done
+vercomp() {
+ if [[ $1 == $2 ]]; then return 0
+ fi
+ local IFS=.
+ local i ver1=($1) ver2=($2)
+ # fill empty fields in ver1 with zeros
+ for ((i = ${#ver1[@]}; i < ${#ver2[@]}; i++)); do
+ ver1[i]=0
+ done
+
+ for ((i = 0; i < ${#ver1[@]}; i++)); do
+ if [[ -z ${ver2[i]} ]]; then
+ # fill empty fields in ver2 with zeros
+ ver2[i]=0
+ fi
+ if ((10#${ver1[i]} > 10#${ver2[i]})); then
+ return 1
+ fi
+ if ((10#${ver1[i]} < 10#${ver2[i]})); then
+ return 2
+ fi
+ done
+ return 0
}

function is_version_compatible() {
- curr_version=`ls ${script_dir}/driver/libtaos.so* | awk -F 'libtaos.so.' '{print $2}'`
+ curr_version=$(ls ${script_dir}/driver/libtaos.so* | awk -F 'libtaos.so.' '{print $2}')

- if [ -f ${script_dir}/driver/vercomp.txt ]; then
- min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
- else
- min_compatible_version=$(${script_dir}/bin/taosd -V | head -1 | cut -d ' ' -f 5)
- fi
+ if [ -f ${script_dir}/driver/vercomp.txt ]; then
+ min_compatible_version=$(cat ${script_dir}/driver/vercomp.txt)
+ else
+ min_compatible_version=$(${script_dir}/bin/${serverName} -V | head -1 | cut -d ' ' -f 5)
+ fi

- # [TD-5628] prompt to execute taosd --force-keep-file if upgrade from lower version within 2.0.16.0
- exist_version=$(/usr/local/taos/bin/taosd -V | head -1 | cut -d ' ' -f 3)
- vercomp $exist_version "2.0.16.0"
- case $? in
- 2)
- prompt_force=1
- ;;
- esac
+ # [TD-5628] prompt to execute ${serverName} --force-keep-file when upgrading from a version below 2.0.16.0
+ exist_version=$(${installDir}/bin/${serverName} -V | head -1 | cut -d ' ' -f 3)
+ vercomp $exist_version "2.0.16.0"
+ case $? in
+ 2)
+ prompt_force=1
+ ;;
+ esac

- vercomp $curr_version $min_compatible_version
- echo "" # avoid $? value not update
+ vercomp $curr_version $min_compatible_version
+ compat_result=$? # capture vercomp's result before any later command overwrites $?
+ echo ""

- case $? in
- 0) return 0;;
- 1) return 0;;
- 2) return 1;;
- esac
+ case ${compat_result} in
+ 0) return 0 ;;
+ 1) return 0 ;;
+ 2) return 1 ;;
+ esac
}

function update_TDengine() {
- # Check if version compatible
- if ! 
is_version_compatible; then - echo -e "${RED}Version incompatible${NC}" - return 1 - fi + # Check if version compatible + if ! is_version_compatible; then + echo -e "${RED}Version incompatible${NC}" + return 1 + fi - # Start to update - if [ ! -e taos.tar.gz ]; then - echo "File taos.tar.gz does not exist" - exit 1 - fi - tar -zxf taos.tar.gz - install_jemalloc - #install_avro lib - #install_avro lib64 - - echo -e "${GREEN}Start to update TDengine...${NC}" - # Stop the service if running - if pidof taosd &> /dev/null; then - if ((${service_mod}==0)); then - ${csudo}systemctl stop taosd || : - elif ((${service_mod}==1)); then - ${csudo}service taosd stop || : - else - kill_process taosd - fi - sleep 1 + # Start to update + if [ ! -e ${tarName} ]; then + echo "File ${tarName} does not exist" + exit 1 + fi + tar -zxf ${tarName} + install_jemalloc + #install_avro lib + #install_avro lib64 + + echo -e "${GREEN}Start to update ${productName}...${NC}" + # Stop the service if running + if pidof ${serverName} &>/dev/null; then + if ((${service_mod} == 0)); then + ${csudo}systemctl stop ${serverName} || : + elif ((${service_mod} == 1)); then + ${csudo}service ${serverName} stop || : + else + kill_process ${serverName} fi + sleep 1 + fi - if [ "$verMode" == "cluster" ]; then - if pidof nginx &> /dev/null; then - if ((${service_mod}==0)); then - ${csudo}systemctl stop nginxd || : - elif ((${service_mod}==1)); then - ${csudo}service nginxd stop || : - else - kill_process nginx - fi - sleep 1 + if [ "$verMode" == "cluster" ]; then + if pidof nginx &>/dev/null; then + if ((${service_mod} == 0)); then + ${csudo}systemctl stop nginxd || : + elif ((${service_mod} == 1)); then + ${csudo}service nginxd stop || : + else + kill_process nginx fi + sleep 1 fi + fi - install_main_path - - install_log - install_header - install_lib -# if [ "$pagMode" != "lite" ]; then -# install_connector -# fi - install_examples - if [ -z $1 ]; then - install_bin - install_service - install_taosadapter_service - install_taosadapter_config - - openresty_work=false - if [ "$verMode" == "cluster" ]; then - # Check if openresty is installed - # Check if nginx is installed successfully - if type curl &> /dev/null; then - if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then - echo -e "\033[44;32;1mNginx for TDengine is updated successfully!${NC}" - openresty_work=true - else - echo -e "\033[44;31;5mNginx for TDengine does not work! 
Please try again!\033[0m"
- fi
- fi
- fi
+ install_main_path

- #echo
- #echo -e "\033[44;32;1mTDengine is updated successfully!${NC}"
- echo
- echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg"
- echo -e "${GREEN_DARK}To configure Taos Adapter (if has) ${NC}: edit /etc/taos/taosadapter.toml"
- if ((${service_mod}==0)); then
- echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo}systemctl start taosd${NC}"
- elif ((${service_mod}==1)); then
- echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo}service taosd start${NC}"
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ if [ -z $1 ]; then
+ install_bin
+ install_service
+ install_taosadapter_service
+ install_config
+ install_taosadapter_config
+
+ openresty_work=false
+ if [ "$verMode" == "cluster" ]; then
+ # Check if openresty is installed
+ # Check if nginx is installed successfully
+ if type curl &>/dev/null; then
+ if curl -sSf http://127.0.0.1:${nginx_port} &>/dev/null; then
+ echo -e "\033[44;32;1mNginx for ${productName} is updated successfully!${NC}"
+ openresty_work=true
else
- echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}"
- echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}"
+ echo -e "\033[44;31;5mNginx for ${productName} does not work! Please try again!\033[0m"
fi
+ fi
+ fi

- if [ ${openresty_work} = 'true' ]; then
- echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
- else
- echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos -h $serverFqdn${NC} in shell${NC}"
- fi
+ #echo
+ #echo -e "\033[44;32;1mTDengine is updated successfully!${NC}"
+ echo
+ echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
+ echo -e "${GREEN_DARK}To configure Taos Adapter (if installed) ${NC}: edit ${configDir}/taosadapter.toml"
+ if ((${service_mod} == 0)); then
+ echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
+ elif ((${service_mod} == 1)); then
+ echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start Taos Adapter (if installed)${NC}: taosadapter &${NC}"
+ echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}"
+ fi

- if ((${prompt_force}==1)); then
- echo ""
- echo -e "${RED}Please run 'taosd --force-keep-file' at first time for the exist TDengine $exist_version!${NC}"
- fi
- echo
- echo -e "\033[44;32;1mTDengine is updated successfully!${NC}"
+ if [ ${openresty_work} = 'true' ]; then
+ echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName} -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
else
- install_bin
- install_config
+ echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName} -h $serverFqdn${NC} in shell${NC}"
+ fi

- echo
- echo -e "\033[44;32;1mTDengine client is updated successfully!${NC}"
+ if ((${prompt_force} == 1)); then
+ echo ""
+ echo -e "${RED}Please run '${serverName} --force-keep-file' on the first start after upgrading from the existing ${productName} $exist_version!${NC}"
fi
+ echo
+ echo -e "\033[44;32;1m${productName} is updated successfully!${NC}"
+ else
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}"
+ fi

- rm -rf $(tar 
-tf taos.tar.gz) + rm -rf $(tar -tf ${tarName}) } function install_TDengine() { - # Start to install - if [ ! -e taos.tar.gz ]; then - echo "File taos.tar.gz does not exist" - exit 1 - fi - tar -zxf taos.tar.gz - - echo -e "${GREEN}Start to install TDengine...${NC}" + # Start to install + if [ ! -e ${tarName} ]; then + echo "File ${tarName} does not exist" + exit 1 + fi + tar -zxf ${tarName} - install_main_path + echo -e "${GREEN}Start to install ${productName}...${NC}" - if [ -z $1 ]; then - install_data - fi + install_main_path - install_log - install_header - install_lib - install_jemalloc - #install_avro lib - #install_avro lib64 - -# if [ "$pagMode" != "lite" ]; then -# install_connector -# fi - install_examples - - if [ -z $1 ]; then # install service and client - # For installing new - install_bin - install_service - install_taosadapter_service - install_taosadapter_config - - openresty_work=false - if [ "$verMode" == "cluster" ]; then - # Check if nginx is installed successfully - if type curl &> /dev/null; then - if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then - echo -e "\033[44;32;1mNginx for TDengine is installed successfully!${NC}" - openresty_work=true - else - echo -e "\033[44;31;5mNginx for TDengine does not work! Please try again!\033[0m" - fi - fi - fi + if [ -z $1 ]; then + install_data + fi - install_config - - # Ask if to start the service - #echo - #echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" - echo - echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" - echo -e "${GREEN_DARK}To configure taosadapter (if has) ${NC}: edit /etc/taos/taosadapter.toml" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo}systemctl start taosd${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo}service taosd start${NC}" + install_log + install_header + install_lib + install_jemalloc + #install_avro lib + #install_avro lib64 + + # if [ "$pagMode" != "lite" ]; then + # install_connector + # fi + install_examples + + if [ -z $1 ]; then # install service and client + # For installing new + install_bin + install_service + install_taosadapter_service + + openresty_work=false + if [ "$verMode" == "cluster" ]; then + # Check if nginx is installed successfully + if type curl &>/dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &>/dev/null; then + echo -e "\033[44;32;1mNginx for ${productName} is installed successfully!${NC}" + openresty_work=true else - echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}" - echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}" + echo -e "\033[44;31;5mNginx for ${productName} does not work! Please try again!\033[0m" fi + fi + fi - #if [ ${openresty_work} = 'true' ]; then - # echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" - #else - # echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" - #fi - - if [ ! 
-z "$firstEp" ]; then
- tmpFqdn=${firstEp%%:*}
- substr=":"
- if [[ $firstEp =~ $substr ]];then
- tmpPort=${firstEp#*:}
- else
- tmpPort=""
- fi
- if [[ "$tmpPort" != "" ]];then
- echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}"
- else
- echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}"
- fi
- echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
- echo
- elif [ ! -z "$serverFqdn" ]; then
- echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}"
- echo
- fi
+ install_config

- echo -e "\033[44;32;1mTDengine is installed successfully!${NC}"
- echo
- else # Only install client
- install_bin
- install_config
- echo
- echo -e "\033[44;32;1mTDengine client is installed successfully!${NC}"
+ # Ask if to start the service
+ #echo
+ #echo -e "\033[44;32;1mTDengine is installed successfully!${NC}"
+ echo
+ echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
+ echo -e "${GREEN_DARK}To configure taosadapter (if installed) ${NC}: edit ${configDir}/taosadapter.toml"
+ if ((${service_mod} == 0)); then
+ echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
+ elif ((${service_mod} == 1)); then
+ echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start Taos Adapter (if installed)${NC}: taosadapter &${NC}"
+ echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
fi

- touch ~/.taos_history
- rm -rf $(tar -tf taos.tar.gz)
-}
+ if [ ! -z "$firstEp" ]; then
+ tmpFqdn=${firstEp%%:*}
+ substr=":"
+ if [[ $firstEp =~ $substr ]]; then
+ tmpPort=${firstEp#*:}
+ else
+ tmpPort=""
+ fi
+ if [[ "$tmpPort" != "" ]]; then
+ echo -e "${GREEN_DARK}To access ${productName} ${NC}: ${clientName} -h $tmpFqdn -P $tmpPort${GREEN_DARK} to log in to the cluster, then${NC}"
+ else
+ echo -e "${GREEN_DARK}To access ${productName} ${NC}: ${clientName} -h $tmpFqdn${GREEN_DARK} to log in to the cluster, then${NC}"
+ fi
+ echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
+ echo
+ elif [ ! 
-z "$serverFqdn" ]; then
+ echo -e "${GREEN_DARK}To access ${productName} ${NC}: ${clientName} -h $serverFqdn${GREEN_DARK} to log in to the ${productName} server${NC}"
+ echo
+ fi
+ echo -e "\033[44;32;1m${productName} is installed successfully!${NC}"
+ echo
+ else # Only install client
+ install_bin
+ install_config
+ echo
+ echo -e "\033[44;32;1m${productName} client is installed successfully!${NC}"
+ fi
+
+ touch ~/.${historyFile}
+ rm -rf $(tar -tf ${tarName})
+}

## ==============================Main program starts from here============================
serverFqdn=$(hostname)
if [ "$verType" == "server" ]; then
- # Install server and client
- if [ -x ${bin_dir}/taosd ]; then
- update_flag=1
- update_TDengine
- else
- install_TDengine
- fi
+ # Install server and client
+ if [ -x ${bin_dir}/${serverName} ]; then
+ update_flag=1
+ update_TDengine
+ else
+ install_TDengine
+ fi
elif [ "$verType" == "client" ]; then
- interactiveFqdn=no
- # Only install client
- if [ -x ${bin_dir}/taos ]; then
- update_flag=1
- update_TDengine client
- else
- install_TDengine client
- fi
+ interactiveFqdn=no
+ # Only install client
+ if [ -x ${bin_dir}/${clientName} ]; then
+ update_flag=1
+ update_TDengine client
+ else
+ install_TDengine client
+ fi
else
- echo "please input correct verType"
+ echo "please input a correct verType: server or client"
fi

diff --git a/packaging/tools/install_arbi.sh b/packaging/tools/install_arbi.sh
index a582e0322aa025a69690a4041afd2fa24cc20a62..e3c63965d4beee31cea91d2f8fd84e3d2bdd00d3 100755
--- a/packaging/tools/install_arbi.sh
+++ b/packaging/tools/install_arbi.sh
@@ -28,57 +28,56 @@ GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'

csudo=""
-if command -v sudo > /dev/null; then
- csudo="sudo "
+if command -v sudo >/dev/null; then
+ csudo="sudo "
fi

update_flag=0

initd_mod=0
service_mod=2
-if pidof systemd &> /dev/null; then
- service_mod=0
-elif $(which service &> /dev/null); then
- service_mod=1
- service_config_dir="/etc/init.d"
- if $(which chkconfig &> /dev/null); then
- initd_mod=1
- elif $(which insserv &> /dev/null); then
- initd_mod=2
- elif $(which update-rc.d &> /dev/null); then
- initd_mod=3
- else
- service_mod=2
- fi
-else
+if pidof systemd &>/dev/null; then
+ service_mod=0
+elif $(which service &>/dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &>/dev/null); then
+ initd_mod=1
+ elif $(which insserv &>/dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &>/dev/null); then
+ initd_mod=3
+ else
service_mod=2
+ fi
+else
+ service_mod=2
fi
-
# get the operating system type for using the corresponding init file
# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
if [[ -e /etc/os-release ]]; then
- osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) || :
else
osinfo=""
fi
#echo "osinfo: ${osinfo}"
os_type=0
-if echo $osinfo | grep -qwi "ubuntu" ; then
-# echo "This is ubuntu system"
+if echo $osinfo | grep -qwi "ubuntu"; then
+ # echo "This is ubuntu system"
os_type=1
-elif echo $osinfo | grep -qwi "debian" ; then
-# echo "This is debian system"
+elif echo $osinfo | grep -qwi "debian"; then
+ # echo "This is debian system"
os_type=1
-elif echo $osinfo | grep -qwi "Kylin" ; then
-# echo "This is Kylin system"
+elif echo $osinfo | grep -qwi "Kylin"; then
+ # echo "This is Kylin system"
os_type=1
-elif echo $osinfo | grep -qwi "centos" ; then
-# echo "This is centos system"
+elif echo 
$osinfo | grep -qwi "centos"; then + # echo "This is centos system" os_type=2 -elif echo $osinfo | grep -qwi "fedora" ; then -# echo "This is fedora system" +elif echo $osinfo | grep -qwi "fedora"; then + # echo "This is fedora system" os_type=2 else echo " osinfo: ${osinfo}" @@ -91,265 +90,250 @@ fi function kill_tarbitrator() { pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : + ${csudo}kill -9 $pid || : fi } function install_main_path() { - #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : - ${csudo}mkdir -p ${install_main_dir} - ${csudo}mkdir -p ${install_main_dir}/bin - #${csudo}mkdir -p ${install_main_dir}/include - ${csudo}mkdir -p ${install_main_dir}/init.d + #create install main dir and all sub dir + ${csudo}rm -rf ${install_main_dir} || : + ${csudo}mkdir -p ${install_main_dir} + ${csudo}mkdir -p ${install_main_dir}/bin + #${csudo}mkdir -p ${install_main_dir}/include + ${csudo}mkdir -p ${install_main_dir}/init.d } function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/rmtarbitrator || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/remove_arbi.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_arbi.sh ${bin_link_dir}/rmtarbitrator || : - [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : + # Remove links + ${csudo}rm -f ${bin_link_dir}/rmtarbitrator || : + ${csudo}rm -f ${bin_link_dir}/tarbitrator || : + ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/remove_arbi.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_arbi.sh ${bin_link_dir}/rmtarbitrator || : + [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : } function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h + ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : + ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* + ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h + ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } function install_jemalloc() { - jemalloc_dir=${script_dir}/jemalloc - - if [ -d ${jemalloc_dir} ]; then - ${csudo}/usr/bin/install -c -d /usr/local/bin - - if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jeprof ]; then - ${csudo}/usr/bin/install -c -m 755 
${jemalloc_dir}/bin/jeprof /usr/local/bin - fi - if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then - ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib - ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so - ${csudo}/usr/bin/install -c -d /usr/local/lib - if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig - fi - fi - if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc - fi - if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3 - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 - fi - - if [ -d /etc/ld.so.conf.d ]; then - echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" - ${csudo}ldconfig - else - echo "/etc/ld.so.conf.d not found!" 
- fi - fi -} + jemalloc_dir=${script_dir}/jemalloc -function clean_service_on_sysvinit() { - #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - #${csudo}sed -i "\|${restart_config_str}|d" /etc/inittab || : + if [ -d ${jemalloc_dir} ]; then + ${csudo}/usr/bin/install -c -d /usr/local/bin - if pidof tarbitrator &> /dev/null; then - ${csudo}service tarbitratord stop || : + if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : + if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jeprof ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin + fi + if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then + ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then + ${csudo}/usr/bin/install -c -d /usr/local/lib + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib + ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so + ${csudo}/usr/bin/install -c -d /usr/local/lib + if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig fi fi + if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then + ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc + fi + if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then + ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3 + ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 + fi - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : + if [ -d /etc/ld.so.conf.d ]; then + echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf >/dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" + ${csudo}ldconfig + else + echo "/etc/ld.so.conf.d not found!" 
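# Note: once jemalloc.conf is written and ldconfig has run, the result can be
# verified without starting the server. A small sketch under the same
# assumptions as the code above (not part of this patch):
#
#   if ldconfig -p | grep -q 'libjemalloc.so.2'; then
#     echo "jemalloc is registered with the dynamic linker"
#   else
#     echo "warning: libjemalloc.so.2 not in the ldconfig cache" >&2
#   fi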
fi + fi } -function install_service_on_sysvinit() { - clean_service_on_sysvinit - sleep 1 - - # Install taosd service +function clean_service_on_sysvinit() { + if pidof tarbitrator &>/dev/null; then + ${csudo}service tarbitratord stop || : + fi - if ((${os_type}==1)); then - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - elif ((${os_type}==2)); then - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord + if ((${initd_mod} == 1)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo}chkconfig --del tarbitratord || : fi - - #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - #${csudo}grep -q -F "$restart_config_str" /etc/inittab || ${csudo}bash -c "echo '${restart_config_str}' >> /etc/inittab" - - if ((${initd_mod}==1)); then - ${csudo}chkconfig --add tarbitratord || : - ${csudo}chkconfig --level 2345 tarbitratord on || : - elif ((${initd_mod}==2)); then - ${csudo}insserv tarbitratord || : - ${csudo}insserv -d tarbitratord || : - elif ((${initd_mod}==3)); then - ${csudo}update-rc.d tarbitratord defaults || : + elif ((${initd_mod} == 2)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo}insserv -r tarbitratord || : fi + elif ((${initd_mod} == 3)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo}update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo}rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &>/dev/null); then + ${csudo}init q || : + fi +} + +function install_service_on_sysvinit() { + clean_service_on_sysvinit + sleep 1 + + if ((${os_type} == 1)); then + ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord + ${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord + elif ((${os_type} == 2)); then + ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord + ${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord + fi + + if ((${initd_mod} == 1)); then + ${csudo}chkconfig --add tarbitratord || : + ${csudo}chkconfig --level 2345 tarbitratord on || : + elif ((${initd_mod} == 2)); then + ${csudo}insserv tarbitratord || : + ${csudo}insserv -d tarbitratord || : + elif ((${initd_mod} == 3)); then + ${csudo}update-rc.d tarbitratord defaults || : + fi } function clean_service_on_systemd() { tarbitratord_service_config="${service_config_dir}/tarbitratord.service" if systemctl is-active --quiet tarbitratord; then - echo "tarbitrator is running, stopping it..." - ${csudo}systemctl stop tarbitratord &> /dev/null || echo &> /dev/null + echo "tarbitrator is running, stopping it..." 
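# Note: initd_mod encodes which sysvinit registration tool was detected
# earlier (1 = chkconfig, 2 = insserv, 3 = update-rc.d), as the branches in
# install_service_on_sysvinit above show. A sketch of the same dispatch with
# the service name as a parameter -- register_initd_service is a hypothetical
# helper, not part of this patch:
#
#   register_initd_service() {
#     local svc="$1"
#     case "${initd_mod}" in
#       1) ${csudo}chkconfig --add "${svc}" && ${csudo}chkconfig --level 2345 "${svc}" on ;;
#       2) ${csudo}insserv "${svc}" && ${csudo}insserv -d "${svc}" ;;
#       3) ${csudo}update-rc.d "${svc}" defaults ;;
#     esac
#   }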
+ ${csudo}systemctl stop tarbitratord &>/dev/null || echo &>/dev/null fi - ${csudo}systemctl disable tarbitratord &> /dev/null || echo &> /dev/null + ${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null ${csudo}rm -f ${tarbitratord_service_config} } -# taos:2345:respawn:/etc/init.d/tarbitratord start - function install_service_on_systemd() { - clean_service_on_systemd - - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - - ${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" - ${csudo}systemctl enable tarbitratord + clean_service_on_systemd + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + + ${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'StartLimitInterval=60s' >> 
${tarbitratord_service_config}" + ${csudo}bash -c "echo >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}" + ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" + ${csudo}systemctl enable tarbitratord } function install_service() { - if ((${service_mod}==0)); then - install_service_on_systemd - elif ((${service_mod}==1)); then - install_service_on_sysvinit - else - # must manual stop taosd - kill_tarbitrator - fi + if ((${service_mod} == 0)); then + install_service_on_systemd + elif ((${service_mod} == 1)); then + install_service_on_sysvinit + else + kill_tarbitrator + fi } function update_TDengine() { - # Start to update - echo -e "${GREEN}Start to update TDengine's arbitrator ...${NC}" - # Stop the service if running - if pidof tarbitrator &> /dev/null; then - if ((${service_mod}==0)); then - ${csudo}systemctl stop tarbitratord || : - elif ((${service_mod}==1)); then - ${csudo}service tarbitratord stop || : - else - kill_tarbitrator - fi - sleep 1 - fi - - install_main_path - #install_header - install_bin - install_service - install_jemalloc - - echo - #echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}" + # Start to update + echo -e "${GREEN}Start to update TDengine's arbitrator ...${NC}" + # Stop the service if running + if pidof tarbitrator &>/dev/null; then + if ((${service_mod} == 0)); then + ${csudo}systemctl stop tarbitratord || : + elif ((${service_mod} == 1)); then + ${csudo}service tarbitratord stop || : else - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}" + kill_tarbitrator fi - echo - echo -e "\033[44;32;1mTDengine's arbitrator is updated successfully!${NC}" + sleep 1 + fi + + install_main_path + #install_header + install_bin + install_service + install_jemalloc + + echo + if ((${service_mod} == 0)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}" + elif ((${service_mod} == 1)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}" + else + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}" + fi + echo + echo -e "\033[44;32;1mTDengine's arbitrator is updated successfully!${NC}" } function install_TDengine() { - # Start to install - echo -e "${GREEN}Start to install TDengine's arbitrator ...${NC}" - - install_main_path - #install_header - install_bin - install_service - install_jemalloc - - echo - #echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}" - else - echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}" - fi + # Start to install + echo -e "${GREEN}Start to install TDengine's arbitrator ...${NC}" + + install_main_path + #install_header + install_bin + install_service + install_jemalloc + + echo + if ((${service_mod} == 0)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}" + elif ((${service_mod} == 1)); then + echo -e "${GREEN_DARK}To start 
arbitrator ${NC}: ${csudo}service tarbitratord start${NC}" + else + echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}" + fi - echo -e "\033[44;32;1mTDengine's arbitrator is installed successfully!${NC}" - echo + echo -e "\033[44;32;1mTDengine's arbitrator is installed successfully!${NC}" + echo } - ## ==============================Main program starts from here============================ # Install server and client if [ -x ${bin_dir}/tarbitrator ]; then - update_flag=1 - update_TDengine + update_flag=1 + update_TDengine else - install_TDengine + install_TDengine fi - diff --git a/packaging/tools/install_arbi_jh.sh b/packaging/tools/install_arbi_jh.sh deleted file mode 100755 index 3b0d050c2c1c395b197ed040ce20c86f512cad2b..0000000000000000000000000000000000000000 --- a/packaging/tools/install_arbi_jh.sh +++ /dev/null @@ -1,286 +0,0 @@ -#!/bin/bash -# -# This file is used to install database on linux systems. The operating system -# is required to use systemd to manage services at boot - -set -e -#set -x - -# -----------------------Variables definition--------------------- -script_dir=$(dirname $(readlink -f "$0")) - -bin_link_dir="/usr/bin" -#inc_link_dir="/usr/include" - -#install main path -install_main_dir="/usr/local/tarbitrator" - -# old bin dir -bin_dir="/usr/local/tarbitrator/bin" - -service_config_dir="/etc/systemd/system" - -# Color setting -RED='\033[0;31m' -GREEN='\033[1;32m' -GREEN_DARK='\033[0;32m' -GREEN_UNDERLINE='\033[4;32m' -NC='\033[0m' - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -update_flag=0 - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - - -# get the operating system type for using the corresponding init file -# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification -#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -if [[ -e /etc/os-release ]]; then - osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: -else - osinfo="" -fi -#echo "osinfo: ${osinfo}" -os_type=0 -if echo $osinfo | grep -qwi "ubuntu" ; then -# echo "This is ubuntu system" - os_type=1 -elif echo $osinfo | grep -qwi "debian" ; then -# echo "This is debian system" - os_type=1 -elif echo $osinfo | grep -qwi "Kylin" ; then -# echo "This is Kylin system" - os_type=1 -elif echo $osinfo | grep -qwi "centos" ; then -# echo "This is centos system" - os_type=2 -elif echo $osinfo | grep -qwi "fedora" ; then -# echo "This is fedora system" - os_type=2 -else - echo " osinfo: ${osinfo}" - echo " This is an officially unverified linux system," - echo " if there are any problems with the installation and operation, " - echo " please feel free to contact jhict.com for support." 
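# Note: every script in this family detects the distro the same way: grep the
# NAME field out of /etc/os-release and fall back to the deb-style init files
# (os_type=1) on unverified systems, as the lines above and below show. A
# condensed sketch of the same probe (not part of this patch):
#
#   os_name=$(grep '^NAME=' /etc/os-release 2>/dev/null | cut -d '"' -f2)
#   case "${os_name}" in
#     *[Uu]buntu* | *[Dd]ebian* | *Kylin*) os_type=1 ;; # deb-style init scripts
#     *[Cc]ent[Oo][Ss]* | *[Ff]edora*) os_type=2 ;;     # rpm-style init scripts
#     *) os_type=1 ;;                                   # unverified: assume deb
#   esac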
- os_type=1 -fi - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function install_main_path() { - #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : - ${csudo}mkdir -p ${install_main_dir} - ${csudo}mkdir -p ${install_main_dir}/bin - #${csudo}mkdir -p ${install_main_dir}/include - ${csudo}mkdir -p ${install_main_dir}/init.d -} - -function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/rmtarbitrator || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/remove_arbi_jh.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_arbi_jh.sh ${bin_link_dir}/rmtarbitrator || : - [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : -} - -function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h -} - -function clean_service_on_sysvinit() { - if pidof tarbitrator &> /dev/null; then - ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function install_service_on_sysvinit() { - clean_service_on_sysvinit - sleep 1 - - # Install server service - if ((${os_type}==1)); then - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - elif ((${os_type}==2)); then - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - fi - - if ((${initd_mod}==1)); then - ${csudo}chkconfig --add tarbitratord || : - ${csudo}chkconfig --level 2345 tarbitratord on || : - elif ((${initd_mod}==2)); then - ${csudo}insserv tarbitratord || : - ${csudo}insserv -d tarbitratord || : - elif ((${initd_mod}==3)); then - ${csudo}update-rc.d tarbitratord defaults || : - fi -} - -function clean_service_on_systemd() { - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - if systemctl is-active --quiet tarbitratord; then - echo "tarbitrator is running, stopping it..." 
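# Note: kill_tarbitrator above finds the PID with the classic
# "ps -ef | grep ... | grep -v grep" pipeline. Where pgrep is available, the
# same lookup is a single call that cannot match its own grep; the trailing
# "|| :" keeps a no-match result from tripping "set -e". A sketch, not part
# of this patch:
#
#   pid=$(pgrep -x tarbitrator || :)
#   if [ -n "$pid" ]; then
#     ${csudo}kill -9 $pid || :
#   fi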
- ${csudo}systemctl stop tarbitratord &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable tarbitratord &> /dev/null || echo &> /dev/null - - ${csudo}rm -f ${tarbitratord_service_config} -} - -function install_service_on_systemd() { - clean_service_on_systemd - - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - - ${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Description=jh_iot arbitrator service' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" - ${csudo}systemctl enable tarbitratord -} - -function install_service() { - if ((${service_mod}==0)); then - install_service_on_systemd - elif ((${service_mod}==1)); then - install_service_on_sysvinit - else - kill_tarbitrator - fi -} - -function update() { - # Start to update - echo -e "${GREEN}Start to update jh_iot's arbitrator ...${NC}" - # Stop the service if running - if pidof tarbitrator &> /dev/null; then - if ((${service_mod}==0)); then - ${csudo}systemctl stop tarbitratord || : - elif ((${service_mod}==1)); then - ${csudo}service tarbitratord stop || : - else - kill_tarbitrator - fi - sleep 1 - fi - - install_main_path - #install_header - install_bin - install_service - - echo - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}" - else - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}" - fi - echo - echo -e "\033[44;32;1mjh_iot's arbitrator is updated successfully!${NC}" -} - -function install() { - # Start to install - echo -e "${GREEN}Start to install jh_iot's arbitrator ...${NC}" - - install_main_path - #install_header - install_bin - install_service - echo - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}" - else - echo -e "${GREEN_DARK}To start arbitrator 
${NC}: tarbitrator${NC}" - fi - - echo -e "\033[44;32;1mjh_iot's arbitrator is installed successfully!${NC}" - echo -} - - -## ==============================Main program starts from here============================ -# Install server and client -if [ -x ${bin_dir}/tarbitrator ]; then - update_flag=1 - update -else - install -fi - diff --git a/packaging/tools/install_arbi_kh.sh b/packaging/tools/install_arbi_kh.sh deleted file mode 100755 index 3c95639d181b6796128315fc9afb8bfa586c6dc3..0000000000000000000000000000000000000000 --- a/packaging/tools/install_arbi_kh.sh +++ /dev/null @@ -1,286 +0,0 @@ -#!/bin/bash -# -# This file is used to install database on linux systems. The operating system -# is required to use systemd to manage services at boot - -set -e -#set -x - -# -----------------------Variables definition--------------------- -script_dir=$(dirname $(readlink -f "$0")) - -bin_link_dir="/usr/bin" -#inc_link_dir="/usr/include" - -#install main path -install_main_dir="/usr/local/tarbitrator" - -# old bin dir -bin_dir="/usr/local/tarbitrator/bin" - -service_config_dir="/etc/systemd/system" - -# Color setting -RED='\033[0;31m' -GREEN='\033[1;32m' -GREEN_DARK='\033[0;32m' -GREEN_UNDERLINE='\033[4;32m' -NC='\033[0m' - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -update_flag=0 - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - - -# get the operating system type for using the corresponding init file -# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification -#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -if [[ -e /etc/os-release ]]; then - osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: -else - osinfo="" -fi -#echo "osinfo: ${osinfo}" -os_type=0 -if echo $osinfo | grep -qwi "ubuntu" ; then -# echo "This is ubuntu system" - os_type=1 -elif echo $osinfo | grep -qwi "debian" ; then -# echo "This is debian system" - os_type=1 -elif echo $osinfo | grep -qwi "Kylin" ; then -# echo "This is Kylin system" - os_type=1 -elif echo $osinfo | grep -qwi "centos" ; then -# echo "This is centos system" - os_type=2 -elif echo $osinfo | grep -qwi "fedora" ; then -# echo "This is fedora system" - os_type=2 -else - echo " osinfo: ${osinfo}" - echo " This is an officially unverified linux system," - echo " if there are any problems with the installation and operation, " - echo " please feel free to contact wellintech.com for support." 
- os_type=1 -fi - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function install_main_path() { - #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : - ${csudo}mkdir -p ${install_main_dir} - ${csudo}mkdir -p ${install_main_dir}/bin - #${csudo}mkdir -p ${install_main_dir}/include - ${csudo}mkdir -p ${install_main_dir}/init.d -} - -function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/rmtarbitrator || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/remove_arbi_kh.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_arbi_kh.sh ${bin_link_dir}/rmtarbitrator || : - [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : -} - -function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h -} - -function clean_service_on_sysvinit() { - if pidof tarbitrator &> /dev/null; then - ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function install_service_on_sysvinit() { - clean_service_on_sysvinit - sleep 1 - - # Install khserver service - if ((${os_type}==1)); then - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - elif ((${os_type}==2)); then - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - fi - - if ((${initd_mod}==1)); then - ${csudo}chkconfig --add tarbitratord || : - ${csudo}chkconfig --level 2345 tarbitratord on || : - elif ((${initd_mod}==2)); then - ${csudo}insserv tarbitratord || : - ${csudo}insserv -d tarbitratord || : - elif ((${initd_mod}==3)); then - ${csudo}update-rc.d tarbitratord defaults || : - fi -} - -function clean_service_on_systemd() { - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - if systemctl is-active --quiet tarbitratord; then - echo "tarbitrator is running, stopping it..." 
- ${csudo}systemctl stop tarbitratord &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable tarbitratord &> /dev/null || echo &> /dev/null - - ${csudo}rm -f ${tarbitratord_service_config} -} - -function install_service_on_systemd() { - clean_service_on_systemd - - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - - ${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Description=KingHistorian arbitrator service' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" - ${csudo}systemctl enable tarbitratord -} - -function install_service() { - if ((${service_mod}==0)); then - install_service_on_systemd - elif ((${service_mod}==1)); then - install_service_on_sysvinit - else - kill_tarbitrator - fi -} - -function update() { - # Start to update - echo -e "${GREEN}Start to update KingHistorian's arbitrator ...${NC}" - # Stop the service if running - if pidof tarbitrator &> /dev/null; then - if ((${service_mod}==0)); then - ${csudo}systemctl stop tarbitratord || : - elif ((${service_mod}==1)); then - ${csudo}service tarbitratord stop || : - else - kill_tarbitrator - fi - sleep 1 - fi - - install_main_path - #install_header - install_bin - install_service - - echo - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}" - else - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}" - fi - echo - echo -e "\033[44;32;1mKingHistorian's arbitrator is updated successfully!${NC}" -} - -function install() { - # Start to install - echo -e "${GREEN}Start to install KingHistorian's arbitrator ...${NC}" - - install_main_path - #install_header - install_bin - install_service - echo - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}" - else - echo -e 
"${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}" - fi - - echo -e "\033[44;32;1mKingHistorian's arbitrator is installed successfully!${NC}" - echo -} - - -## ==============================Main program starts from here============================ -# Install server and client -if [ -x ${bin_dir}/tarbitrator ]; then - update_flag=1 - update -else - install -fi - diff --git a/packaging/tools/install_arbi_power.sh b/packaging/tools/install_arbi_power.sh deleted file mode 100755 index 94f1b7fc538874d8c03aa1ecffce4bee528e8c0c..0000000000000000000000000000000000000000 --- a/packaging/tools/install_arbi_power.sh +++ /dev/null @@ -1,349 +0,0 @@ -#!/bin/bash -# -# This file is used to install database on linux systems. The operating system -# is required to use systemd to manage services at boot - -set -e -#set -x - -# -----------------------Variables definition--------------------- -script_dir=$(dirname $(readlink -f "$0")) - -bin_link_dir="/usr/bin" -#inc_link_dir="/usr/include" - -#install main path -install_main_dir="/usr/local/tarbitrator" - -# old bin dir -bin_dir="/usr/local/tarbitrator/bin" - -service_config_dir="/etc/systemd/system" - -# Color setting -RED='\033[0;31m' -GREEN='\033[1;32m' -GREEN_DARK='\033[0;32m' -GREEN_UNDERLINE='\033[4;32m' -NC='\033[0m' - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -update_flag=0 - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - - -# get the operating system type for using the corresponding init file -# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification -#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -if [[ -e /etc/os-release ]]; then - osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: -else - osinfo="" -fi -#echo "osinfo: ${osinfo}" -os_type=0 -if echo $osinfo | grep -qwi "ubuntu" ; then -# echo "This is ubuntu system" - os_type=1 -elif echo $osinfo | grep -qwi "debian" ; then -# echo "This is debian system" - os_type=1 -elif echo $osinfo | grep -qwi "Kylin" ; then -# echo "This is Kylin system" - os_type=1 -elif echo $osinfo | grep -qwi "centos" ; then -# echo "This is centos system" - os_type=2 -elif echo $osinfo | grep -qwi "fedora" ; then -# echo "This is fedora system" - os_type=2 -else - echo " osinfo: ${osinfo}" - echo " This is an officially unverified linux system," - echo " if there are any problems with the installation and operation, " - echo " please feel free to contact taosdata.com for support." 
- os_type=1 -fi - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function install_main_path() { - #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : - ${csudo}mkdir -p ${install_main_dir} - ${csudo}mkdir -p ${install_main_dir}/bin - #${csudo}mkdir -p ${install_main_dir}/include - ${csudo}mkdir -p ${install_main_dir}/init.d -} - -function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/rmtarbitrator || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/remove_arbi_power.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_arbi_power.sh ${bin_link_dir}/rmtarbitrator || : - [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : -} - -function install_jemalloc() { - jemalloc_dir=${script_dir}/jemalloc - - if [ -d ${jemalloc_dir} ]; then - ${csudo}/usr/bin/install -c -d /usr/local/bin - - if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jeprof ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin - fi - if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then - ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib - ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so - ${csudo}/usr/bin/install -c -d /usr/local/lib - if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig - fi - fi - if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc - fi - if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3 - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 - fi - - if [ -d /etc/ld.so.conf.d ]; then - echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" - ${csudo}ldconfig - else - echo "/etc/ld.so.conf.d not found!" 
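[editor's note] install_jemalloc above registers /usr/local/lib with the dynamic linker by writing /etc/ld.so.conf.d/jemalloc.conf and re-running ldconfig. A quick post-install check that the linker cache actually picked the library up (not part of the script; the output line is illustrative):

# Run after installation:
ldconfig -p | grep libjemalloc
#   libjemalloc.so.2 (libc6,x86-64) => /usr/local/lib/libjemalloc.so.2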
- fi - fi -} - -function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h -} - -function clean_service_on_sysvinit() { - if pidof tarbitrator &> /dev/null; then - ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function install_service_on_sysvinit() { - clean_service_on_sysvinit - sleep 1 - - # Install powerd service - - if ((${os_type}==1)); then - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - elif ((${os_type}==2)); then - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - fi - - #restart_config_str="power:2345:respawn:${service_config_dir}/powerd start" - #${csudo}grep -q -F "$restart_config_str" /etc/inittab || ${csudo}bash -c "echo '${restart_config_str}' >> /etc/inittab" - - if ((${initd_mod}==1)); then - ${csudo}chkconfig --add tarbitratord || : - ${csudo}chkconfig --level 2345 tarbitratord on || : - elif ((${initd_mod}==2)); then - ${csudo}insserv tarbitratord || : - ${csudo}insserv -d tarbitratord || : - elif ((${initd_mod}==3)); then - ${csudo}update-rc.d tarbitratord defaults || : - fi -} - -function clean_service_on_systemd() { - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - if systemctl is-active --quiet tarbitratord; then - echo "tarbitrator is running, stopping it..." 
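[editor's note] The install_service_on_systemd function in the hunk below writes /etc/systemd/system/tarbitratord.service one appended `echo` at a time. Read as a whole, those appends produce the following unit (reconstructed verbatim from the echo lines; only the Description string differs between the per-product scripts):

[Unit]
Description=PowerDB arbitrator service
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
ExecStart=/usr/bin/tarbitrator
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target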
- ${csudo}systemctl stop tarbitratord &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable tarbitratord &> /dev/null || echo &> /dev/null - - ${csudo}rm -f ${tarbitratord_service_config} -} - -# power:2345:respawn:/etc/init.d/tarbitratord start - -function install_service_on_systemd() { - clean_service_on_systemd - - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - - ${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Description=PowerDB arbitrator service' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" - ${csudo}systemctl enable tarbitratord -} - -function install_service() { - if ((${service_mod}==0)); then - install_service_on_systemd - elif ((${service_mod}==1)); then - install_service_on_sysvinit - else - kill_tarbitrator - fi -} - -function update_PowerDB() { - # Start to update - echo -e "${GREEN}Start to update PowerDB's arbitrator ...${NC}" - # Stop the service if running - if pidof tarbitrator &> /dev/null; then - if ((${service_mod}==0)); then - ${csudo}systemctl stop tarbitratord || : - elif ((${service_mod}==1)); then - ${csudo}service tarbitratord stop || : - else - kill_tarbitrator - fi - sleep 1 - fi - - install_main_path - #install_header - install_bin - install_service - install_jemalloc - - echo - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}" - else - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}" - fi - echo - echo -e "\033[44;32;1mPowerDB's arbitrator is updated successfully!${NC}" -} - -function install_PowerDB() { - # Start to install - echo -e "${GREEN}Start to install PowerDB's arbitrator ...${NC}" - - install_main_path - #install_header - install_bin - install_service - install_jemalloc - - echo - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To 
start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}" - else - echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}" - fi - - echo -e "\033[44;32;1mPowerDB's arbitrator is installed successfully!${NC}" - echo -} - - -## ==============================Main program starts from here============================ -# Install server and client -if [ -x ${bin_dir}/tarbitrator ]; then - update_flag=1 - update_PowerDB -else - install_PowerDB -fi - diff --git a/packaging/tools/install_arbi_pro.sh b/packaging/tools/install_arbi_pro.sh deleted file mode 100755 index 5c5fa485b8852ea87fdbd58cc76a6b4b6b4377de..0000000000000000000000000000000000000000 --- a/packaging/tools/install_arbi_pro.sh +++ /dev/null @@ -1,288 +0,0 @@ -#!/bin/bash -# -# This file is used to install database on linux systems. The operating system -# is required to use systemd to manage services at boot - -set -e -#set -x - -# -----------------------Variables definition--------------------- -script_dir=$(dirname $(readlink -f "$0")) - -bin_link_dir="/usr/bin" -#inc_link_dir="/usr/include" - -#install main path -install_main_dir="/usr/local/tarbitrator" - -# old bin dir -bin_dir="/usr/local/tarbitrator/bin" - -service_config_dir="/etc/systemd/system" - -# Color setting -RED='\033[0;31m' -GREEN='\033[1;32m' -GREEN_DARK='\033[0;32m' -GREEN_UNDERLINE='\033[4;32m' -NC='\033[0m' - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -update_flag=0 - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - - -# get the operating system type for using the corresponding init file -# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification -#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -if [[ -e /etc/os-release ]]; then - osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: -else - osinfo="" -fi -#echo "osinfo: ${osinfo}" -os_type=0 -if echo $osinfo | grep -qwi "ubuntu" ; then -# echo "This is ubuntu system" - os_type=1 -elif echo $osinfo | grep -qwi "debian" ; then -# echo "This is debian system" - os_type=1 -elif echo $osinfo | grep -qwi "Kylin" ; then -# echo "This is Kylin system" - os_type=1 -elif echo $osinfo | grep -qwi "centos" ; then -# echo "This is centos system" - os_type=2 -elif echo $osinfo | grep -qwi "fedora" ; then -# echo "This is fedora system" - os_type=2 -else - echo " osinfo: ${osinfo}" - echo " This is an officially unverified linux system," - echo " if there are any problems with the installation and operation, " - echo " please feel free to contact hanatech.com.cn for support." 
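[editor's note] The kill_tarbitrator helper that opens the next hunk locates the PID with a `ps -ef | grep | grep -v grep | awk` pipeline. Where procps is available, an exact-name pkill is an equivalent one-liner (shown for comparison only; the scripts themselves use the pipeline):

# Equivalent to kill_tarbitrator, assuming procps pkill is installed:
pkill -9 -x tarbitrator || :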
- os_type=1 -fi - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function install_main_path() { - #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : - ${csudo}mkdir -p ${install_main_dir} - ${csudo}mkdir -p ${install_main_dir}/bin - #${csudo}mkdir -p ${install_main_dir}/include - ${csudo}mkdir -p ${install_main_dir}/init.d -} - -function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/rmtarbitrator || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/remove_arbi_prodb.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_arbi_prodb.sh ${bin_link_dir}/rmtarbitrator || : - [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : -} - -function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h -} - -function clean_service_on_sysvinit() { - if pidof tarbitrator &> /dev/null; then - ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function install_service_on_sysvinit() { - clean_service_on_sysvinit - sleep 1 - - # Install prodbs service - - if ((${os_type}==1)); then - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - elif ((${os_type}==2)); then - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - fi - - if ((${initd_mod}==1)); then - ${csudo}chkconfig --add tarbitratord || : - ${csudo}chkconfig --level 2345 tarbitratord on || : - elif ((${initd_mod}==2)); then - ${csudo}insserv tarbitratord || : - ${csudo}insserv -d tarbitratord || : - elif ((${initd_mod}==3)); then - ${csudo}update-rc.d tarbitratord defaults || : - fi -} - -function clean_service_on_systemd() { - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - if systemctl is-active --quiet tarbitratord; then - echo "tarbitrator is running, stopping it..." 
- ${csudo}systemctl stop tarbitratord &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable tarbitratord &> /dev/null || echo &> /dev/null - - ${csudo}rm -f ${tarbitratord_service_config} -} - -function install_service_on_systemd() { - clean_service_on_systemd - - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - - ${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Description=ProDB arbitrator service' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" - ${csudo}systemctl enable tarbitratord -} - -function install_service() { - if ((${service_mod}==0)); then - install_service_on_systemd - elif ((${service_mod}==1)); then - install_service_on_sysvinit - else - kill_tarbitrator - fi -} - -function update_prodb() { - # Start to update - echo -e "${GREEN}Start to update ProDB's arbitrator ...${NC}" - # Stop the service if running - if pidof tarbitrator &> /dev/null; then - if ((${service_mod}==0)); then - ${csudo}systemctl stop tarbitratord || : - elif ((${service_mod}==1)); then - ${csudo}service tarbitratord stop || : - else - kill_tarbitrator - fi - sleep 1 - fi - - install_main_path - #install_header - install_bin - install_service - - echo - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}" - else - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}" - fi - echo - echo -e "\033[44;32;1mProDB's arbitrator is updated successfully!${NC}" -} - -function install_prodb() { - # Start to install - echo -e "${GREEN}Start to install ProDB's arbitrator ...${NC}" - - install_main_path - #install_header - install_bin - install_service - echo - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}" - else - echo -e "${GREEN_DARK}To start 
arbitrator ${NC}: tarbitrator${NC}" - fi - - echo -e "\033[44;32;1mProDB's arbitrator is installed successfully!${NC}" - echo -} - - -## ==============================Main program starts from here============================ -# Install server and client -if [ -x ${bin_dir}/tarbitrator ]; then - update_flag=1 - update_prodb -else - install_prodb -fi - diff --git a/packaging/tools/install_arbi_tq.sh b/packaging/tools/install_arbi_tq.sh deleted file mode 100755 index 132346a749f1559436e6db244593c660dbd69556..0000000000000000000000000000000000000000 --- a/packaging/tools/install_arbi_tq.sh +++ /dev/null @@ -1,293 +0,0 @@ -#!/bin/bash -# -# This file is used to install database on linux systems. The operating system -# is required to use systemd to manage services at boot - -set -e -#set -x - -# -----------------------Variables definition--------------------- -script_dir=$(dirname $(readlink -f "$0")) - -bin_link_dir="/usr/bin" -#inc_link_dir="/usr/include" - -#install main path -install_main_dir="/usr/local/tarbitrator" - -# old bin dir -bin_dir="/usr/local/tarbitrator/bin" - -service_config_dir="/etc/systemd/system" - -# Color setting -RED='\033[0;31m' -GREEN='\033[1;32m' -GREEN_DARK='\033[0;32m' -GREEN_UNDERLINE='\033[4;32m' -NC='\033[0m' - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -update_flag=0 - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - - -# get the operating system type for using the corresponding init file -# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification -#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -if [[ -e /etc/os-release ]]; then - osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: -else - osinfo="" -fi -#echo "osinfo: ${osinfo}" -os_type=0 -if echo $osinfo | grep -qwi "ubuntu" ; then -# echo "This is ubuntu system" - os_type=1 -elif echo $osinfo | grep -qwi "debian" ; then -# echo "This is debian system" - os_type=1 -elif echo $osinfo | grep -qwi "Kylin" ; then -# echo "This is Kylin system" - os_type=1 -elif echo $osinfo | grep -qwi "centos" ; then -# echo "This is centos system" - os_type=2 -elif echo $osinfo | grep -qwi "fedora" ; then -# echo "This is fedora system" - os_type=2 -else - echo " osinfo: ${osinfo}" - echo " This is an officially unverified linux system," - echo " if there are any problems with the installation and operation, " - echo " please feel free to contact taosdata.com for support." 
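[editor's note] The arbitrator installers deleted in this changeset (kh, power, pro, tq, plus the jh variant whose tail appears above) are identical apart from product names, support contacts, and uninstaller filenames. The retained install_client.sh further down shows the replacement pattern: branding is hoisted into variables at the top of a single script. A minimal sketch of that idea applied to an arbitrator installer (all names hypothetical):

#!/bin/bash
# Hypothetical parameterized header, mirroring the variable hoisting that
# this changeset applies to install_client.sh.
productName="TDengine"
supportContact="taosdata.com"
serviceName="tarbitratord"
uninstallScript="rmtarbitrator"

echo -e "Start to install ${productName}'s arbitrator ..."
echo "If there are any problems, contact ${supportContact} for support."
echo "Service unit: ${serviceName}.service; uninstaller: ${uninstallScript}"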
- os_type=1 -fi - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function install_main_path() { - #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : - ${csudo}mkdir -p ${install_main_dir} - ${csudo}mkdir -p ${install_main_dir}/bin - #${csudo}mkdir -p ${install_main_dir}/include - ${csudo}mkdir -p ${install_main_dir}/init.d -} - -function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/rmtarbitrator || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/remove_arbi_tq.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_arbi_tq.sh ${bin_link_dir}/rmtarbitrator || : - [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : -} - -function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h -} - -function clean_service_on_sysvinit() { - if pidof tarbitrator &> /dev/null; then - ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function install_service_on_sysvinit() { - clean_service_on_sysvinit - sleep 1 - - # Install tqd service - - if ((${os_type}==1)); then - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - elif ((${os_type}==2)); then - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - fi - - #restart_config_str="tq:2345:respawn:${service_config_dir}/tqd start" - #${csudo}grep -q -F "$restart_config_str" /etc/inittab || ${csudo}bash -c "echo '${restart_config_str}' >> /etc/inittab" - - if ((${initd_mod}==1)); then - ${csudo}chkconfig --add tarbitratord || : - ${csudo}chkconfig --level 2345 tarbitratord on || : - elif ((${initd_mod}==2)); then - ${csudo}insserv tarbitratord || : - ${csudo}insserv -d tarbitratord || : - elif ((${initd_mod}==3)); then - ${csudo}update-rc.d tarbitratord defaults || : - fi -} - -function clean_service_on_systemd() { - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - if systemctl is-active --quiet tarbitratord; then - echo "tarbitrator is running, 
stopping it..." - ${csudo}systemctl stop tarbitratord &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable tarbitratord &> /dev/null || echo &> /dev/null - - ${csudo}rm -f ${tarbitratord_service_config} -} - -# tq:2345:respawn:/etc/init.d/tarbitratord start - -function install_service_on_systemd() { - clean_service_on_systemd - - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - - ${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Description=TQ arbitrator service' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" - ${csudo}systemctl enable tarbitratord -} - -function install_service() { - if ((${service_mod}==0)); then - install_service_on_systemd - elif ((${service_mod}==1)); then - install_service_on_sysvinit - else - kill_tarbitrator - fi -} - -function update_tq() { - # Start to update - echo -e "${GREEN}Start to update TQ's arbitrator ...${NC}" - # Stop the service if running - if pidof tarbitrator &> /dev/null; then - if ((${service_mod}==0)); then - ${csudo}systemctl stop tarbitratord || : - elif ((${service_mod}==1)); then - ${csudo}service tarbitratord stop || : - else - kill_tarbitrator - fi - sleep 1 - fi - - install_main_path - #install_header - install_bin - install_service - - echo - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord start${NC}" - else - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}" - fi - echo - echo -e "\033[44;32;1mTQ's arbitrator is updated successfully!${NC}" -} - -function install_tq() { - # Start to install - echo -e "${GREEN}Start to install TQ's arbitrator ...${NC}" - - install_main_path - #install_header - install_bin - install_service - echo - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}systemctl start tarbitratord${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo}service tarbitratord 
start${NC}" - else - echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}" - fi - - echo -e "\033[44;32;1mTQ's arbitrator is installed successfully!${NC}" - echo -} - - -## ==============================Main program starts from here============================ -# Install server and client -if [ -x ${bin_dir}/tarbitrator ]; then - update_flag=1 - update_tq -else - install_tq -fi - diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 4ea60d1d55336d606f7eabf10b464cff90860519..baef60886405a8bafaf28a263d233d6846b5f911 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -8,25 +8,36 @@ set -e # -----------------------Variables definition--------------------- +dataDir="/var/lib/taos" +logDir="/var/log/taos" +productName="TDengine" +installDir="/usr/local/taos" +configDir="/etc/taos" +serverName="taosd" +clientName="taos" +uninstallScript="rmtaos" +configFile="taos.cfg" +tarName="taos.tar.gz" + osType=Linux pagMode=full if [ "$osType" != "Darwin" ]; then script_dir=$(dirname $(readlink -f "$0")) # Dynamic directory - data_dir="/var/lib/taos" - log_dir="/var/log/taos" + data_dir=${dataDir} + log_dir=${logDir} else script_dir=`dirname $0` cd ${script_dir} script_dir="$(pwd)" - data_dir="/var/lib/taos" - log_dir=~/TDengine/log + data_dir=${dataDir} + log_dir=~/${productName}/log fi -log_link_dir="/usr/local/taos/log" +log_link_dir="${installDir}/log" -cfg_install_dir="/etc/taos" +cfg_install_dir=${configDir} if [ "$osType" != "Darwin" ]; then bin_link_dir="/usr/bin" @@ -40,13 +51,10 @@ else fi #install main path -install_main_dir="/usr/local/taos" +install_main_dir="${installDir}" # old bin dir -bin_dir="/usr/local/taos/bin" - -# v1.5 jar dir -#v15_java_app_dir="/usr/local/lib/taos" +bin_dir="${installDir}/bin" # Color setting RED='\033[0;31m' @@ -63,7 +71,7 @@ fi update_flag=0 function kill_client() { - pid=$(ps -ef | grep "taos" | grep -v "grep" | awk '{print $2}') + pid=$(ps -ef | grep "${clientName}" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo}kill -9 $pid || : fi @@ -83,21 +91,21 @@ function install_main_path() { function install_bin() { # Remove links - ${csudo}rm -f ${bin_link_dir}/taos || : + ${csudo}rm -f ${bin_link_dir}/${clientName} || : if [ "$osType" != "Darwin" ]; then ${csudo}rm -f ${bin_link_dir}/taosdemo || : fi - ${csudo}rm -f ${bin_link_dir}/rmtaos || : + ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : ${csudo}rm -f ${bin_link_dir}/set_core || : ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* #Make link - [ -x ${install_main_dir}/bin/taos ] && ${csudo}ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : + [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || : if [ "$osType" != "Darwin" ]; then [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : fi - [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : + [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript} || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : } @@ -196,16 +204,14 @@ function install_jemalloc() { } function install_config() { - #${csudo}rm -f 
${install_main_dir}/cfg/taos.cfg || : - - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + if [ ! -f ${cfg_install_dir}/${configFile} ]; then ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo}cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + [ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir} ${csudo}chmod 644 ${cfg_install_dir}/* fi - ${csudo}cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo}ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org + ${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg } @@ -232,15 +238,15 @@ function install_examples() { function update_TDengine() { # Start to update - if [ ! -e taos.tar.gz ]; then - echo "File taos.tar.gz does not exist" + if [ ! -e ${tarName} ]; then + echo "File ${tarName} does not exist" exit 1 fi - tar -zxf taos.tar.gz + tar -zxf ${tarName} - echo -e "${GREEN}Start to update TDengine client...${NC}" + echo -e "${GREEN}Start to update ${productName} client...${NC}" # Stop the client shell if running - if pidof taos &> /dev/null; then + if pidof ${clientName} &> /dev/null; then kill_client sleep 1 fi @@ -259,20 +265,20 @@ function update_TDengine() { install_config echo - echo -e "\033[44;32;1mTDengine client is updated successfully!${NC}" + echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}" - rm -rf $(tar -tf taos.tar.gz) + rm -rf $(tar -tf ${tarName}) } function install_TDengine() { # Start to install - if [ ! -e taos.tar.gz ]; then - echo "File taos.tar.gz does not exist" + if [ ! -e ${tarName} ]; then + echo "File ${tarName} does not exist" exit 1 fi - tar -zxf taos.tar.gz + tar -zxf ${tarName} - echo -e "${GREEN}Start to install TDengine client...${NC}" + echo -e "${GREEN}Start to install ${productName} client...${NC}" install_main_path install_log @@ -287,21 +293,21 @@ function install_TDengine() { install_config echo - echo -e "\033[44;32;1mTDengine client is installed successfully!${NC}" + echo -e "\033[44;32;1m${productName} client is installed successfully!${NC}" - rm -rf $(tar -tf taos.tar.gz) + rm -rf $(tar -tf ${tarName}) } ## ==============================Main program starts from here============================ # Install or updata client and client # if server is already install, don't install client - if [ -e ${bin_dir}/taosd ]; then - echo -e "\033[44;32;1mThere are already installed TDengine server, so don't need install client!${NC}" + if [ -e ${bin_dir}/${serverName} ]; then + echo -e "\033[44;32;1mThere are already installed ${productName} server, so don't need install client!${NC}" exit 0 fi - if [ -x ${bin_dir}/taos ]; then + if [ -x ${bin_dir}/${clientName} ]; then update_flag=1 update_TDengine else diff --git a/packaging/tools/install_client_jh.sh b/packaging/tools/install_client_jh.sh deleted file mode 100755 index 436b683b29b81e612ef15af6cf976f29b42347fc..0000000000000000000000000000000000000000 --- a/packaging/tools/install_client_jh.sh +++ /dev/null @@ -1,239 +0,0 @@ -#!/bin/bash -# -# This file is used to install jh_taos client on linux systems. 
The operating system -# is required to use systemd to manage services at boot - -set -e -#set -x - -# -----------------------Variables definition--------------------- - -osType=Linux -pagMode=full - -if [ "$osType" != "Darwin" ]; then - script_dir=$(dirname $(readlink -f "$0")) - # Dynamic directory - data_dir="/var/lib/jh_taos" - log_dir="/var/log/jh_taos" -else - script_dir=`dirname $0` - cd ${script_dir} - script_dir="$(pwd)" - data_dir="/var/lib/jh_taos" - log_dir="~/jh_taos/log" -fi - -log_link_dir="/usr/local/jh_taos/log" - -cfg_install_dir="/etc/jh_taos" - -if [ "$osType" != "Darwin" ]; then - bin_link_dir="/usr/bin" - lib_link_dir="/usr/lib" - lib64_link_dir="/usr/lib64" - inc_link_dir="/usr/include" -else - bin_link_dir="/usr/local/bin" - lib_link_dir="/usr/local/lib" - inc_link_dir="/usr/local/include" -fi - -#install main path -install_main_dir="/usr/local/jh_taos" - -# old bin dir -bin_dir="/usr/local/jh_taos/bin" - -# Color setting -RED='\033[0;31m' -GREEN='\033[1;32m' -GREEN_DARK='\033[0;32m' -GREEN_UNDERLINE='\033[4;32m' -NC='\033[0m' - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -update_flag=0 - -function kill_client() { - pid=$(ps -ef | grep "jh_taos" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function install_main_path() { - #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : - ${csudo}mkdir -p ${install_main_dir} - ${csudo}mkdir -p ${install_main_dir}/cfg - ${csudo}mkdir -p ${install_main_dir}/bin - ${csudo}mkdir -p ${install_main_dir}/connector - ${csudo}mkdir -p ${install_main_dir}/driver - ${csudo}mkdir -p ${install_main_dir}/examples - ${csudo}mkdir -p ${install_main_dir}/include -} - -function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/jh_taos || : - if [ "$osType" != "Darwin" ]; then - ${csudo}rm -f ${bin_link_dir}/jhdemo || : - ${csudo}rm -f ${bin_link_dir}/jh_taosdump || : - fi - ${csudo}rm -f ${bin_link_dir}/rmjh || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - - ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/jh_taos ] && ${csudo}ln -s ${install_main_dir}/bin/jh_taos ${bin_link_dir}/jh_taos || : - if [ "$osType" != "Darwin" ]; then - [ -x ${install_main_dir}/bin/jhdemo ] && ${csudo}ln -s ${install_main_dir}/bin/jhdemo ${bin_link_dir}/jhdemo || : - [ -x ${install_main_dir}/bin/jh_taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/jh_taosdump ${bin_link_dir}/jh_taosdump || : - fi - [ -x ${install_main_dir}/bin/remove_client_jh.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client_jh.sh ${bin_link_dir}/rmjh || : - [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : -} - -function clean_lib() { - sudo rm -f /usr/lib/libtaos.* || : - sudo rm -rf ${lib_dir} || : -} - -function install_lib() { - # Remove links - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - - ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* - - if [ "$osType" != "Darwin" ]; then - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 - ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - - if [ -d "${lib64_link_dir}" ]; then - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - 
${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : - fi - else - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib - ${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib - fi - - ${csudo}ldconfig -} - -function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h -} - -function install_config() { - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then - ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo}cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} - ${csudo}chmod 644 ${cfg_install_dir}/* - fi - - ${csudo}cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo}ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg -} - - -function install_log() { - ${csudo}rm -rf ${log_dir} || : - - if [ "$osType" != "Darwin" ]; then - ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - else - mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - fi - ${csudo}ln -s ${log_dir} ${install_main_dir}/log -} - -function install_connector() { - ${csudo}cp -rf ${script_dir}/connector/* ${install_main_dir}/connector -} - -function install_examples() { - if [ -d ${script_dir}/examples ]; then - ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples - fi -} - -function update() { - # Start to update - if [ ! -e jh_taos.tar.gz ]; then - echo "File jh_taos.tar.gz does not exist" - exit 1 - fi - tar -zxf jh_taos.tar.gz - - echo -e "${GREEN}Start to update jh_iot client...${NC}" - # Stop the client shell if running - if pidof jh_taos &> /dev/null; then - kill_client - sleep 1 - fi - - install_main_path - - install_log - install_header - install_lib - install_examples - install_bin - install_config - - echo - echo -e "\033[44;32;1mjh_iot client is updated successfully!${NC}" - - rm -rf $(tar -tf jh_taos.tar.gz) -} - -function install() { - # Start to install - if [ ! -e jh_taos.tar.gz ]; then - echo "File jh_taos.tar.gz does not exist" - exit 1 - fi - tar -zxf jh_taos.tar.gz - - echo -e "${GREEN}Start to install jh_taos client...${NC}" - - install_main_path - install_log - install_header - install_lib - install_examples - install_bin - install_config - - echo - echo -e "\033[44;32;1mjh_iot client is installed successfully!${NC}" - - rm -rf $(tar -tf jh_taos.tar.gz) -} - - -## ==============================Main program starts from here============================ -# Install or updata client and client -# if server is already install, don't install client - if [ -e ${bin_dir}/jh_taosd ]; then - echo -e "\033[44;32;1mThere are already installed jh_iot server, so don't need install client!${NC}" - exit 0 - fi - - if [ -x ${bin_dir}/jh_taos ]; then - update_flag=1 - update - else - install - fi diff --git a/packaging/tools/install_client_kh.sh b/packaging/tools/install_client_kh.sh deleted file mode 100755 index 0b8c0f5aeba96fff65907105ca571cf5955c6648..0000000000000000000000000000000000000000 --- a/packaging/tools/install_client_kh.sh +++ /dev/null @@ -1,240 +0,0 @@ -#!/bin/bash -# -# This file is used to install kinghistorian client on linux systems. 
The operating system -# is required to use systemd to manage services at boot - -set -e -#set -x - -# -----------------------Variables definition--------------------- - -osType=Linux -pagMode=full - -if [ "$osType" != "Darwin" ]; then - script_dir=$(dirname $(readlink -f "$0")) - # Dynamic directory - data_dir="/var/lib/kinghistorian" - log_dir="/var/log/kinghistorian" -else - script_dir=`dirname $0` - cd ${script_dir} - script_dir="$(pwd)" - data_dir="/var/lib/kinghistorian" - log_dir="~/kinghistorian/log" -fi - -log_link_dir="/usr/local/kinghistorian/log" - -cfg_install_dir="/etc/kinghistorian" - -if [ "$osType" != "Darwin" ]; then - bin_link_dir="/usr/bin" - lib_link_dir="/usr/lib" - lib64_link_dir="/usr/lib64" - inc_link_dir="/usr/include" -else - bin_link_dir="/usr/local/bin" - lib_link_dir="/usr/local/lib" - inc_link_dir="/usr/local/include" -fi - -#install main path -install_main_dir="/usr/local/kinghistorian" - -# old bin dir -bin_dir="/usr/local/kinghistorian/bin" - -# Color setting -RED='\033[0;31m' -GREEN='\033[1;32m' -GREEN_DARK='\033[0;32m' -GREEN_UNDERLINE='\033[4;32m' -NC='\033[0m' - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -update_flag=0 - -function kill_client() { - pid=$(ps -ef | grep "khclient" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function install_main_path() { - #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : - ${csudo}mkdir -p ${install_main_dir} - ${csudo}mkdir -p ${install_main_dir}/cfg - ${csudo}mkdir -p ${install_main_dir}/bin - ${csudo}mkdir -p ${install_main_dir}/connector - ${csudo}mkdir -p ${install_main_dir}/driver - ${csudo}mkdir -p ${install_main_dir}/examples - ${csudo}mkdir -p ${install_main_dir}/include -} - -function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/khclient || : - if [ "$osType" != "Darwin" ]; then - ${csudo}rm -f ${bin_link_dir}/khdemo || : - ${csudo}rm -f ${bin_link_dir}/khdump || : - fi - ${csudo}rm -f ${bin_link_dir}/rmkh || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - - ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/khclient ] && ${csudo}ln -s ${install_main_dir}/bin/khclient ${bin_link_dir}/khclient || : - if [ "$osType" != "Darwin" ]; then - [ -x ${install_main_dir}/bin/khdemo ] && ${csudo}ln -s ${install_main_dir}/bin/khdemo ${bin_link_dir}/khdemo || : - [ -x ${install_main_dir}/bin/khdump ] && ${csudo}ln -s ${install_main_dir}/bin/khdump ${bin_link_dir}/khdump || : - fi - [ -x ${install_main_dir}/bin/remove_client_kh.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client_kh.sh ${bin_link_dir}/rmkh || : - [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : -} - -function clean_lib() { - sudo rm -f /usr/lib/libtaos.* || : - sudo rm -rf ${lib_dir} || : -} - -function install_lib() { - # Remove links - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - #${csudo}rm -rf ${v15_java_app_dir} || : - - ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* - - if [ "$osType" != "Darwin" ]; then - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 - ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - - if [ -d "${lib64_link_dir}" ]; then - ${csudo}ln -s 
${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : - fi - else - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib - ${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib - fi - - ${csudo}ldconfig -} - -function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h -} - -function install_config() { - if [ ! -f ${cfg_install_dir}/kinghistorian.cfg ]; then - ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/kinghistorian.cfg ] && ${csudo}cp ${script_dir}/cfg/kinghistorian.cfg ${cfg_install_dir} - ${csudo}chmod 644 ${cfg_install_dir}/* - fi - - ${csudo}cp -f ${script_dir}/cfg/kinghistorian.cfg ${install_main_dir}/cfg/kinghistorian.cfg.org - ${csudo}ln -s ${cfg_install_dir}/kinghistorian.cfg ${install_main_dir}/cfg -} - - -function install_log() { - ${csudo}rm -rf ${log_dir} || : - - if [ "$osType" != "Darwin" ]; then - ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - else - mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - fi - ${csudo}ln -s ${log_dir} ${install_main_dir}/log -} - -function install_connector() { - ${csudo}cp -rf ${script_dir}/connector/* ${install_main_dir}/connector -} - -function install_examples() { - if [ -d ${script_dir}/examples ]; then - ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples - fi -} - -function update() { - # Start to update - if [ ! -e kinghistorian.tar.gz ]; then - echo "File kinghistorian.tar.gz does not exist" - exit 1 - fi - tar -zxf kinghistorian.tar.gz - - echo -e "${GREEN}Start to update KingHistorian client...${NC}" - # Stop the client shell if running - if pidof khclient &> /dev/null; then - kill_client - sleep 1 - fi - - install_main_path - - install_log - install_header - install_lib - install_examples - install_bin - install_config - - echo - echo -e "\033[44;32;1mKingHistorian client is updated successfully!${NC}" - - rm -rf $(tar -tf kinghistorian.tar.gz) -} - -function install() { - # Start to install - if [ ! 
-e kinghistorian.tar.gz ]; then - echo "File kinghistorian.tar.gz does not exist" - exit 1 - fi - tar -zxf kinghistorian.tar.gz - - echo -e "${GREEN}Start to install KingHistorian client...${NC}" - - install_main_path - install_log - install_header - install_lib - install_examples - install_bin - install_config - - echo - echo -e "\033[44;32;1mKingHistorian client is installed successfully!${NC}" - - rm -rf $(tar -tf kinghistorian.tar.gz) -} - - -## ==============================Main program starts from here============================ -# Install or updata client and client -# if server is already install, don't install client - if [ -e ${bin_dir}/khserver ]; then - echo -e "\033[44;32;1mThere are already installed KingHistorian server, so don't need install client!${NC}" - exit 0 - fi - - if [ -x ${bin_dir}/khclient ]; then - update_flag=1 - update - else - install - fi diff --git a/packaging/tools/install_client_power.sh b/packaging/tools/install_client_power.sh deleted file mode 100755 index 4e7ee96a6255bd74862a509206ef6aff69320916..0000000000000000000000000000000000000000 --- a/packaging/tools/install_client_power.sh +++ /dev/null @@ -1,299 +0,0 @@ -#!/bin/bash -# -# This file is used to install PowerDB client on linux systems. The operating system -# is required to use systemd to manage services at boot - -set -e -#set -x - -# -----------------------Variables definition--------------------- - -osType=Linux -pagMode=full - -if [ "$osType" != "Darwin" ]; then - script_dir=$(dirname $(readlink -f "$0")) - # Dynamic directory - data_dir="/var/lib/power" - log_dir="/var/log/power" -else - script_dir=`dirname $0` - cd ${script_dir} - script_dir="$(pwd)" - data_dir="/var/lib/power" - log_dir="~/PowerDBLog" -fi - -log_link_dir="/usr/local/power/log" - -cfg_install_dir="/etc/power" - -if [ "$osType" != "Darwin" ]; then - bin_link_dir="/usr/bin" - lib_link_dir="/usr/lib" - lib64_link_dir="/usr/lib64" - inc_link_dir="/usr/include" -else - bin_link_dir="/usr/local/bin" - lib_link_dir="/usr/local/lib" - inc_link_dir="/usr/local/include" -fi - -#install main path -install_main_dir="/usr/local/power" - -# old bin dir -bin_dir="/usr/local/power/bin" - -# v1.5 jar dir -#v15_java_app_dir="/usr/local/lib/power" - -# Color setting -RED='\033[0;31m' -GREEN='\033[1;32m' -GREEN_DARK='\033[0;32m' -GREEN_UNDERLINE='\033[4;32m' -NC='\033[0m' - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -update_flag=0 - -function kill_client() { - pid=$(ps -ef | grep "power" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function install_main_path() { - #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : - ${csudo}mkdir -p ${install_main_dir} - ${csudo}mkdir -p ${install_main_dir}/cfg - ${csudo}mkdir -p ${install_main_dir}/bin - ${csudo}mkdir -p ${install_main_dir}/connector - ${csudo}mkdir -p ${install_main_dir}/driver - ${csudo}mkdir -p ${install_main_dir}/examples - ${csudo}mkdir -p ${install_main_dir}/include -} - -function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/power || : - if [ "$osType" != "Darwin" ]; then - ${csudo}rm -f ${bin_link_dir}/powerdemo || : - ${csudo}rm -f ${bin_link_dir}/powerdump || : - fi - ${csudo}rm -f ${bin_link_dir}/rmpower || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - - ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/power ] && ${csudo}ln -s 
${install_main_dir}/bin/power ${bin_link_dir}/power || : - if [ "$osType" != "Darwin" ]; then - [ -x ${install_main_dir}/bin/powerdemo ] && ${csudo}ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || : - [ -x ${install_main_dir}/bin/powerdump ] && ${csudo}ln -s ${install_main_dir}/bin/powerdump ${bin_link_dir}/powerdump || : - fi - [ -x ${install_main_dir}/bin/remove_client_power.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client_power.sh ${bin_link_dir}/rmpower || : - [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : -} - -function clean_lib() { - sudo rm -f /usr/lib/libtaos.* || : - sudo rm -rf ${lib_dir} || : -} - -function install_lib() { - # Remove links - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - #${csudo}rm -rf ${v15_java_app_dir} || : - - ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* - - if [ "$osType" != "Darwin" ]; then - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 - ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - - if [ -d "${lib64_link_dir}" ]; then - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : - fi - else - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib - ${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib - fi - - ${csudo}ldconfig -} - -function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h -} - -function install_jemalloc() { - jemalloc_dir=${script_dir}/jemalloc - - if [ -d ${jemalloc_dir} ]; then - ${csudo}/usr/bin/install -c -d /usr/local/bin - - if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jeprof ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin - fi - if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then - ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib - ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so - ${csudo}/usr/bin/install -c -d /usr/local/lib - if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib - fi - if [ -f 
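Every install_lib variant in these scripts builds the same three-level chain used by stock Linux shared libraries: the real file lives under ${install_main_dir}/driver, /usr/lib/libtaos.so.1 points at it, the unversioned /usr/lib/libtaos.so points at the versioned link, and ldconfig refreshes the loader cache afterwards. A hedged sketch of how to check the chain after one of these installers has run (paths are the Linux defaults from the script above):

    # Show each hop of the symlink chain and confirm the loader sees it.
    for name in /usr/lib/libtaos.so /usr/lib/libtaos.so.1; do
        ls -l "$name"           # prints the link target for each hop
    done
    ldconfig -p | grep libtaos  # non-empty output means the loader cache knows the library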
${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig - fi - fi - if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc - fi - if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3 - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 - fi - - if [ -d /etc/ld.so.conf.d ]; then - echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" - ${csudo}ldconfig - else - echo "/etc/ld.so.conf.d not found!" - fi - fi -} - -function install_config() { - if [ ! -f ${cfg_install_dir}/power.cfg ]; then - ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/power.cfg ] && ${csudo}cp ${script_dir}/cfg/power.cfg ${cfg_install_dir} - ${csudo}chmod 644 ${cfg_install_dir}/* - fi - - ${csudo}cp -f ${script_dir}/cfg/power.cfg ${install_main_dir}/cfg/power.cfg.org - ${csudo}ln -s ${cfg_install_dir}/power.cfg ${install_main_dir}/cfg -} - - -function install_log() { - ${csudo}rm -rf ${log_dir} || : - - if [ "$osType" != "Darwin" ]; then - ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - else - mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - fi - ${csudo}ln -s ${log_dir} ${install_main_dir}/log -} - -function install_connector() { - ${csudo}cp -rf ${script_dir}/connector/* ${install_main_dir}/connector -} - -function install_examples() { - if [ -d ${script_dir}/examples ]; then - ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples - fi -} - -function update_PowerDB() { - # Start to update - if [ ! -e power.tar.gz ]; then - echo "File power.tar.gz does not exist" - exit 1 - fi - tar -zxf power.tar.gz - install_jemalloc - - echo -e "${GREEN}Start to update PowerDB client...${NC}" - # Stop the client shell if running - if pidof power &> /dev/null; then - kill_client - sleep 1 - fi - - install_main_path - - install_log - install_header - install_lib - install_examples - install_bin - install_config - - echo - echo -e "\033[44;32;1mPowerDB client is updated successfully!${NC}" - - rm -rf $(tar -tf power.tar.gz) -} - -function install_PowerDB() { - # Start to install - if [ ! 
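One quirk worth noting in install_jemalloc above (the same block recurs in the other installers in this diff): the pkg-config file is only ever written when libjemalloc_pic.a exists, because the inner guard re-tests that archive rather than the jemalloc.pc file it actually installs. A sketch of the presumably intended guard, reusing ${jemalloc_dir} and ${csudo} as defined in the script:

    # Presumably intended: gate the pkg-config install on the .pc file itself.
    if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
        ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
        ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
    fi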
-e power.tar.gz ]; then - echo "File power.tar.gz does not exist" - exit 1 - fi - tar -zxf power.tar.gz - - echo -e "${GREEN}Start to install PowerDB client...${NC}" - - install_main_path - install_log - install_header - install_lib - install_jemalloc - install_examples - install_bin - install_config - - echo - echo -e "\033[44;32;1mPowerDB client is installed successfully!${NC}" - - rm -rf $(tar -tf power.tar.gz) -} - - -## ==============================Main program starts from here============================ -# Install or update the client -# if the server is already installed, don't install the client - if [ -e ${bin_dir}/powerd ]; then - echo -e "\033[44;32;1mPowerDB server is already installed; there is no need to install the client!${NC}" - exit 0 - fi - - if [ -x ${bin_dir}/power ]; then - update_flag=1 - update_PowerDB - else - install_PowerDB - fi diff --git a/packaging/tools/install_client_pro.sh b/packaging/tools/install_client_pro.sh deleted file mode 100755 index 7551484e56c8d5fc59539d26f52ac7d318106fa5..0000000000000000000000000000000000000000 --- a/packaging/tools/install_client_pro.sh +++ /dev/null @@ -1,240 +0,0 @@ -#!/bin/bash -# -# This file is used to install the ProDB client on Linux systems. The operating system -# is required to use systemd to manage services at boot - -set -e -#set -x - -# -----------------------Variables definition--------------------- - -osType=Linux -pagMode=full - -if [ "$osType" != "Darwin" ]; then - script_dir=$(dirname $(readlink -f "$0")) - # Dynamic directory - data_dir="/var/lib/ProDB" - log_dir="/var/log/ProDB" -else - script_dir=`dirname $0` - cd ${script_dir} - script_dir="$(pwd)" - data_dir="/var/lib/ProDB" - log_dir="~/ProDB/log" -fi - -log_link_dir="/usr/local/ProDB/log" - -cfg_install_dir="/etc/ProDB" - -if [ "$osType" != "Darwin" ]; then - bin_link_dir="/usr/bin" - lib_link_dir="/usr/lib" - lib64_link_dir="/usr/lib64" - inc_link_dir="/usr/include" -else - bin_link_dir="/usr/local/bin" - lib_link_dir="/usr/local/lib" - inc_link_dir="/usr/local/include" -fi - -#install main path -install_main_dir="/usr/local/ProDB" - -# old bin dir -bin_dir="/usr/local/ProDB/bin" - -# Color setting -RED='\033[0;31m' -GREEN='\033[1;32m' -GREEN_DARK='\033[0;32m' -GREEN_UNDERLINE='\033[4;32m' -NC='\033[0m' - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -update_flag=0 - -function kill_client() { - pid=$(ps -ef | grep "prodbc" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function install_main_path() { - #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : - ${csudo}mkdir -p ${install_main_dir} - ${csudo}mkdir -p ${install_main_dir}/cfg - ${csudo}mkdir -p ${install_main_dir}/bin - ${csudo}mkdir -p ${install_main_dir}/connector - ${csudo}mkdir -p ${install_main_dir}/driver - ${csudo}mkdir -p ${install_main_dir}/examples - ${csudo}mkdir -p ${install_main_dir}/include -} - -function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/prodbc || : - if [ "$osType" != "Darwin" ]; then - ${csudo}rm -f ${bin_link_dir}/prodemo || : - ${csudo}rm -f ${bin_link_dir}/prodump || : - fi - ${csudo}rm -f ${bin_link_dir}/rmprodb || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - - ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/prodbc ] && ${csudo}ln -s ${install_main_dir}/bin/prodbc ${bin_link_dir}/prodbc || : - if [ "$osType" != "Darwin" ]; then - [ -x
${install_main_dir}/bin/prodemo ] && ${csudo}ln -s ${install_main_dir}/bin/prodemo ${bin_link_dir}/prodemo || : - [ -x ${install_main_dir}/bin/prodump ] && ${csudo}ln -s ${install_main_dir}/bin/prodump ${bin_link_dir}/prodump || : - fi - [ -x ${install_main_dir}/bin/remove_client_prodb.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client_prodb.sh ${bin_link_dir}/rmprodb || : - [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : -} - -function clean_lib() { - sudo rm -f /usr/lib/libtaos.* || : - sudo rm -rf ${lib_dir} || : -} - -function install_lib() { - # Remove links - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - - ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* - - if [ "$osType" != "Darwin" ]; then - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 - ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - - if [ -d "${lib64_link_dir}" ]; then - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : - fi - else - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib - ${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib - fi - - ${csudo}ldconfig -} - -function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h -} - -function install_config() { - if [ ! -f ${cfg_install_dir}/prodb.cfg ]; then - ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/prodb.cfg ] && ${csudo}cp ${script_dir}/cfg/prodb.cfg ${cfg_install_dir} - ${csudo}chmod 644 ${cfg_install_dir}/* - fi - - ${csudo}cp -f ${script_dir}/cfg/prodb.cfg ${install_main_dir}/cfg/prodb.cfg.org - ${csudo}ln -s ${cfg_install_dir}/prodb.cfg ${install_main_dir}/cfg -} - - -function install_log() { - ${csudo}rm -rf ${log_dir} || : - - if [ "$osType" != "Darwin" ]; then - ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - else - mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - fi - ${csudo}ln -s ${log_dir} ${install_main_dir}/log -} - -function install_connector() { - ${csudo}cp -rf ${script_dir}/connector/* ${install_main_dir}/connector -} - -function install_examples() { - if [ -d ${script_dir}/examples ]; then - ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples - fi -} - -function update_prodb() { - # Start to update - if [ ! -e prodb.tar.gz ]; then - echo "File prodb.tar.gz does not exist" - exit 1 - fi - tar -zxf prodb.tar.gz - - echo -e "${GREEN}Start to update ProDB client...${NC}" - # Stop the client shell if running - if pidof prodbc &> /dev/null; then - kill_client - sleep 1 - fi - - install_main_path - - install_log - install_header - install_lib - install_examples - install_bin - install_config - - echo - echo -e "\033[44;32;1mProDB client is updated successfully!${NC}" - - rm -rf $(tar -tf prodb.tar.gz) -} - -function install_prodb() { - # Start to install - if [ ! 
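The install_config functions all encode the same update-safe pattern: the live configuration stays under /etc/&lt;product&gt; and is created only if absent, a pristine copy ships next to the binaries as *.cfg.org, and the install tree carries only a symlink, so reinstalling never clobbers user edits. Condensed to its essentials (a sketch using the ProDB names above; `pkg` is a hypothetical stand-in for ${script_dir}):

    # Create the live config only on first install; later runs leave it alone.
    if [ ! -f /etc/ProDB/prodb.cfg ]; then
        sudo mkdir -p /etc/ProDB
        sudo cp pkg/cfg/prodb.cfg /etc/ProDB
    fi
    sudo cp -f pkg/cfg/prodb.cfg /usr/local/ProDB/cfg/prodb.cfg.org  # pristine reference copy
    sudo ln -s /etc/ProDB/prodb.cfg /usr/local/ProDB/cfg             # install tree holds a symlink only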
-e prodb.tar.gz ]; then - echo "File prodb.tar.gz does not exist" - exit 1 - fi - tar -zxf prodb.tar.gz - - echo -e "${GREEN}Start to install ProDB client...${NC}" - - install_main_path - install_log - install_header - install_lib - install_examples - install_bin - install_config - - echo - echo -e "\033[44;32;1mProDB client is installed successfully!${NC}" - - rm -rf $(tar -tf prodb.tar.gz) -} - - -## ==============================Main program starts from here============================ -# Install or update the client -# if the server is already installed, don't install the client -if [ -e ${bin_dir}/prodbs ]; then - echo -e "\033[44;32;1mProDB server is already installed; there is no need to install the client!${NC}" - exit 0 -fi - -if [ -x ${bin_dir}/prodbc ]; then - update_flag=1 - update_prodb -else - install_prodb -fi diff --git a/packaging/tools/install_client_tq.sh b/packaging/tools/install_client_tq.sh deleted file mode 100755 index 04479cfdbe234fe323a43a6a521992838069ecd9..0000000000000000000000000000000000000000 --- a/packaging/tools/install_client_tq.sh +++ /dev/null @@ -1,244 +0,0 @@ -#!/bin/bash -# -# This file is used to install the TQ client on Linux systems. The operating system -# is required to use systemd to manage services at boot - -set -e -#set -x - -# -----------------------Variables definition--------------------- - -osType=Linux -pagMode=full - -if [ "$osType" != "Darwin" ]; then - script_dir=$(dirname $(readlink -f "$0")) - # Dynamic directory - data_dir="/var/lib/tq" - log_dir="/var/log/tq" -else - script_dir=`dirname $0` - cd ${script_dir} - script_dir="$(pwd)" - data_dir="/var/lib/tq" - log_dir="~/TQLog" -fi - -log_link_dir="/usr/local/tq/log" - -cfg_install_dir="/etc/tq" - -if [ "$osType" != "Darwin" ]; then - bin_link_dir="/usr/bin" - lib_link_dir="/usr/lib" - lib64_link_dir="/usr/lib64" - inc_link_dir="/usr/include" -else - bin_link_dir="/usr/local/bin" - lib_link_dir="/usr/local/lib" - inc_link_dir="/usr/local/include" -fi - -#install main path -install_main_dir="/usr/local/tq" - -# old bin dir -bin_dir="/usr/local/tq/bin" - -# v1.5 jar dir -#v15_java_app_dir="/usr/local/lib/tq" - -# Color setting -RED='\033[0;31m' -GREEN='\033[1;32m' -GREEN_DARK='\033[0;32m' -GREEN_UNDERLINE='\033[4;32m' -NC='\033[0m' - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -update_flag=0 - -function kill_client() { - pid=$(ps -ef | grep "tq" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function install_main_path() { - #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : - ${csudo}mkdir -p ${install_main_dir} - ${csudo}mkdir -p ${install_main_dir}/cfg - ${csudo}mkdir -p ${install_main_dir}/bin - ${csudo}mkdir -p ${install_main_dir}/connector - ${csudo}mkdir -p ${install_main_dir}/driver - ${csudo}mkdir -p ${install_main_dir}/examples - ${csudo}mkdir -p ${install_main_dir}/include -} - -function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/tq || : - if [ "$osType" != "Darwin" ]; then - ${csudo}rm -f ${bin_link_dir}/tqdemo || : - ${csudo}rm -f ${bin_link_dir}/tqdump || : - fi - ${csudo}rm -f ${bin_link_dir}/rmtq || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - - ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/tq ] && ${csudo}ln -s ${install_main_dir}/bin/tq ${bin_link_dir}/tq || : - if [ "$osType" != "Darwin" ]; then - [ -x ${install_main_dir}/bin/tqdemo ]
&& ${csudo}ln -s ${install_main_dir}/bin/tqdemo ${bin_link_dir}/tqdemo || : - [ -x ${install_main_dir}/bin/tqdump ] && ${csudo}ln -s ${install_main_dir}/bin/tqdump ${bin_link_dir}/tqdump || : - fi - [ -x ${install_main_dir}/bin/remove_client_tq.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client_tq.sh ${bin_link_dir}/rmtq || : - [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : -} - -function clean_lib() { - sudo rm -f /usr/lib/libtaos.* || : - sudo rm -rf ${lib_dir} || : -} - -function install_lib() { - # Remove links - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - #${csudo}rm -rf ${v15_java_app_dir} || : - - ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* - - if [ "$osType" != "Darwin" ]; then - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 - ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - - if [ -d "${lib64_link_dir}" ]; then - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : - fi - else - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib - ${csudo}ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib - fi - - ${csudo}ldconfig -} - -function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h -} - -function install_config() { - if [ ! -f ${cfg_install_dir}/tq.cfg ]; then - ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/tq.cfg ] && ${csudo}cp ${script_dir}/cfg/tq.cfg ${cfg_install_dir} - ${csudo}chmod 644 ${cfg_install_dir}/* - fi - - ${csudo}cp -f ${script_dir}/cfg/tq.cfg ${install_main_dir}/cfg/tq.cfg.org - ${csudo}ln -s ${cfg_install_dir}/tq.cfg ${install_main_dir}/cfg -} - - -function install_log() { - ${csudo}rm -rf ${log_dir} || : - - if [ "$osType" != "Darwin" ]; then - ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - else - mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - fi - ${csudo}ln -s ${log_dir} ${install_main_dir}/log -} - -function install_connector() { - ${csudo}cp -rf ${script_dir}/connector/* ${install_main_dir}/connector -} - -function install_examples() { - if [ -d ${script_dir}/examples ]; then - ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples - fi -} - -function update_tq() { - # Start to update - if [ ! -e tq.tar.gz ]; then - echo "File tq.tar.gz does not exist" - exit 1 - fi - tar -zxf tq.tar.gz - - echo -e "${GREEN}Start to update TQ client...${NC}" - # Stop the client shell if running - if pidof tq &> /dev/null; then - kill_client - sleep 1 - fi - - install_main_path - - install_log - install_header - install_lib - install_examples - install_bin - install_config - - echo - echo -e "\033[44;32;1mTQ client is updated successfully!${NC}" - - rm -rf $(tar -tf tq.tar.gz) -} - -function install_tq() { - # Start to install - if [ ! 
-e tq.tar.gz ]; then - echo "File tq.tar.gz does not exist" - exit 1 - fi - tar -zxf tq.tar.gz - - echo -e "${GREEN}Start to install TQ client...${NC}" - - install_main_path - install_log - install_header - install_lib - install_examples - install_bin - install_config - - echo - echo -e "\033[44;32;1mTQ client is installed successfully!${NC}" - - rm -rf $(tar -tf tq.tar.gz) -} - - -## ==============================Main program starts from here============================ -# Install or update the client -# if the server is already installed, don't install the client - if [ -e ${bin_dir}/tqd ]; then - echo -e "\033[44;32;1mTQ server is already installed; there is no need to install the client!${NC}" - exit 0 - fi - - if [ -x ${bin_dir}/tq ]; then - update_flag=1 - update_tq - else - install_tq - fi diff --git a/packaging/tools/install_jh.sh b/packaging/tools/install_jh.sh deleted file mode 100755 index 667766154e92004b3eea695b08f0fc43872cbd2e..0000000000000000000000000000000000000000 --- a/packaging/tools/install_jh.sh +++ /dev/null @@ -1,950 +0,0 @@ -#!/bin/bash -# -# This file is used to install the database on Linux systems. The operating system -# is required to use systemd to manage services at boot - -set -e -#set -x - -verMode=edge -pagMode=full - -iplist="" -serverFqdn="" -# -----------------------Variables definition--------------------- -script_dir=$(dirname $(readlink -f "$0")) -# Dynamic directory -data_dir="/var/lib/jh_taos" -log_dir="/var/log/jh_taos" - -data_link_dir="/usr/local/jh_taos/data" -log_link_dir="/usr/local/jh_taos/log" - -cfg_install_dir="/etc/jh_taos" - -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -lib64_link_dir="/usr/lib64" -inc_link_dir="/usr/include" - -#install main path -install_main_dir="/usr/local/jh_taos" - -# old bin dir -bin_dir="/usr/local/jh_taos/bin" - -service_config_dir="/etc/systemd/system" -nginx_port=6060 -nginx_dir="/usr/local/nginxd" - -# Color setting -RED='\033[0;31m' -GREEN='\033[1;32m' -GREEN_DARK='\033[0;32m' -GREEN_UNDERLINE='\033[4;32m' -NC='\033[0m' - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -update_flag=0 - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - - -# get the operating system type for using the corresponding init file -# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification -#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -if [[ -e /etc/os-release ]]; then - osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: -else - osinfo="" -fi -#echo "osinfo: ${osinfo}" -os_type=0 -if echo $osinfo | grep -qwi "ubuntu" ; then -# echo "This is ubuntu system" - os_type=1 -elif echo $osinfo | grep -qwi "debian" ; then -# echo "This is debian system" - os_type=1 -elif echo $osinfo | grep -qwi "Kylin" ; then -# echo "This is Kylin system" - os_type=1 -elif echo $osinfo | grep -qwi "centos" ; then -# echo "This is centos system" - os_type=2 -elif echo $osinfo | grep -qwi "fedora" ; then -# echo "This is fedora system" - os_type=2 -else - echo " osinfo: ${osinfo}" - echo " This is an officially unverified Linux system." - echo " If there are any problems with installation or operation, " - echo " please feel free to
contact jhict.com for support." - os_type=1 -fi - - -# ============================= get input parameters ================================================= - -# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...] - -# set parameters by default value -interactiveFqdn=yes # [yes | no] -verType=server # [server | client] -initType=systemd # [systemd | service | ...] - -while getopts "hv:e:i:" arg -do - case $arg in - e) - #echo "interactiveFqdn=$OPTARG" - interactiveFqdn=$( echo $OPTARG ) - ;; - v) - #echo "verType=$OPTARG" - verType=$(echo $OPTARG) - ;; - i) - #echo "initType=$OPTARG" - initType=$(echo $OPTARG) - ;; - h) - echo "Usage: `basename $0` -v [server | client] -e [yes | no]" - exit 0 - ;; - ?) #unknown option - echo "unknown argument" - exit 1 - ;; - esac -done - -function kill_process() { - pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function install_main_path() { - #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : - ${csudo}mkdir -p ${install_main_dir} - ${csudo}mkdir -p ${install_main_dir}/cfg - ${csudo}mkdir -p ${install_main_dir}/bin -# ${csudo}mkdir -p ${install_main_dir}/connector - ${csudo}mkdir -p ${install_main_dir}/driver -# ${csudo}mkdir -p ${install_main_dir}/examples - ${csudo}mkdir -p ${install_main_dir}/include - ${csudo}mkdir -p ${install_main_dir}/init.d - if [ "$verMode" == "cluster" ]; then - ${csudo}mkdir -p ${nginx_dir} - fi -} - -function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/jh_taos || : - ${csudo}rm -f ${bin_link_dir}/jh_taosd || : - ${csudo}rm -f ${bin_link_dir}/jhdemo || : - ${csudo}rm -f ${bin_link_dir}/rmjh || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || : - - ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/jh_taos ] && ${csudo}ln -s ${install_main_dir}/bin/jh_taos ${bin_link_dir}/jh_taos || : - [ -x ${install_main_dir}/bin/jh_taosd ] && ${csudo}ln -s ${install_main_dir}/bin/jh_taosd ${bin_link_dir}/jh_taosd || : - [ -x ${install_main_dir}/bin/jhdemo ] && ${csudo}ln -s ${install_main_dir}/bin/jhdemo ${bin_link_dir}/jhdemo || : - [ -x ${install_main_dir}/bin/remove_jh.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_jh.sh ${bin_link_dir}/rmjh || : - [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : - [ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || : - [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : - - if [ "$verMode" == "cluster" ]; then - ${csudo}cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo}chmod 0555 ${nginx_dir}/* - ${csudo}mkdir -p ${nginx_dir}/logs - ${csudo}chmod 777 ${nginx_dir}/sbin/nginx - fi -} - -function install_lib() { - # Remove links - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* - - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 - ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - - if [[ -d ${lib64_link_dir} && !
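The getopts loop above gives this installer three switches: -v chooses server or client packaging, -e toggles the interactive FQDN prompt, and -i names the init system. Typical invocations (the script name follows this hunk; flags as parsed above):

    ./install_jh.sh                       # full server install, interactive FQDN setup (the defaults)
    ./install_jh.sh -v client -e no       # client only, no prompts; suits scripted deployment
    ./install_jh.sh -v server -i service  # server install on a SysV-init system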
-e ${lib64_link_dir}/libtaos.so ]]; then - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : - fi - - if [ "$osType" != "Darwin" ]; then - ${csudo}ldconfig - else - ${csudo}update_dyld_shared_cache - fi -} - -function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h -} - -function install_jemalloc() { - jemalloc_dir=${script_dir}/jemalloc - - if [ -d ${jemalloc_dir} ]; then - ${csudo}/usr/bin/install -c -d /usr/local/bin - - if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jeprof ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin - fi - if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then - ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib - ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so - ${csudo}/usr/bin/install -c -d /usr/local/lib - if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig - fi - fi - if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc - fi - if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3 - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 - fi - - if [ -d /etc/ld.so.conf.d ]; then - echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" - ${csudo}ldconfig - else - echo "/etc/ld.so.conf.d not found!" - fi - fi -} - -function add_newHostname_to_hosts() { - localIp="127.0.0.1" - OLD_IFS="$IFS" - IFS=" " - iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') - arr=($iphost) - IFS="$OLD_IFS" - for s in ${arr[@]} - do - if [[ "$s" == "$localIp" ]]; then - return - fi - done - ${csudo}echo "127.0.0.1 $1" >> /etc/hosts ||: -} - -function set_hostname() { - echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" - read newHostname - while true; do - if [[ ! 
-z "$newHostname" && "$newHostname" != "localhost" ]]; then - break - else - read -p "Please enter one hostname(must not be 'localhost'):" newHostname - fi - done - - ${csudo}hostname $newHostname ||: - retval=`echo $?` - if [[ $retval != 0 ]]; then - echo - echo "set hostname fail!" - return - fi - - #ubuntu/centos /etc/hostname - if [[ -e /etc/hostname ]]; then - ${csudo}echo $newHostname > /etc/hostname ||: - fi - - #debian: #HOSTNAME=yourname - if [[ -e /etc/sysconfig/network ]]; then - ${csudo}sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: - fi - - ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg - serverFqdn=$newHostname - - if [[ -e /etc/hosts ]]; then - add_newHostname_to_hosts $newHostname - fi -} - -function is_correct_ipaddr() { - newIp=$1 - OLD_IFS="$IFS" - IFS=" " - arr=($iplist) - IFS="$OLD_IFS" - for s in ${arr[@]} - do - if [[ "$s" == "$newIp" ]]; then - return 0 - fi - done - - return 1 -} - -function set_ipAsFqdn() { - iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||: - if [ -z "$iplist" ]; then - iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||: - fi - - if [ -z "$iplist" ]; then - echo - echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" - localFqdn="127.0.0.1" - # Write the local FQDN to configuration file - ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg - serverFqdn=$localFqdn - echo - return - fi - - echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:" - echo - echo -e -n "${GREEN}$iplist${NC}" - echo - echo - echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" - read localFqdn - while true; do - if [ ! -z "$localFqdn" ]; then - # Check if correct ip address - is_correct_ipaddr $localFqdn - retval=`echo $?` - if [[ $retval != 0 ]]; then - read -p "Please choose an IP from local IP list:" localFqdn - else - # Write the local FQDN to configuration file - ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg - serverFqdn=$localFqdn - break - fi - else - read -p "Please choose an IP from local IP list:" localFqdn - fi - done -} - -function local_fqdn_check() { - #serverFqdn=$(hostname) - echo - echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" - echo - if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then - echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" - echo - - while true - do - read -r -p "Set hostname now? [Y/n] " input - if [ ! -n "$input" ]; then - set_hostname - break - else - case $input in - [yY][eE][sS]|[yY]) - set_hostname - break - ;; - - [nN][oO]|[nN]) - set_ipAsFqdn - break - ;; - - *) - echo "Invalid input..." - ;; - esac - fi - done - fi -} - -function install_config() { - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then - ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo}cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} - ${csudo}chmod 644 ${cfg_install_dir}/* - fi - - ${csudo}cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo}ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg - - [ ! 
-z $1 ] && return 0 || : # only install client - - if ((${update_flag}==1)); then - return 0 - fi - - if [ "$interactiveFqdn" == "no" ]; then - return 0 - fi - - local_fqdn_check - - #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" - #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)" - #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)" - #FQDN_PATTERN=":[0-9]{1,5}$" - - # first full-qualified domain name (FQDN) for jh_iot cluster system - echo - echo -e -n "${GREEN}Enter FQDN:port (like h1.jhict.com:6030) of an existing jh_iot cluster node to join${NC}" - echo - echo -e -n "${GREEN}OR leave it blank to build one${NC}:" - read firstEp - while true; do - if [ ! -z "$firstEp" ]; then - # check the format of the firstEp - #if [[ $firstEp == $FQDN_PATTERN ]]; then - # Write the first FQDN to configuration file - ${csudo}sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg - break - #else - # read -p "Please enter the correct FQDN:port: " firstEp - #fi - else - break - fi - done -} - - -function install_log() { - ${csudo}rm -rf ${log_dir} || : - ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - - ${csudo}ln -s ${log_dir} ${install_main_dir}/log -} - -function install_data() { - ${csudo}mkdir -p ${data_dir} - - ${csudo}ln -s ${data_dir} ${install_main_dir}/data -} - -function install_connector() { - ${csudo}cp -rf ${script_dir}/connector/* ${install_main_dir}/connector -} - -function install_examples() { - if [ -d ${script_dir}/examples ]; then - ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples - fi -} - -function clean_service_on_sysvinit() { - if pidof jh_taosd &> /dev/null; then - ${csudo}service jh_taosd stop || : - fi - - if pidof tarbitrator &> /dev/null; then - ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/jh_taosd ]; then - ${csudo}chkconfig --del jh_taosd || : - fi - - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/jh_taosd ]; then - ${csudo}insserv -r jh_taosd || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/jh_taosd ]; then - ${csudo}update-rc.d -f jh_taosd remove || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/jh_taosd || : - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function install_service_on_sysvinit() { - clean_service_on_sysvinit - sleep 1 - - # Install jh_taosd service - - if ((${os_type}==1)); then - ${csudo}cp -f ${script_dir}/init.d/jh_taosd.deb ${install_main_dir}/init.d/jh_taosd - ${csudo}cp ${script_dir}/init.d/jh_taosd.deb ${service_config_dir}/jh_taosd && ${csudo}chmod a+x ${service_config_dir}/jh_taosd - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - elif ((${os_type}==2)); then - ${csudo}cp -f ${script_dir}/init.d/jh_taosd.rpm ${install_main_dir}/init.d/jh_taosd - ${csudo}cp ${script_dir}/init.d/jh_taosd.rpm ${service_config_dir}/jh_taosd && ${csudo}chmod a+x ${service_config_dir}/jh_taosd - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm 
${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - fi - - if ((${initd_mod}==1)); then - ${csudo}chkconfig --add jh_taosd || : - ${csudo}chkconfig --level 2345 jh_taosd on || : - ${csudo}chkconfig --add tarbitratord || : - ${csudo}chkconfig --level 2345 tarbitratord on || : - elif ((${initd_mod}==2)); then - ${csudo}insserv jh_taosd || : - ${csudo}insserv -d jh_taosd || : - ${csudo}insserv tarbitratord || : - ${csudo}insserv -d tarbitratord || : - elif ((${initd_mod}==3)); then - ${csudo}update-rc.d jh_taosd defaults || : - ${csudo}update-rc.d tarbitratord defaults || : - fi -} - -function clean_service_on_systemd() { - jh_taosd_service_config="${service_config_dir}/jh_taosd.service" - if systemctl is-active --quiet jh_taosd; then - echo "jh_iot is running, stopping it..." - ${csudo}systemctl stop jh_taosd &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable jh_taosd &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${jh_taosd_service_config} - - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - if systemctl is-active --quiet tarbitratord; then - echo "tarbitrator is running, stopping it..." - ${csudo}systemctl stop tarbitratord &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable tarbitratord &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${tarbitratord_service_config} - - if [ "$verMode" == "cluster" ]; then - nginx_service_config="${service_config_dir}/nginxd.service" - if systemctl is-active --quiet nginxd; then - echo "Nginx for jh_iot is running, stopping it..." - ${csudo}systemctl stop nginxd &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable nginxd &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${nginx_service_config} - fi -} - -function install_service_on_systemd() { - clean_service_on_systemd - - service_config="${service_config_dir}/jh_taosd.service" - ${csudo}bash -c "echo '[Unit]' >> ${service_config}" - ${csudo}bash -c "echo 'Description=jh_iot server service' >> ${service_config}" - ${csudo}bash -c "echo 'After=network-online.target' >> ${service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target' >> ${service_config}" - ${csudo}bash -c "echo >> ${service_config}" - ${csudo}bash -c "echo '[Service]' >> ${service_config}" - ${csudo}bash -c "echo 'Type=simple' >> ${service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/bin/jh_taosd' >> ${service_config}" - ${csudo}bash -c "echo 'ExecStartPre=/usr/local/jh_taos/bin/startPre.sh' >> ${service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${service_config}" - ${csudo}bash -c "echo >> ${service_config}" - ${csudo}bash -c "echo '[Install]' >> ${service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${service_config}" - ${csudo}systemctl enable jh_taosd - - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - ${csudo}bash -c "echo 
'[Unit]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Description=jh_iot arbitrator service' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" - #${csudo}systemctl enable tarbitratord - - if [ "$verMode" == "cluster" ]; then - nginx_service_config="${service_config_dir}/nginxd.service" - ${csudo}bash -c "echo '[Unit]' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'Description=Nginx For jh_iot Service' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" - ${csudo}bash -c "echo >> ${nginx_service_config}" - ${csudo}bash -c "echo '[Service]' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'Type=forking' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" - ${csudo}bash -c "echo >> ${nginx_service_config}" - ${csudo}bash -c "echo '[Install]' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" - if ! 
${csudo}systemctl enable nginxd &> /dev/null; then - ${csudo}systemctl daemon-reexec - ${csudo}systemctl enable nginxd - fi - ${csudo}systemctl start nginxd - fi -} - -function install_service() { - if ((${service_mod}==0)); then - install_service_on_systemd - elif ((${service_mod}==1)); then - install_service_on_sysvinit - else - # must manual stop jh_taosd - kill_process jh_taosd - fi -} - -vercomp () { - if [[ $1 == $2 ]]; then - return 0 - fi - local IFS=. - local i ver1=($1) ver2=($2) - # fill empty fields in ver1 with zeros - for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do - ver1[i]=0 - done - - for ((i=0; i<${#ver1[@]}; i++)); do - if [[ -z ${ver2[i]} ]] - then - # fill empty fields in ver2 with zeros - ver2[i]=0 - fi - if ((10#${ver1[i]} > 10#${ver2[i]})) - then - return 1 - fi - if ((10#${ver1[i]} < 10#${ver2[i]})) - then - return 2 - fi - done - return 0 -} - -function is_version_compatible() { - curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6` - - if [ -f ${script_dir}/driver/vercomp.txt ]; then - min_compatible_version=`cat ${script_dir}/driver/vercomp.txt` - else - min_compatible_version=$(${script_dir}/bin/jh_taosd -V | head -1 | cut -d ' ' -f 5) - fi - - vercomp $curr_version $min_compatible_version - case $? in - 0) return 0;; - 1) return 0;; - 2) return 1;; - esac -} - -function update() { - # Start to update - if [ ! -e jh_taos.tar.gz ]; then - echo "File jh_taos.tar.gz does not exist" - exit 1 - fi - tar -zxf jh_taos.tar.gz - install_jemalloc - - # Check if version compatible - if ! is_version_compatible; then - echo -e "${RED}Version incompatible${NC}" - return 1 - fi - - echo -e "${GREEN}Start to update jh_iot...${NC}" - # Stop the service if running - if pidof jh_taosd &> /dev/null; then - if ((${service_mod}==0)); then - ${csudo}systemctl stop jh_taosd || : - elif ((${service_mod}==1)); then - ${csudo}service jh_taosd stop || : - else - kill_process jh_taosd - fi - sleep 1 - fi - if [ "$verMode" == "cluster" ]; then - if pidof nginx &> /dev/null; then - if ((${service_mod}==0)); then - ${csudo}systemctl stop nginxd || : - elif ((${service_mod}==1)); then - ${csudo}service nginxd stop || : - else - kill_process nginx - fi - sleep 1 - fi - fi - - install_main_path - - install_log - install_header - install_lib -# if [ "$pagMode" != "lite" ]; then -# install_connector -# fi -# install_examples - if [ -z $1 ]; then - install_bin - install_service - install_config - - openresty_work=false - if [ "$verMode" == "cluster" ]; then - # Check if openresty is installed - # Check if nginx is installed successfully - if type curl &> /dev/null; then - if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then - echo -e "\033[44;32;1mNginx for jh_iot is updated successfully!${NC}" - openresty_work=true - else - echo -e "\033[44;31;5mNginx for jh_iot does not work! 
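vercomp above compares two dotted version strings field by field, numerically, padding the shorter string with zeros, and reports through its exit status: 0 for equal, 1 if the first argument is newer, 2 if it is older; is_version_compatible then treats 0 or 1 as compatible. Worked examples (illustrative calls for a shell without `set -e`; the installer itself only reaches vercomp through is_version_compatible inside an `if`):

    vercomp 2.3 2.3.0    ; echo $?   # 0: missing fields are zero-padded, so 2.3 == 2.3.0
    vercomp 2.10.1 2.9.9 ; echo $?   # 1: fields compare numerically, and 10 > 9
    vercomp 2.0.20 2.1.0 ; echo $?   # 2: the first version is older in field two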
Please try again!\033[0m" - fi - fi - fi - - #echo - #echo -e "\033[44;32;1mjh_iot is updated successfully!${NC}" - echo - echo -e "${GREEN_DARK}To configure jh_iot ${NC}: edit /etc/jh_taos/taos.cfg" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo}systemctl start jh_taosd${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo}service jh_taosd start${NC}" - else - echo -e "${GREEN_DARK}To start jh_iot ${NC}: ./jh_taosd${NC}" - fi - - if [ ${openresty_work} = 'true' ]; then - echo -e "${GREEN_DARK}To access jh_iot ${NC}: use ${GREEN_UNDERLINE}jh_taos -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" - else - echo -e "${GREEN_DARK}To access jh_iot ${NC}: use ${GREEN_UNDERLINE}jh_taos -h $serverFqdn${NC} in shell${NC}" - fi - - echo - echo -e "\033[44;32;1mjh_iot is updated successfully!${NC}" - else - install_bin - install_config - - echo - echo -e "\033[44;32;1mjh_iot client is updated successfully!${NC}" - fi - - rm -rf $(tar -tf jh_taos.tar.gz) -} - -function install() { - # Start to install - if [ ! -e jh_taos.tar.gz ]; then - echo "File jh_taos.tar.gz does not exist" - exit 1 - fi - tar -zxf jh_taos.tar.gz - - echo -e "${GREEN}Start to install jh_iot...${NC}" - - install_main_path - - if [ -z $1 ]; then - install_data - fi - - install_log - install_header - install_lib - install_jemalloc -# if [ "$pagMode" != "lite" ]; then -# install_connector -# fi -# install_examples - - if [ -z $1 ]; then # install service and client - # For installing new - install_bin - install_service - - openresty_work=false - if [ "$verMode" == "cluster" ]; then - # Check if nginx is installed successfully - if type curl &> /dev/null; then - if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then - echo -e "\033[44;32;1mNginx for jh_iot is installed successfully!${NC}" - openresty_work=true - else - echo -e "\033[44;31;5mNginx for jh_iot does not work! Please try again!\033[0m" - fi - fi - fi - - install_config - - # Ask if to start the service - #echo - #echo -e "\033[44;32;1mjh_iot is installed successfully!${NC}" - echo - echo -e "${GREEN_DARK}To configure jh_iot ${NC}: edit /etc/jh_taos/taos.cfg" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo}systemctl start jh_taosd${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo}service jh_taosd start${NC}" - else - echo -e "${GREEN_DARK}To start jh_iot ${NC}: jh_taosd${NC}" - fi - - if [ ! -z "$firstEp" ]; then - tmpFqdn=${firstEp%%:*} - substr=":" - if [[ $firstEp =~ $substr ]];then - tmpPort=${firstEp#*:} - else - tmpPort="" - fi - if [[ "$tmpPort" != "" ]];then - echo -e "${GREEN_DARK}To access jh_iot ${NC}: jh_taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" - else - echo -e "${GREEN_DARK}To access jh_iot ${NC}: jh_taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" - fi - echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" - echo - elif [ ! 
-z "$serverFqdn" ]; then - echo -e "${GREEN_DARK}To access jh_iot ${NC}: jh_taos -h $serverFqdn${GREEN_DARK} to login into jh_iot server${NC}" - echo - fi - echo -e "\033[44;32;1mjh_iot is installed successfully!${NC}" - echo - else # Only install client - install_bin - install_config - - echo - echo -e "\033[44;32;1mjh_iot client is installed successfully!${NC}" - fi - - rm -rf $(tar -tf jh_taos.tar.gz) -} - - -## ==============================Main program starts from here============================ -serverFqdn=$(hostname) -if [ "$verType" == "server" ]; then - # Install server and client - if [ -x ${bin_dir}/jh_taosd ]; then - update_flag=1 - update - else - install - fi -elif [ "$verType" == "client" ]; then - interactiveFqdn=no - # Only install client - if [ -x ${bin_dir}/jh_taos ]; then - update_flag=1 - update client - else - install client - fi -else - echo "please input correct verType" -fi diff --git a/packaging/tools/install_kh.sh b/packaging/tools/install_kh.sh deleted file mode 100755 index 8adad4dbbe32c6e052721f8657425b19d7338c4e..0000000000000000000000000000000000000000 --- a/packaging/tools/install_kh.sh +++ /dev/null @@ -1,950 +0,0 @@ -#!/bin/bash -# -# This file is used to install database on linux systems. The operating system -# is required to use systemd to manage services at boot - -set -e -#set -x - -verMode=edge -pagMode=full - -iplist="" -serverFqdn="" -# -----------------------Variables definition--------------------- -script_dir=$(dirname $(readlink -f "$0")) -# Dynamic directory -data_dir="/var/lib/kinghistorian" -log_dir="/var/log/kinghistorian" - -data_link_dir="/usr/local/kinghistorian/data" -log_link_dir="/usr/local/kinghistorian/log" - -cfg_install_dir="/etc/kinghistorian" - -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -lib64_link_dir="/usr/lib64" -inc_link_dir="/usr/include" - -#install main path -install_main_dir="/usr/local/kinghistorian" - -# old bin dir -bin_dir="/usr/local/kinghistorian/bin" - -service_config_dir="/etc/systemd/system" -nginx_port=6060 -nginx_dir="/usr/local/nginxd" - -# Color setting -RED='\033[0;31m' -GREEN='\033[1;32m' -GREEN_DARK='\033[0;32m' -GREEN_UNDERLINE='\033[4;32m' -NC='\033[0m' - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -update_flag=0 - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - - -# get the operating system type for using the corresponding init file -# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification -#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -if [[ -e /etc/os-release ]]; then - osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: -else - osinfo="" -fi -#echo "osinfo: ${osinfo}" -os_type=0 -if echo $osinfo | grep -qwi "ubuntu" ; then -# echo "This is ubuntu system" - os_type=1 -elif echo $osinfo | grep -qwi "debian" ; then -# echo "This is debian system" - os_type=1 -elif echo $osinfo | grep -qwi "Kylin" ; then -# echo "This is Kylin system" - os_type=1 -elif echo $osinfo | grep -qwi "centos" ; then -# echo "This is centos system" - os_type=2 -elif echo $osinfo | grep -qwi "fedora" ; then -# echo "This is fedora system" - os_type=2 -else - echo " osinfo: ${osinfo}" - echo 
" This is an officially unverified linux system," - echo " if there are any problems with the installation and operation, " - echo " please feel free to contact wellintech.com for support." - os_type=1 -fi - - -# ============================= get input parameters ================================================= - -# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...] - -# set parameters by default value -interactiveFqdn=yes # [yes | no] -verType=server # [server | client] -initType=systemd # [systemd | service | ...] - -while getopts "hv:e:i:" arg -do - case $arg in - e) - #echo "interactiveFqdn=$OPTARG" - interactiveFqdn=$( echo $OPTARG ) - ;; - v) - #echo "verType=$OPTARG" - verType=$(echo $OPTARG) - ;; - i) - #echo "initType=$OPTARG" - initType=$(echo $OPTARG) - ;; - h) - echo "Usage: `basename $0` -v [server | client] -e [yes | no]" - exit 0 - ;; - ?) #unknow option - echo "unkonw argument" - exit 1 - ;; - esac -done - -function kill_process() { - pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function install_main_path() { - #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : - ${csudo}mkdir -p ${install_main_dir} - ${csudo}mkdir -p ${install_main_dir}/cfg - ${csudo}mkdir -p ${install_main_dir}/bin -# ${csudo}mkdir -p ${install_main_dir}/connector - ${csudo}mkdir -p ${install_main_dir}/driver -# ${csudo}mkdir -p ${install_main_dir}/examples - ${csudo}mkdir -p ${install_main_dir}/include - ${csudo}mkdir -p ${install_main_dir}/init.d - if [ "$verMode" == "cluster" ]; then - ${csudo}mkdir -p ${nginx_dir} - fi -} - -function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/khclient || : - ${csudo}rm -f ${bin_link_dir}/khserver || : - ${csudo}rm -f ${bin_link_dir}/khdemo || : - ${csudo}rm -f ${bin_link_dir}/rmkh || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || : - - ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/khclient ] && ${csudo}ln -s ${install_main_dir}/bin/khclient ${bin_link_dir}/khclient || : - [ -x ${install_main_dir}/bin/khserver ] && ${csudo}ln -s ${install_main_dir}/bin/khserver ${bin_link_dir}/khserver || : - [ -x ${install_main_dir}/bin/khdemo ] && ${csudo}ln -s ${install_main_dir}/bin/khdemo ${bin_link_dir}/khdemo || : - [ -x ${install_main_dir}/bin/remove_kh.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_kh.sh ${bin_link_dir}/rmkh || : - [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : - [ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || : - [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : - - if [ "$verMode" == "cluster" ]; then - ${csudo}cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo}chmod 0555 ${nginx_dir}/* - ${csudo}mkdir -p ${nginx_dir}/logs - ${csudo}chmod 777 ${nginx_dir}/sbin/nginx - fi -} - -function install_lib() { - # Remove links - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* - - ${csudo}ln -s 
${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 - ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - - if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : - fi - - if [ "$osType" != "Darwin" ]; then - ${csudo}ldconfig - else - ${csudo}update_dyld_shared_cache - fi -} - -function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h -} - -function install_jemalloc() { - jemalloc_dir=${script_dir}/jemalloc - - if [ -d ${jemalloc_dir} ]; then - ${csudo}/usr/bin/install -c -d /usr/local/bin - - if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jeprof ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin - fi - if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then - ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib - ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so - ${csudo}/usr/bin/install -c -d /usr/local/lib - if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig - fi - fi - if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc - fi - if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3 - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 - fi - - if [ -d /etc/ld.so.conf.d ]; then - echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" - ${csudo}ldconfig - else - echo "/etc/ld.so.conf.d not found!" 
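# [editor's sketch] The block above registers /usr/local/lib with the dynamic
# linker by dropping a conf file into /etc/ld.so.conf.d and refreshing the
# cache. A minimal generic form of that pattern, assuming the same ${csudo}
# convention as this script (helper name and conf file name are illustrative,
# not part of the original installer):
register_lib_dir() {
  local dir="$1" conf="$2"
  echo "${dir}" | ${csudo}tee "/etc/ld.so.conf.d/${conf}.conf" > /dev/null
  ${csudo}ldconfig   # rebuild the shared-library cache so new .so files resolve
}
# e.g. register_lib_dir /usr/local/lib jemalloc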
- fi - fi -} - -function add_newHostname_to_hosts() { - localIp="127.0.0.1" - OLD_IFS="$IFS" - IFS=" " - iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') - arr=($iphost) - IFS="$OLD_IFS" - for s in ${arr[@]} - do - if [[ "$s" == "$localIp" ]]; then - return - fi - done - ${csudo}echo "127.0.0.1 $1" >> /etc/hosts ||: -} - -function set_hostname() { - echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" - read newHostname - while true; do - if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then - break - else - read -p "Please enter one hostname(must not be 'localhost'):" newHostname - fi - done - - ${csudo}hostname $newHostname ||: - retval=`echo $?` - if [[ $retval != 0 ]]; then - echo - echo "set hostname fail!" - return - fi - - #ubuntu/centos /etc/hostname - if [[ -e /etc/hostname ]]; then - ${csudo}echo $newHostname > /etc/hostname ||: - fi - - #debian: #HOSTNAME=yourname - if [[ -e /etc/sysconfig/network ]]; then - ${csudo}sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: - fi - - ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/kinghistorian.cfg - serverFqdn=$newHostname - - if [[ -e /etc/hosts ]]; then - add_newHostname_to_hosts $newHostname - fi -} - -function is_correct_ipaddr() { - newIp=$1 - OLD_IFS="$IFS" - IFS=" " - arr=($iplist) - IFS="$OLD_IFS" - for s in ${arr[@]} - do - if [[ "$s" == "$newIp" ]]; then - return 0 - fi - done - - return 1 -} - -function set_ipAsFqdn() { - iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||: - if [ -z "$iplist" ]; then - iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||: - fi - - if [ -z "$iplist" ]; then - echo - echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" - localFqdn="127.0.0.1" - # Write the local FQDN to configuration file - ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/kinghistorian.cfg - serverFqdn=$localFqdn - echo - return - fi - - echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:" - echo - echo -e -n "${GREEN}$iplist${NC}" - echo - echo - echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" - read localFqdn - while true; do - if [ ! -z "$localFqdn" ]; then - # Check if correct ip address - is_correct_ipaddr $localFqdn - retval=`echo $?` - if [[ $retval != 0 ]]; then - read -p "Please choose an IP from local IP list:" localFqdn - else - # Write the local FQDN to configuration file - ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/kinghistorian.cfg - serverFqdn=$localFqdn - break - fi - else - read -p "Please choose an IP from local IP list:" localFqdn - fi - done -} - -function local_fqdn_check() { - #serverFqdn=$(hostname) - echo - echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" - echo - if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then - echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" - echo - - while true - do - read -r -p "Set hostname now? [Y/n] " input - if [ ! -n "$input" ]; then - set_hostname - break - else - case $input in - [yY][eE][sS]|[yY]) - set_hostname - break - ;; - - [nN][oO]|[nN]) - set_ipAsFqdn - break - ;; - - *) - echo "Invalid input..." - ;; - esac - fi - done - fi -} - -function install_config() { - if [ ! 
-f ${cfg_install_dir}/kinghistorian.cfg ]; then - ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/kinghistorian.cfg ] && ${csudo}cp ${script_dir}/cfg/kinghistorian.cfg ${cfg_install_dir} - ${csudo}chmod 644 ${cfg_install_dir}/* - fi - - ${csudo}cp -f ${script_dir}/cfg/kinghistorian.cfg ${install_main_dir}/cfg/kinghistorian.cfg.org - ${csudo}ln -s ${cfg_install_dir}/kinghistorian.cfg ${install_main_dir}/cfg - - [ ! -z $1 ] && return 0 || : # only install client - - if ((${update_flag}==1)); then - return 0 - fi - - if [ "$interactiveFqdn" == "no" ]; then - return 0 - fi - - local_fqdn_check - - #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" - #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)" - #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)" - #FQDN_PATTERN=":[0-9]{1,5}$" - - # first full-qualified domain name (FQDN) for KingHistorian cluster system - echo - echo -e -n "${GREEN}Enter FQDN:port (like h1.wellintech.com:6030) of an existing KingHistorian cluster node to join${NC}" - echo - echo -e -n "${GREEN}OR leave it blank to build one${NC}:" - read firstEp - while true; do - if [ ! -z "$firstEp" ]; then - # check the format of the firstEp - #if [[ $firstEp == $FQDN_PATTERN ]]; then - # Write the first FQDN to configuration file - ${csudo}sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/kinghistorian.cfg - break - #else - # read -p "Please enter the correct FQDN:port: " firstEp - #fi - else - break - fi - done -} - - -function install_log() { - ${csudo}rm -rf ${log_dir} || : - ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - - ${csudo}ln -s ${log_dir} ${install_main_dir}/log -} - -function install_data() { - ${csudo}mkdir -p ${data_dir} - - ${csudo}ln -s ${data_dir} ${install_main_dir}/data -} - -function install_connector() { - ${csudo}cp -rf ${script_dir}/connector/* ${install_main_dir}/connector -} - -function install_examples() { - if [ -d ${script_dir}/examples ]; then - ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples - fi -} - -function clean_service_on_sysvinit() { - if pidof khserver &> /dev/null; then - ${csudo}service khserver stop || : - fi - - if pidof tarbitrator &> /dev/null; then - ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/khserver ]; then - ${csudo}chkconfig --del khserver || : - fi - - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/khserver ]; then - ${csudo}insserv -r khserver || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/khserver ]; then - ${csudo}update-rc.d -f khserver remove || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/khserver || : - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function install_service_on_sysvinit() { - clean_service_on_sysvinit - sleep 1 - - # Install khserver service - - if ((${os_type}==1)); then - ${csudo}cp -f ${script_dir}/init.d/khserver.deb ${install_main_dir}/init.d/khserver - ${csudo}cp ${script_dir}/init.d/khserver.deb ${service_config_dir}/khserver && ${csudo}chmod a+x ${service_config_dir}/khserver - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb 
${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - elif ((${os_type}==2)); then - ${csudo}cp -f ${script_dir}/init.d/khserver.rpm ${install_main_dir}/init.d/khserver - ${csudo}cp ${script_dir}/init.d/khserver.rpm ${service_config_dir}/khserver && ${csudo}chmod a+x ${service_config_dir}/khserver - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - fi - - if ((${initd_mod}==1)); then - ${csudo}chkconfig --add khserver || : - ${csudo}chkconfig --level 2345 khserver on || : - ${csudo}chkconfig --add tarbitratord || : - ${csudo}chkconfig --level 2345 tarbitratord on || : - elif ((${initd_mod}==2)); then - ${csudo}insserv khserver || : - ${csudo}insserv -d khserver || : - ${csudo}insserv tarbitratord || : - ${csudo}insserv -d tarbitratord || : - elif ((${initd_mod}==3)); then - ${csudo}update-rc.d khserver defaults || : - ${csudo}update-rc.d tarbitratord defaults || : - fi -} - -function clean_service_on_systemd() { - khserver_service_config="${service_config_dir}/khserver.service" - if systemctl is-active --quiet khserver; then - echo "KingHistorian is running, stopping it..." - ${csudo}systemctl stop khserver &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable khserver &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${khserver_service_config} - - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - if systemctl is-active --quiet tarbitratord; then - echo "tarbitrator is running, stopping it..." - ${csudo}systemctl stop tarbitratord &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable tarbitratord &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${tarbitratord_service_config} - - if [ "$verMode" == "cluster" ]; then - nginx_service_config="${service_config_dir}/nginxd.service" - if systemctl is-active --quiet nginxd; then - echo "Nginx for KingHistorian is running, stopping it..." 
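# [editor's sketch] clean_service_on_systemd repeats one pattern per unit:
# stop it if active, disable it, then delete the unit file. A hedged generic
# helper capturing that sequence, reusing this script's ${csudo} and
# ${service_config_dir} variables (the function name is illustrative only):
remove_systemd_unit() {
  local unit="$1"
  if systemctl is-active --quiet "${unit}"; then
    ${csudo}systemctl stop "${unit}" &> /dev/null || :
  fi
  ${csudo}systemctl disable "${unit}" &> /dev/null || :
  ${csudo}rm -f "${service_config_dir}/${unit}.service"
}
# e.g. remove_systemd_unit khserver; remove_systemd_unit tarbitratord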
- ${csudo}systemctl stop nginxd &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable nginxd &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${nginx_service_config} - fi -} - -function install_service_on_systemd() { - clean_service_on_systemd - - service_config="${service_config_dir}/khserver.service" - ${csudo}bash -c "echo '[Unit]' >> ${service_config}" - ${csudo}bash -c "echo 'Description=KingHistorian server service' >> ${service_config}" - ${csudo}bash -c "echo 'After=network-online.target' >> ${service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target' >> ${service_config}" - ${csudo}bash -c "echo >> ${service_config}" - ${csudo}bash -c "echo '[Service]' >> ${service_config}" - ${csudo}bash -c "echo 'Type=simple' >> ${service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/bin/khserver' >> ${service_config}" - ${csudo}bash -c "echo 'ExecStartPre=/usr/local/kinghistorian/bin/startPre.sh' >> ${service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${service_config}" - ${csudo}bash -c "echo >> ${service_config}" - ${csudo}bash -c "echo '[Install]' >> ${service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${service_config}" - ${csudo}systemctl enable khserver - - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - ${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Description=KingHistorian arbitrator service' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" - #${csudo}systemctl enable tarbitratord - - if [ "$verMode" == "cluster" ]; then - 
nginx_service_config="${service_config_dir}/nginxd.service" - ${csudo}bash -c "echo '[Unit]' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'Description=Nginx For KingHistorian Service' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" - ${csudo}bash -c "echo >> ${nginx_service_config}" - ${csudo}bash -c "echo '[Service]' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'Type=forking' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" - ${csudo}bash -c "echo >> ${nginx_service_config}" - ${csudo}bash -c "echo '[Install]' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" - if ! ${csudo}systemctl enable nginxd &> /dev/null; then - ${csudo}systemctl daemon-reexec - ${csudo}systemctl enable nginxd - fi - ${csudo}systemctl start nginxd - fi -} - -function install_service() { - if ((${service_mod}==0)); then - install_service_on_systemd - elif ((${service_mod}==1)); then - install_service_on_sysvinit - else - # must manual stop khserver - kill_process khserver - fi -} - -vercomp () { - if [[ $1 == $2 ]]; then - return 0 - fi - local IFS=. - local i ver1=($1) ver2=($2) - # fill empty fields in ver1 with zeros - for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do - ver1[i]=0 - done - - for ((i=0; i<${#ver1[@]}; i++)); do - if [[ -z ${ver2[i]} ]] - then - # fill empty fields in ver2 with zeros - ver2[i]=0 - fi - if ((10#${ver1[i]} > 10#${ver2[i]})) - then - return 1 - fi - if ((10#${ver1[i]} < 10#${ver2[i]})) - then - return 2 - fi - done - return 0 -} - -function is_version_compatible() { - curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6` - - if [ -f ${script_dir}/driver/vercomp.txt ]; then - min_compatible_version=`cat ${script_dir}/driver/vercomp.txt` - else - min_compatible_version=$(${script_dir}/bin/khserver -V | head -1 | cut -d ' ' -f 5) - fi - - vercomp $curr_version $min_compatible_version - case $? in - 0) return 0;; - 1) return 0;; - 2) return 1;; - esac -} - -function update() { - # Start to update - if [ ! -e kinghistorian.tar.gz ]; then - echo "File kinghistorian.tar.gz does not exist" - exit 1 - fi - tar -zxf kinghistorian.tar.gz - install_jemalloc - - # Check if version compatible - if ! 
is_version_compatible; then - echo -e "${RED}Version incompatible${NC}" - return 1 - fi - - echo -e "${GREEN}Start to update KingHistorian...${NC}" - # Stop the service if running - if pidof khserver &> /dev/null; then - if ((${service_mod}==0)); then - ${csudo}systemctl stop khserver || : - elif ((${service_mod}==1)); then - ${csudo}service khserver stop || : - else - kill_process khserver - fi - sleep 1 - fi - if [ "$verMode" == "cluster" ]; then - if pidof nginx &> /dev/null; then - if ((${service_mod}==0)); then - ${csudo}systemctl stop nginxd || : - elif ((${service_mod}==1)); then - ${csudo}service nginxd stop || : - else - kill_process nginx - fi - sleep 1 - fi - fi - - install_main_path - - install_log - install_header - install_lib -# if [ "$pagMode" != "lite" ]; then -# install_connector -# fi -# install_examples - if [ -z $1 ]; then - install_bin - install_service - install_config - - openresty_work=false - if [ "$verMode" == "cluster" ]; then - # Check if openresty is installed - # Check if nginx is installed successfully - if type curl &> /dev/null; then - if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then - echo -e "\033[44;32;1mNginx for KingHistorian is updated successfully!${NC}" - openresty_work=true - else - echo -e "\033[44;31;5mNginx for KingHistorian does not work! Please try again!\033[0m" - fi - fi - fi - - #echo - #echo -e "\033[44;32;1mKingHistorian is updated successfully!${NC}" - echo - echo -e "${GREEN_DARK}To configure KingHistorian ${NC}: edit /etc/kinghistorian/kinghistorian.cfg" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo}systemctl start khserver${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo}service khserver start${NC}" - else - echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ./khserver${NC}" - fi - - if [ ${openresty_work} = 'true' ]; then - echo -e "${GREEN_DARK}To access KingHistorian ${NC}: use ${GREEN_UNDERLINE}khclient -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" - else - echo -e "${GREEN_DARK}To access KingHistorian ${NC}: use ${GREEN_UNDERLINE}khclient -h $serverFqdn${NC} in shell${NC}" - fi - - echo - echo -e "\033[44;32;1mKingHistorian is updated successfully!${NC}" - else - install_bin - install_config - - echo - echo -e "\033[44;32;1mKingHistorian client is updated successfully!${NC}" - fi - - rm -rf $(tar -tf kinghistorian.tar.gz) -} - -function install() { - # Start to install - if [ ! -e kinghistorian.tar.gz ]; then - echo "File kinghistorian.tar.gz does not exist" - exit 1 - fi - tar -zxf kinghistorian.tar.gz - - echo -e "${GREEN}Start to install KingHistorian...${NC}" - - install_main_path - - if [ -z $1 ]; then - install_data - fi - - install_log - install_header - install_lib - install_jemalloc -# if [ "$pagMode" != "lite" ]; then -# install_connector -# fi -# install_examples - - if [ -z $1 ]; then # install service and client - # For installing new - install_bin - install_service - - openresty_work=false - if [ "$verMode" == "cluster" ]; then - # Check if nginx is installed successfully - if type curl &> /dev/null; then - if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then - echo -e "\033[44;32;1mNginx for KingHistorian is installed successfully!${NC}" - openresty_work=true - else - echo -e "\033[44;31;5mNginx for KingHistorian does not work! 
Please try again!\033[0m" - fi - fi - fi - - install_config - - # Ask if to start the service - #echo - #echo -e "\033[44;32;1mKingHistorian is installed successfully!${NC}" - echo - echo -e "${GREEN_DARK}To configure KingHistorian ${NC}: edit /etc/kinghistorian/kinghistorian.cfg" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo}systemctl start khserver${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo}service khserver start${NC}" - else - echo -e "${GREEN_DARK}To start KingHistorian ${NC}: khserver${NC}" - fi - - if [ ! -z "$firstEp" ]; then - tmpFqdn=${firstEp%%:*} - substr=":" - if [[ $firstEp =~ $substr ]];then - tmpPort=${firstEp#*:} - else - tmpPort="" - fi - if [[ "$tmpPort" != "" ]];then - echo -e "${GREEN_DARK}To access KingHistorian ${NC}: khclient -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" - else - echo -e "${GREEN_DARK}To access KingHistorian ${NC}: khclient -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" - fi - echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" - echo - elif [ ! -z "$serverFqdn" ]; then - echo -e "${GREEN_DARK}To access KingHistorian ${NC}: khclient -h $serverFqdn${GREEN_DARK} to login into KingHistorian server${NC}" - echo - fi - echo -e "\033[44;32;1mKingHistorian is installed successfully!${NC}" - echo - else # Only install client - install_bin - install_config - - echo - echo -e "\033[44;32;1mKingHistorian client is installed successfully!${NC}" - fi - - rm -rf $(tar -tf kinghistorian.tar.gz) -} - - -## ==============================Main program starts from here============================ -serverFqdn=$(hostname) -if [ "$verType" == "server" ]; then - # Install server and client - if [ -x ${bin_dir}/khserver ]; then - update_flag=1 - update - else - install - fi -elif [ "$verType" == "client" ]; then - interactiveFqdn=no - # Only install client - if [ -x ${bin_dir}/khclient ]; then - update_flag=1 - update client - else - install client - fi -else - echo "please input correct verType" -fi diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh deleted file mode 100755 index 705c86432569c3e59145f8d887e55b78b111a4a8..0000000000000000000000000000000000000000 --- a/packaging/tools/install_power.sh +++ /dev/null @@ -1,971 +0,0 @@ -#!/bin/bash -# -# This file is used to install database on linux systems. 
The operating system -# is required to use systemd to manage services at boot - -set -e -#set -x - -verMode=edge -pagMode=full - -iplist="" -serverFqdn="" -# -----------------------Variables definition--------------------- -script_dir=$(dirname $(readlink -f "$0")) -# Dynamic directory -data_dir="/var/lib/power" -log_dir="/var/log/power" - -data_link_dir="/usr/local/power/data" -log_link_dir="/usr/local/power/log" - -cfg_install_dir="/etc/power" - -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -lib64_link_dir="/usr/lib64" -inc_link_dir="/usr/include" - -#install main path -install_main_dir="/usr/local/power" - -# old bin dir -bin_dir="/usr/local/power/bin" - -# v1.5 jar dir -#v15_java_app_dir="/usr/local/lib/power" - -service_config_dir="/etc/systemd/system" -nginx_port=6060 -nginx_dir="/usr/local/nginxd" - -# Color setting -RED='\033[0;31m' -GREEN='\033[1;32m' -GREEN_DARK='\033[0;32m' -GREEN_UNDERLINE='\033[4;32m' -NC='\033[0m' - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -update_flag=0 - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - - -# get the operating system type for using the corresponding init file -# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification -#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -if [[ -e /etc/os-release ]]; then - osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: -else - osinfo="" -fi -#echo "osinfo: ${osinfo}" -os_type=0 -if echo $osinfo | grep -qwi "ubuntu" ; then -# echo "This is ubuntu system" - os_type=1 -elif echo $osinfo | grep -qwi "debian" ; then -# echo "This is debian system" - os_type=1 -elif echo $osinfo | grep -qwi "Kylin" ; then -# echo "This is Kylin system" - os_type=1 -elif echo $osinfo | grep -qwi "centos" ; then -# echo "This is centos system" - os_type=2 -elif echo $osinfo | grep -qwi "fedora" ; then -# echo "This is fedora system" - os_type=2 -else - echo " osinfo: ${osinfo}" - echo " This is an officially unverified linux system," - echo " if there are any problems with the installation and operation, " - echo " please feel free to contact taosdata.com for support." - os_type=1 -fi - - -# ============================= get input parameters ================================================= - -# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...] - -# set parameters by default value -interactiveFqdn=yes # [yes | no] -verType=server # [server | client] -initType=systemd # [systemd | service | ...] - -while getopts "hv:e:i:" arg -do - case $arg in - e) - #echo "interactiveFqdn=$OPTARG" - interactiveFqdn=$( echo $OPTARG ) - ;; - v) - #echo "verType=$OPTARG" - verType=$(echo $OPTARG) - ;; - i) - #echo "initType=$OPTARG" - initType=$(echo $OPTARG) - ;; - h) - echo "Usage: `basename $0` -v [server | client] -e [yes | no]" - exit 0 - ;; - ?) 
#unknow option - echo "unkonw argument" - exit 1 - ;; - esac -done - -#echo "verType=${verType} interactiveFqdn=${interactiveFqdn}" - -function kill_process() { - pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function install_main_path() { - #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : - ${csudo}mkdir -p ${install_main_dir} - ${csudo}mkdir -p ${install_main_dir}/cfg - ${csudo}mkdir -p ${install_main_dir}/bin - ${csudo}mkdir -p ${install_main_dir}/connector - ${csudo}mkdir -p ${install_main_dir}/driver - ${csudo}mkdir -p ${install_main_dir}/examples - ${csudo}mkdir -p ${install_main_dir}/include - ${csudo}mkdir -p ${install_main_dir}/init.d - if [ "$verMode" == "cluster" ]; then - ${csudo}mkdir -p ${nginx_dir} - fi -} - -function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/power || : - ${csudo}rm -f ${bin_link_dir}/powerd || : - ${csudo}rm -f ${bin_link_dir}/powerdemo || : - ${csudo}rm -f ${bin_link_dir}/rmpower || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || : - - ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* - - #Make link - [ -x ${install_main_dir}/bin/power ] && ${csudo}ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || : - [ -x ${install_main_dir}/bin/powerd ] && ${csudo}ln -s ${install_main_dir}/bin/powerd ${bin_link_dir}/powerd || : - [ -x ${install_main_dir}/bin/powerdemo ] && ${csudo}ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || : - [ -x ${install_main_dir}/bin/remove_power.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_power.sh ${bin_link_dir}/rmpower || : - [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : - [ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || : - [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : - - if [ "$verMode" == "cluster" ]; then - ${csudo}cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo}chmod 0555 ${nginx_dir}/* - ${csudo}mkdir -p ${nginx_dir}/logs - ${csudo}chmod 777 ${nginx_dir}/sbin/nginx - fi -} - -function install_lib() { - # Remove links - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - #${csudo}rm -rf ${v15_java_app_dir} || : - ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* - - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 - ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - - if [[ -d ${lib64_link_dir} && ! 
-e ${lib64_link_dir}/libtaos.so ]]; then - ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : - ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : - fi - - ${csudo}ldconfig -} - -function install_jemalloc() { - jemalloc_dir=${script_dir}/jemalloc - - if [ -d ${jemalloc_dir} ]; then - ${csudo}/usr/bin/install -c -d /usr/local/bin - - if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin - fi - if [ -f ${jemalloc_dir}/bin/jeprof ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin - fi - if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then - ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib - ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so - ${csudo}/usr/bin/install -c -d /usr/local/lib - if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib - fi - if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig - fi - fi - if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc - fi - if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3 - ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 - fi - - if [ -d /etc/ld.so.conf.d ]; then - echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" - ${csudo}ldconfig - else - echo "/etc/ld.so.conf.d not found!" 
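# [editor's sketch] A quick hedged way to confirm the jemalloc files staged
# above are usable once installed: query jemalloc-config if it landed on the
# PATH. Purely illustrative; this check is not part of the original installer.
if command -v jemalloc-config > /dev/null; then
  echo "jemalloc version: $(jemalloc-config --version)"
  echo "jemalloc libdir:  $(jemalloc-config --libdir)"
fi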
- fi - fi -} - -function install_header() { - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h -} - -function add_newHostname_to_hosts() { - localIp="127.0.0.1" - OLD_IFS="$IFS" - IFS=" " - iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') - arr=($iphost) - IFS="$OLD_IFS" - for s in "${arr[@]}" - do - if [[ "$s" == "$localIp" ]]; then - return - fi - done - ${csudo}echo "127.0.0.1 $1" >> /etc/hosts ||: -} - -function set_hostname() { - echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" - read newHostname - while true; do - if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then - break - else - read -p "Please enter one hostname(must not be 'localhost'):" newHostname - fi - done - - ${csudo}hostname $newHostname ||: - retval=`echo $?` - if [[ $retval != 0 ]]; then - echo - echo "set hostname fail!" - return - fi - #echo -e -n "$(hostnamectl status --static)" - #echo -e -n "$(hostnamectl status --transient)" - #echo -e -n "$(hostnamectl status --pretty)" - - #ubuntu/centos /etc/hostname - if [[ -e /etc/hostname ]]; then - ${csudo}echo $newHostname > /etc/hostname ||: - fi - - #debian: #HOSTNAME=yourname - if [[ -e /etc/sysconfig/network ]]; then - ${csudo}sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: - fi - - ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/power.cfg - serverFqdn=$newHostname - - if [[ -e /etc/hosts ]]; then - add_newHostname_to_hosts $newHostname - fi -} - -function is_correct_ipaddr() { - newIp=$1 - OLD_IFS="$IFS" - IFS=" " - arr=($iplist) - IFS="$OLD_IFS" - for s in "${arr[@]}" - do - if [[ "$s" == "$newIp" ]]; then - return 0 - fi - done - - return 1 -} - -function set_ipAsFqdn() { - iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||: - if [ -z "$iplist" ]; then - iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||: - fi - - if [ -z "$iplist" ]; then - echo - echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" - localFqdn="127.0.0.1" - # Write the local FQDN to configuration file - ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/power.cfg - serverFqdn=$localFqdn - echo - return - fi - - echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:" - echo - echo -e -n "${GREEN}$iplist${NC}" - echo - echo - echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" - read localFqdn - while true; do - if [ ! 
-z "$localFqdn" ]; then - # Check if correct ip address - is_correct_ipaddr $localFqdn - retval=`echo $?` - if [[ $retval != 0 ]]; then - read -p "Please choose an IP from local IP list:" localFqdn - else - # Write the local FQDN to configuration file - ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/power.cfg - serverFqdn=$localFqdn - break - fi - else - read -p "Please choose an IP from local IP list:" localFqdn - fi - done -} - -function local_fqdn_check() { - #serverFqdn=$(hostname) - echo - echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" - echo - if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then - echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" - echo - - while true - do - read -r -p "Set hostname now? [Y/n] " input - if [ ! -n "$input" ]; then - set_hostname - break - else - case $input in - [yY][eE][sS]|[yY]) - set_hostname - break - ;; - - [nN][oO]|[nN]) - set_ipAsFqdn - break - ;; - - *) - echo "Invalid input..." - ;; - esac - fi - done - fi -} - -function install_config() { - if [ ! -f ${cfg_install_dir}/power.cfg ]; then - ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/power.cfg ] && ${csudo}cp ${script_dir}/cfg/power.cfg ${cfg_install_dir} - ${csudo}chmod 644 ${cfg_install_dir}/* - fi - - ${csudo}cp -f ${script_dir}/cfg/power.cfg ${install_main_dir}/cfg/power.cfg.org - ${csudo}ln -s ${cfg_install_dir}/power.cfg ${install_main_dir}/cfg - - [ ! -z $1 ] && return 0 || : # only install client - - if ((${update_flag}==1)); then - return 0 - fi - - if [ "$interactiveFqdn" == "no" ]; then - return 0 - fi - - local_fqdn_check - - #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" - #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)" - #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)" - #FQDN_PATTERN=":[0-9]{1,5}$" - - # first full-qualified domain name (FQDN) for PowerDB cluster system - echo - echo -e -n "${GREEN}Enter FQDN:port (like h1.powerdata.com:6030) of an existing PowerDB cluster node to join${NC}" - echo - echo -e -n "${GREEN}OR leave it blank to build one${NC}:" - read firstEp - while true; do - if [ ! 
-z "$firstEp" ]; then - # check the format of the firstEp - #if [[ $firstEp == $FQDN_PATTERN ]]; then - # Write the first FQDN to configuration file - ${csudo}sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/power.cfg - break - #else - # read -p "Please enter the correct FQDN:port: " firstEp - #fi - else - break - fi - done -} - - -function install_log() { - ${csudo}rm -rf ${log_dir} || : - ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - - ${csudo}ln -s ${log_dir} ${install_main_dir}/log -} - -function install_data() { - ${csudo}mkdir -p ${data_dir} - - ${csudo}ln -s ${data_dir} ${install_main_dir}/data -} - -function install_connector() { - ${csudo}cp -rf ${script_dir}/connector/* ${install_main_dir}/connector -} - -function install_examples() { - if [ -d ${script_dir}/examples ]; then - ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples - fi -} - -function clean_service_on_sysvinit() { - #restart_config_str="power:2345:respawn:${service_config_dir}/powerd start" - #${csudo}sed -i "\|${restart_config_str}|d" /etc/inittab || : - - if pidof powerd &> /dev/null; then - ${csudo}service powerd stop || : - fi - - if pidof tarbitrator &> /dev/null; then - ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/powerd ]; then - ${csudo}chkconfig --del powerd || : - fi - - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/powerd ]; then - ${csudo}insserv -r powerd || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/powerd ]; then - ${csudo}update-rc.d -f powerd remove || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/powerd || : - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function install_service_on_sysvinit() { - clean_service_on_sysvinit - sleep 1 - - # Install powerd service - - if ((${os_type}==1)); then - ${csudo}cp -f ${script_dir}/init.d/powerd.deb ${install_main_dir}/init.d/powerd - ${csudo}cp ${script_dir}/init.d/powerd.deb ${service_config_dir}/powerd && ${csudo}chmod a+x ${service_config_dir}/powerd - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - elif ((${os_type}==2)); then - ${csudo}cp -f ${script_dir}/init.d/powerd.rpm ${install_main_dir}/init.d/powerd - ${csudo}cp ${script_dir}/init.d/powerd.rpm ${service_config_dir}/powerd && ${csudo}chmod a+x ${service_config_dir}/powerd - ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord - ${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord - fi - - #restart_config_str="power:2345:respawn:${service_config_dir}/powerd start" - #${csudo}grep -q -F "$restart_config_str" /etc/inittab || ${csudo}bash -c "echo '${restart_config_str}' >> /etc/inittab" - - if ((${initd_mod}==1)); then - ${csudo}chkconfig --add powerd || : - ${csudo}chkconfig --level 2345 powerd on || : - ${csudo}chkconfig --add tarbitratord || : - 
${csudo}chkconfig --level 2345 tarbitratord on || : - elif ((${initd_mod}==2)); then - ${csudo}insserv powerd || : - ${csudo}insserv -d powerd || : - ${csudo}insserv tarbitratord || : - ${csudo}insserv -d tarbitratord || : - elif ((${initd_mod}==3)); then - ${csudo}update-rc.d powerd defaults || : - ${csudo}update-rc.d tarbitratord defaults || : - fi -} - -function clean_service_on_systemd() { - powerd_service_config="${service_config_dir}/powerd.service" - if systemctl is-active --quiet powerd; then - echo "PowerDB is running, stopping it..." - ${csudo}systemctl stop powerd &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable powerd &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${powerd_service_config} - - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - if systemctl is-active --quiet tarbitratord; then - echo "tarbitrator is running, stopping it..." - ${csudo}systemctl stop tarbitratord &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable tarbitratord &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${tarbitratord_service_config} - - if [ "$verMode" == "cluster" ]; then - nginx_service_config="${service_config_dir}/nginxd.service" - if systemctl is-active --quiet nginxd; then - echo "Nginx for PowerDB is running, stopping it..." - ${csudo}systemctl stop nginxd &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable nginxd &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${nginx_service_config} - fi -} - -# power:2345:respawn:/etc/init.d/powerd start - -function install_service_on_systemd() { - clean_service_on_systemd - - powerd_service_config="${service_config_dir}/powerd.service" - ${csudo}bash -c "echo '[Unit]' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'Description=PowerDB server service' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'After=network-online.target' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target' >> ${powerd_service_config}" - ${csudo}bash -c "echo >> ${powerd_service_config}" - ${csudo}bash -c "echo '[Service]' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'Type=simple' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/bin/powerd' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'ExecStartPre=/usr/local/power/bin/startPre.sh' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${powerd_service_config}" - ${csudo}bash -c "echo >> ${powerd_service_config}" - ${csudo}bash -c "echo '[Install]' >> ${powerd_service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${powerd_service_config}" - ${csudo}systemctl enable powerd - - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - ${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Description=PowerDB arbitrator service' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 
'After=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" - #${csudo}systemctl enable tarbitratord - - if [ "$verMode" == "cluster" ]; then - nginx_service_config="${service_config_dir}/nginxd.service" - ${csudo}bash -c "echo '[Unit]' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'Description=Nginx For PowrDB Service' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" - ${csudo}bash -c "echo >> ${nginx_service_config}" - ${csudo}bash -c "echo '[Service]' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'Type=forking' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" - ${csudo}bash -c "echo >> ${nginx_service_config}" - ${csudo}bash -c "echo '[Install]' >> ${nginx_service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" - if ! 
${csudo}systemctl enable nginxd &> /dev/null; then - ${csudo}systemctl daemon-reexec - ${csudo}systemctl enable nginxd - fi - ${csudo}systemctl start nginxd - fi -} - -function install_service() { - if ((${service_mod}==0)); then - install_service_on_systemd - elif ((${service_mod}==1)); then - install_service_on_sysvinit - else - # must manual stop powerd - kill_process powerd - fi -} - -vercomp () { - if [[ $1 == $2 ]]; then - return 0 - fi - local IFS=. - local i ver1=($1) ver2=($2) - # fill empty fields in ver1 with zeros - for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do - ver1[i]=0 - done - - for ((i=0; i<${#ver1[@]}; i++)); do - if [[ -z ${ver2[i]} ]] - then - # fill empty fields in ver2 with zeros - ver2[i]=0 - fi - if ((10#${ver1[i]} > 10#${ver2[i]})) - then - return 1 - fi - if ((10#${ver1[i]} < 10#${ver2[i]})) - then - return 2 - fi - done - return 0 -} - -function is_version_compatible() { - - curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6` - - if [ -f ${script_dir}/driver/vercomp.txt ]; then - min_compatible_version=`cat ${script_dir}/driver/vercomp.txt` - else - min_compatible_version=$(${script_dir}/bin/powerd -V | head -1 | cut -d ' ' -f 5) - fi - - vercomp $curr_version $min_compatible_version - case $? in - 0) return 0;; - 1) return 0;; - 2) return 1;; - esac -} - -function update_PowerDB() { - # Start to update - if [ ! -e power.tar.gz ]; then - echo "File power.tar.gz does not exist" - exit 1 - fi - tar -zxf power.tar.gz - install_jemalloc - - # Check if version compatible - if ! is_version_compatible; then - echo -e "${RED}Version incompatible${NC}" - return 1 - fi - - echo -e "${GREEN}Start to update PowerDB...${NC}" - # Stop the service if running - if pidof powerd &> /dev/null; then - if ((${service_mod}==0)); then - ${csudo}systemctl stop powerd || : - elif ((${service_mod}==1)); then - ${csudo}service powerd stop || : - else - kill_process powerd - fi - sleep 1 - fi - if [ "$verMode" == "cluster" ]; then - if pidof nginx &> /dev/null; then - if ((${service_mod}==0)); then - ${csudo}systemctl stop nginxd || : - elif ((${service_mod}==1)); then - ${csudo}service nginxd stop || : - else - kill_process nginx - fi - sleep 1 - fi - fi - - install_main_path - - install_log - install_header - install_lib - if [ "$pagMode" != "lite" ]; then - install_connector - fi - install_examples - if [ -z $1 ]; then - install_bin - install_service - install_config - - openresty_work=false - if [ "$verMode" == "cluster" ]; then - # Check if openresty is installed - # Check if nginx is installed successfully - if type curl &> /dev/null; then - if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then - echo -e "\033[44;32;1mNginx for PowerDB is updated successfully!${NC}" - openresty_work=true - else - echo -e "\033[44;31;5mNginx for PowerDB does not work! 
Please try again!\033[0m"
-        fi
-      fi
-    fi
-
-    #echo
-    #echo -e "\033[44;32;1mPowerDB is updated successfully!${NC}"
-    echo
-    echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/power.cfg"
-    if ((${service_mod}==0)); then
-      echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo}systemctl start powerd${NC}"
-    elif ((${service_mod}==1)); then
-      echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo}service powerd start${NC}"
-    else
-      echo -e "${GREEN_DARK}To start PowerDB ${NC}: ./powerd${NC}"
-    fi
-
-    if [ ${openresty_work} = 'true' ]; then
-      echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
-    else
-      echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power -h $serverFqdn${NC} in shell${NC}"
-    fi
-
-    echo
-    echo -e "\033[44;32;1mPowerDB is updated successfully!${NC}"
-  else
-    install_bin
-    install_config
-
-    echo
-    echo -e "\033[44;32;1mPowerDB client is updated successfully!${NC}"
-  fi
-
-  rm -rf $(tar -tf power.tar.gz)
-}
-
-function install_PowerDB() {
-  # Start to install
-  if [ ! -e power.tar.gz ]; then
-    echo "File power.tar.gz does not exist"
-    exit 1
-  fi
-  tar -zxf power.tar.gz
-
-  echo -e "${GREEN}Start to install PowerDB...${NC}"
-
-  install_main_path
-
-  if [ -z $1 ]; then
-    install_data
-  fi
-
-  install_log
-  install_header
-  install_lib
-  install_jemalloc
-  if [ "$pagMode" != "lite" ]; then
-    install_connector
-  fi
-  install_examples
-
-  if [ -z $1 ]; then # install service and client
-    # For installing new
-    install_bin
-    install_service
-
-    openresty_work=false
-    if [ "$verMode" == "cluster" ]; then
-      # Check if nginx is installed successfully
-      if type curl &> /dev/null; then
-        if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
-          echo -e "\033[44;32;1mNginx for PowerDB is installed successfully!${NC}"
-          openresty_work=true
-        else
-          echo -e "\033[44;31;5mNginx for PowerDB does not work! Please try again!\033[0m"
-        fi
-      fi
-    fi
-
-    install_config
-
-    # Ask whether to start the service
-    #echo
-    #echo -e "\033[44;32;1mPowerDB is installed successfully!${NC}"
-    echo
-    echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/power.cfg"
-    if ((${service_mod}==0)); then
-      echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo}systemctl start powerd${NC}"
-    elif ((${service_mod}==1)); then
-      echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo}service powerd start${NC}"
-    else
-      echo -e "${GREEN_DARK}To start PowerDB ${NC}: powerd${NC}"
-    fi
-
-    #if [ ${openresty_work} = 'true' ]; then
-    #  echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
-    #else
-    #  echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell${NC}"
-    #fi
-
-    if [ ! -z "$firstEp" ]; then
-      tmpFqdn=${firstEp%%:*}
-      substr=":"
-      if [[ $firstEp =~ $substr ]];then
-        tmpPort=${firstEp#*:}
-      else
-        tmpPort=""
-      fi
-      if [[ "$tmpPort" != "" ]];then
-        echo -e "${GREEN_DARK}To access PowerDB ${NC}: power -h $tmpFqdn -P $tmpPort${GREEN_DARK} to log in to the cluster, then${NC}"
-      else
-        echo -e "${GREEN_DARK}To access PowerDB ${NC}: power -h $tmpFqdn${GREEN_DARK} to log in to the cluster, then${NC}"
-      fi
-      echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
-      echo
-    elif [ ! -z "$serverFqdn" ]; then
-      echo -e "${GREEN_DARK}To access PowerDB ${NC}: power -h $serverFqdn${GREEN_DARK} to log in to the PowerDB server${NC}"
-      echo
-    fi
-    echo -e "\033[44;32;1mPowerDB is installed successfully!${NC}"
-    echo
-  else # Only install client
-    install_bin
-    install_config
-
-    echo
-    echo -e "\033[44;32;1mPowerDB client is installed successfully!${NC}"
-  fi
-
-  rm -rf $(tar -tf power.tar.gz)
-}
-
-
-## ==============================Main program starts from here============================
-serverFqdn=$(hostname)
-if [ "$verType" == "server" ]; then
-  # Install server and client
-  if [ -x ${bin_dir}/powerd ]; then
-    update_flag=1
-    update_PowerDB
-  else
-    install_PowerDB
-  fi
-elif [ "$verType" == "client" ]; then
-  interactiveFqdn=no
-  # Only install client
-  if [ -x ${bin_dir}/power ]; then
-    update_flag=1
-    update_PowerDB client
-  else
-    install_PowerDB client
-  fi
-else
-  echo "please input a correct verType"
-fi
diff --git a/packaging/tools/install_pro.sh b/packaging/tools/install_pro.sh
deleted file mode 100755
index f1c71985f1464e3074c006b1eec7597f5990bf9d..0000000000000000000000000000000000000000
--- a/packaging/tools/install_pro.sh
+++ /dev/null
@@ -1,951 +0,0 @@
-#!/bin/bash
-#
-# This file is used to install the database on Linux systems. The operating system
-# is required to use systemd to manage services at boot
-
-set -e
-#set -x
-
-verMode=edge
-pagMode=full
-
-iplist=""
-serverFqdn=""
-# -----------------------Variables definition---------------------
-script_dir=$(dirname $(readlink -f "$0"))
-# Dynamic directory
-data_dir="/var/lib/ProDB"
-log_dir="/var/log/ProDB"
-
-data_link_dir="/usr/local/ProDB/data"
-log_link_dir="/usr/local/ProDB/log"
-
-cfg_install_dir="/etc/ProDB"
-
-bin_link_dir="/usr/bin"
-lib_link_dir="/usr/lib"
-lib64_link_dir="/usr/lib64"
-inc_link_dir="/usr/include"
-
-#install main path
-install_main_dir="/usr/local/ProDB"
-
-# old bin dir
-bin_dir="/usr/local/ProDB/bin"
-
-service_config_dir="/etc/systemd/system"
-nginx_port=6060
-nginx_dir="/usr/local/nginxd"
-
-# Color setting
-RED='\033[0;31m'
-GREEN='\033[1;32m'
-GREEN_DARK='\033[0;32m'
-GREEN_UNDERLINE='\033[4;32m'
-NC='\033[0m'
-
-csudo=""
-if command -v sudo > /dev/null; then
-  csudo="sudo "
-fi
-
-update_flag=0
-
-initd_mod=0
-service_mod=2
-if pidof systemd &> /dev/null; then
-  service_mod=0
-elif $(which service &> /dev/null); then
-  service_mod=1
-  service_config_dir="/etc/init.d"
-  if $(which chkconfig &> /dev/null); then
-    initd_mod=1
-  elif $(which insserv &> /dev/null); then
-    initd_mod=2
-  elif $(which update-rc.d &> /dev/null); then
-    initd_mod=3
-  else
-    service_mod=2
-  fi
-else
-  service_mod=2
-fi
-
-
-# get the operating system type for using the corresponding init file
-# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
-#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
-if [[ -e /etc/os-release ]]; then
-  osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
-else
-  osinfo=""
-fi
-#echo "osinfo: ${osinfo}"
-os_type=0
-if echo $osinfo | grep -qwi "ubuntu" ; then
-#  echo "This is ubuntu system"
-  os_type=1
-elif echo $osinfo | grep -qwi "debian" ; then
-#  echo "This is debian system"
-  os_type=1
-elif echo $osinfo | grep -qwi "Kylin" ; then
-#  echo "This is Kylin system"
-  os_type=1
-elif echo $osinfo | grep -qwi "centos" ; then
-#  echo "This is centos system"
-  os_type=2
-elif echo $osinfo | grep -qwi "fedora" ; then
-#  echo "This is fedora system"
-  os_type=2
-else
-  echo " osinfo: ${osinfo}"
-  echo " This is an officially unverified Linux system,"
-  echo " if there are any problems with the installation and operation, "
-  echo " please feel free to contact hanatech.com.cn for support."
-  os_type=1
-fi
-
-
-# ============================= get input parameters =================================================
-
-# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
-
-# set parameters by default value
-interactiveFqdn=yes  # [yes | no]
-verType=server       # [server | client]
-initType=systemd     # [systemd | service | ...]
-
-while getopts "hv:e:i:" arg
-do
-  case $arg in
-    e)
-      #echo "interactiveFqdn=$OPTARG"
-      interactiveFqdn=$( echo $OPTARG )
-      ;;
-    v)
-      #echo "verType=$OPTARG"
-      verType=$(echo $OPTARG)
-      ;;
-    i)
-      #echo "initType=$OPTARG"
-      initType=$(echo $OPTARG)
-      ;;
-    h)
-      echo "Usage: `basename $0` -v [server | client] -e [yes | no]"
-      exit 0
-      ;;
-    ?) #unknown option
-      echo "unknown argument"
-      exit 1
-      ;;
-  esac
-done
-
-function kill_process() {
-  pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
-  if [ -n "$pid" ]; then
-    ${csudo}kill -9 $pid || :
-  fi
-}
-
-function install_main_path() {
-  #create install main dir and all sub dir
-  ${csudo}rm -rf ${install_main_dir} || :
-  ${csudo}mkdir -p ${install_main_dir}
-  ${csudo}mkdir -p ${install_main_dir}/cfg
-  ${csudo}mkdir -p ${install_main_dir}/bin
-#  ${csudo}mkdir -p ${install_main_dir}/connector
-  ${csudo}mkdir -p ${install_main_dir}/driver
-#  ${csudo}mkdir -p ${install_main_dir}/examples
-  ${csudo}mkdir -p ${install_main_dir}/include
-  ${csudo}mkdir -p ${install_main_dir}/init.d
-  if [ "$verMode" == "cluster" ]; then
-    ${csudo}mkdir -p ${nginx_dir}
-  fi
-}
-
-function install_bin() {
-  # Remove links
-  ${csudo}rm -f ${bin_link_dir}/prodbc || :
-  ${csudo}rm -f ${bin_link_dir}/prodbs || :
-  ${csudo}rm -f ${bin_link_dir}/prodemo || :
-  ${csudo}rm -f ${bin_link_dir}/rmprodb || :
-  ${csudo}rm -f ${bin_link_dir}/tarbitrator || :
-  ${csudo}rm -f ${bin_link_dir}/set_core || :
-  ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || :
-
-  ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
-
-  #Make link
-  [ -x ${install_main_dir}/bin/prodbc ] && ${csudo}ln -s ${install_main_dir}/bin/prodbc ${bin_link_dir}/prodbc || :
-  [ -x ${install_main_dir}/bin/prodbs ] && ${csudo}ln -s ${install_main_dir}/bin/prodbs ${bin_link_dir}/prodbs || :
-  [ -x ${install_main_dir}/bin/prodemo ] && ${csudo}ln -s ${install_main_dir}/bin/prodemo ${bin_link_dir}/prodemo || :
-  [ -x ${install_main_dir}/bin/remove_pro.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_pro.sh ${bin_link_dir}/rmprodb || :
-  [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
-  [ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || :
-  [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
-
-  if [ "$verMode" == "cluster" ]; then
-    ${csudo}cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo}chmod 0555 ${nginx_dir}/*
-    ${csudo}mkdir -p ${nginx_dir}/logs
-    ${csudo}chmod 777 ${nginx_dir}/sbin/nginx
-  fi
-}
-
-function install_lib() {
-  # Remove links
-  ${csudo}rm -f ${lib_link_dir}/libtaos.* || :
-  ${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
-  ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*
-
-  ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
-  ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
-
-  if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
-    ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
-    ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
-  fi
-
-  if [ "$osType" != "Darwin" ]; then
-    ${csudo}ldconfig
-  else
-    ${csudo}update_dyld_shared_cache
-  fi
-}
-
-function install_header() {
-  ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
-  ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
-  ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
-  ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
-  ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
-}
-
-function install_jemalloc() {
-  jemalloc_dir=${script_dir}/jemalloc
-
-  if [ -d ${jemalloc_dir} ]; then
-    ${csudo}/usr/bin/install -c -d /usr/local/bin
-
-    if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
-      ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
-    fi
-    if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
-      ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
-    fi
-    if [ -f ${jemalloc_dir}/bin/jeprof ]; then
-      ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
-    fi
-    if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
-      ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc
-      ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
-    fi
-    if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
-      ${csudo}/usr/bin/install -c -d /usr/local/lib
-      ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
-      ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
-      ${csudo}/usr/bin/install -c -d /usr/local/lib
-      if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
-        ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
-      fi
-      if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
-        ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
-      fi
-      if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
-        ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
-        ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
-      fi
-    fi
-    if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
-      ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc
-      ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
-    fi
-    if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
-      ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3
-      ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
-    fi
-
-    if [ -d /etc/ld.so.conf.d ]; then
-      echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
-      ${csudo}ldconfig
-    else
-      echo "/etc/ld.so.conf.d not found!"
-    fi
-  fi
-}
-
-function add_newHostname_to_hosts() {
-  localIp="127.0.0.1"
-  OLD_IFS="$IFS"
-  IFS=" "
-  iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
-  arr=($iphost)
-  IFS="$OLD_IFS"
-  for s in "${arr[@]}"
-  do
-    if [[ "$s" == "$localIp" ]]; then
-      return
-    fi
-  done
-  ${csudo}echo "127.0.0.1 $1" >> /etc/hosts ||:
-}
-
-function set_hostname() {
-  echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:"
-  read newHostname
-  while true; do
-    if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then
-      break
-    else
-      read -p "Please enter one hostname(must not be 'localhost'):" newHostname
-    fi
-  done
-
-  ${csudo}hostname $newHostname ||:
-  retval=`echo $?`
-  if [[ $retval != 0 ]]; then
-    echo
-    echo "set hostname failed!"
-    return
-  fi
-
-  #ubuntu/centos /etc/hostname
-  if [[ -e /etc/hostname ]]; then
-    ${csudo}echo $newHostname > /etc/hostname ||:
-  fi
-
-  #debian: #HOSTNAME=yourname
-  if [[ -e /etc/sysconfig/network ]]; then
-    ${csudo}sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||:
-  fi
-
-  ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/prodb.cfg
-  serverFqdn=$newHostname
-
-  if [[ -e /etc/hosts ]]; then
-    add_newHostname_to_hosts $newHostname
-  fi
-}
-
-function is_correct_ipaddr() {
-  newIp=$1
-  OLD_IFS="$IFS"
-  IFS=" "
-  arr=($iplist)
-  IFS="$OLD_IFS"
-  for s in "${arr[@]}"
-  do
-    if [[ "$s" == "$newIp" ]]; then
-      return 0
-    fi
-  done
-
-  return 1
-}
-
-function set_ipAsFqdn() {
-  iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||:
-  if [ -z "$iplist" ]; then
-    iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||:
-  fi
-
-  if [ -z "$iplist" ]; then
-    echo
-    echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}"
-    localFqdn="127.0.0.1"
-    # Write the local FQDN to configuration file
-    ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/prodb.cfg
-    serverFqdn=$localFqdn
-    echo
-    return
-  fi
-
-  echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:"
-  echo
-  echo -e -n "${GREEN}$iplist${NC}"
-  echo
-  echo
-  echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:"
-  read localFqdn
-  while true; do
-    if [ ! -z "$localFqdn" ]; then
-      # Check if correct ip address
-      is_correct_ipaddr $localFqdn
-      retval=`echo $?`
-      if [[ $retval != 0 ]]; then
-        read -p "Please choose an IP from local IP list:" localFqdn
-      else
-        # Write the local FQDN to configuration file
-        ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/prodb.cfg
-        serverFqdn=$localFqdn
-        break
-      fi
-    else
-      read -p "Please choose an IP from local IP list:" localFqdn
-    fi
-  done
-}
-
-function local_fqdn_check() {
-  #serverFqdn=$(hostname)
-  echo
-  echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}"
-  echo
-  if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then
-    echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}"
-    echo
-
-    while true
-    do
-      read -r -p "Set hostname now? [Y/n] " input
-      if [ ! -n "$input" ]; then
-        set_hostname
-        break
-      else
-        case $input in
-          [yY][eE][sS]|[yY])
-            set_hostname
-            break
-            ;;
-
-          [nN][oO]|[nN])
-            set_ipAsFqdn
-            break
-            ;;
-
-          *)
-            echo "Invalid input..."
-            ;;
-        esac
-      fi
-    done
-  fi
-}
-
-function install_config() {
-  if [ ! -f ${cfg_install_dir}/prodb.cfg ]; then
-    ${csudo}mkdir -p ${cfg_install_dir}
-    [ -f ${script_dir}/cfg/prodb.cfg ] && ${csudo}cp ${script_dir}/cfg/prodb.cfg ${cfg_install_dir}
-    ${csudo}chmod 644 ${cfg_install_dir}/*
-  fi
-
-  ${csudo}cp -f ${script_dir}/cfg/prodb.cfg ${install_main_dir}/cfg/prodb.cfg.org
-  ${csudo}ln -s ${cfg_install_dir}/prodb.cfg ${install_main_dir}/cfg
-
-  [ ! -z $1 ] && return 0 || : # only install client
-
-  if ((${update_flag}==1)); then
-    return 0
-  fi
-
-  if [ "$interactiveFqdn" == "no" ]; then
-    return 0
-  fi
-
-  local_fqdn_check
-
-  #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
-  #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)"
-  #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)"
-  #FQDN_PATTERN=":[0-9]{1,5}$"
-
-  # first full-qualified domain name (FQDN) for ProDB cluster system
-  echo
-  echo -e -n "${GREEN}Enter FQDN:port (like h1.hanatech.com.cn:6030) of an existing ProDB cluster node to join${NC}"
-  echo
-  echo -e -n "${GREEN}OR leave it blank to build one${NC}:"
-  read firstEp
-  while true; do
-    if [ ! -z "$firstEp" ]; then
-      # check the format of the firstEp
-      #if [[ $firstEp == $FQDN_PATTERN ]]; then
-        # Write the first FQDN to configuration file
-        ${csudo}sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/prodb.cfg
-        break
-      #else
-      #  read -p "Please enter the correct FQDN:port: " firstEp
-      #fi
-    else
-      break
-    fi
-  done
-}
-
-
-function install_log() {
-  ${csudo}rm -rf ${log_dir} || :
-  ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
-
-  ${csudo}ln -s ${log_dir} ${install_main_dir}/log
-}
-
-function install_data() {
-  ${csudo}mkdir -p ${data_dir}
-
-  ${csudo}ln -s ${data_dir} ${install_main_dir}/data
-}
-
-function install_connector() {
-  ${csudo}cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
-}
-
-function install_examples() {
-  if [ -d ${script_dir}/examples ]; then
-    ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
-  fi
-}
-
-function clean_service_on_sysvinit() {
-  if pidof prodbs &> /dev/null; then
-    ${csudo}service prodbs stop || :
-  fi
-
-  if pidof tarbitrator &> /dev/null; then
-    ${csudo}service tarbitratord stop || :
-  fi
-
-  if ((${initd_mod}==1)); then
-    if [ -e ${service_config_dir}/prodbs ]; then
-      ${csudo}chkconfig --del prodbs || :
-    fi
-
-    if [ -e ${service_config_dir}/tarbitratord ]; then
-      ${csudo}chkconfig --del tarbitratord || :
-    fi
-  elif ((${initd_mod}==2)); then
-    if [ -e ${service_config_dir}/prodbs ]; then
-      ${csudo}insserv -r prodbs || :
-    fi
-    if [ -e ${service_config_dir}/tarbitratord ]; then
-      ${csudo}insserv -r tarbitratord || :
-    fi
-  elif ((${initd_mod}==3)); then
-    if [ -e ${service_config_dir}/prodbs ]; then
-      ${csudo}update-rc.d -f prodbs remove || :
-    fi
-    if [ -e ${service_config_dir}/tarbitratord ]; then
-      ${csudo}update-rc.d -f tarbitratord remove || :
-    fi
-  fi
-
-  ${csudo}rm -f ${service_config_dir}/prodbs || :
-  ${csudo}rm -f ${service_config_dir}/tarbitratord || :
-
-  if $(which init &> /dev/null); then
-    ${csudo}init q || :
-  fi
-}
-
-function install_service_on_sysvinit() {
-  clean_service_on_sysvinit
-  sleep 1
-
-  # Install prodbs service
-
-  if ((${os_type}==1)); then
-    ${csudo}cp -f ${script_dir}/init.d/prodbs.deb ${install_main_dir}/init.d/prodbs
-    ${csudo}cp ${script_dir}/init.d/prodbs.deb ${service_config_dir}/prodbs && ${csudo}chmod a+x ${service_config_dir}/prodbs
-    ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
-    ${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
-  elif ((${os_type}==2)); then
-    ${csudo}cp -f ${script_dir}/init.d/prodbs.rpm ${install_main_dir}/init.d/prodbs
-    ${csudo}cp ${script_dir}/init.d/prodbs.rpm ${service_config_dir}/prodbs && ${csudo}chmod a+x ${service_config_dir}/prodbs
-    ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
-    ${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
-  fi
-
-  if ((${initd_mod}==1)); then
-    ${csudo}chkconfig --add prodbs || :
-    ${csudo}chkconfig --level 2345 prodbs on || :
-    ${csudo}chkconfig --add tarbitratord || :
-    ${csudo}chkconfig --level 2345 tarbitratord on || :
-  elif ((${initd_mod}==2)); then
-    ${csudo}insserv prodbs || :
-    ${csudo}insserv -d prodbs || :
-    ${csudo}insserv tarbitratord || :
-    ${csudo}insserv -d tarbitratord || :
-  elif ((${initd_mod}==3)); then
-    ${csudo}update-rc.d prodbs defaults || :
-    ${csudo}update-rc.d tarbitratord defaults || :
-  fi
-}
-
-function clean_service_on_systemd() {
-  prodbs_service_config="${service_config_dir}/prodbs.service"
-  if systemctl is-active --quiet prodbs; then
-    echo "ProDB is running, stopping it..."
-    ${csudo}systemctl stop prodbs &> /dev/null || echo &> /dev/null
-  fi
-  ${csudo}systemctl disable prodbs &> /dev/null || echo &> /dev/null
-  ${csudo}rm -f ${prodbs_service_config}
-
-  tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
-  if systemctl is-active --quiet tarbitratord; then
-    echo "tarbitrator is running, stopping it..."
-    ${csudo}systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
-  fi
-  ${csudo}systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
-  ${csudo}rm -f ${tarbitratord_service_config}
-
-  if [ "$verMode" == "cluster" ]; then
-    nginx_service_config="${service_config_dir}/nginxd.service"
-    if systemctl is-active --quiet nginxd; then
-      echo "Nginx for ProDB is running, stopping it..."
-      ${csudo}systemctl stop nginxd &> /dev/null || echo &> /dev/null
-    fi
-    ${csudo}systemctl disable nginxd &> /dev/null || echo &> /dev/null
-    ${csudo}rm -f ${nginx_service_config}
-  fi
-}
-
-function install_service_on_systemd() {
-  clean_service_on_systemd
-
-  prodbs_service_config="${service_config_dir}/prodbs.service"
-  ${csudo}bash -c "echo '[Unit]' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'Description=ProDB server service' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'After=network-online.target' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'Wants=network-online.target' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo '[Service]' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'Type=simple' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'ExecStart=/usr/bin/prodbs' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'ExecStartPre=/usr/local/ProDB/bin/startPre.sh' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'StandardOutput=null' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'Restart=always' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo '[Install]' >> ${prodbs_service_config}"
-  ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${prodbs_service_config}"
-  ${csudo}systemctl enable prodbs
-
-  tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
-  ${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'Description=ProDB arbitrator service' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
-  #${csudo}systemctl enable tarbitratord
-
-  if [ "$verMode" == "cluster" ]; then
-    nginx_service_config="${service_config_dir}/nginxd.service"
-    ${csudo}bash -c "echo '[Unit]' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'Description=Nginx For ProDB Service' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'After=network-online.target' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo >> ${nginx_service_config}"
-    ${csudo}bash -c "echo '[Service]' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'Type=forking' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'Restart=always' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo >> ${nginx_service_config}"
-    ${csudo}bash -c "echo '[Install]' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}"
-    if ! ${csudo}systemctl enable nginxd &> /dev/null; then
-      ${csudo}systemctl daemon-reexec
-      ${csudo}systemctl enable nginxd
-    fi
-    ${csudo}systemctl start nginxd
-  fi
-}
-
-function install_service() {
-  if ((${service_mod}==0)); then
-    install_service_on_systemd
-  elif ((${service_mod}==1)); then
-    install_service_on_sysvinit
-  else
-    # must manually stop prodbs
-    kill_process prodbs
-  fi
-}
-
-vercomp () {
-  if [[ $1 == $2 ]]; then
-    return 0
-  fi
-  local IFS=.
-  local i ver1=($1) ver2=($2)
-  # fill empty fields in ver1 with zeros
-  for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
-    ver1[i]=0
-  done
-
-  for ((i=0; i<${#ver1[@]}; i++)); do
-    if [[ -z ${ver2[i]} ]]
-    then
-      # fill empty fields in ver2 with zeros
-      ver2[i]=0
-    fi
-    if ((10#${ver1[i]} > 10#${ver2[i]}))
-    then
-      return 1
-    fi
-    if ((10#${ver1[i]} < 10#${ver2[i]}))
-    then
-      return 2
-    fi
-  done
-  return 0
-}
-
-function is_version_compatible() {
-  curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6`
-
-  if [ -f ${script_dir}/driver/vercomp.txt ]; then
-    min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
-  else
-    min_compatible_version=$(${script_dir}/bin/prodbs -V | head -1 | cut -d ' ' -f 5)
-  fi
-
-  vercomp $curr_version $min_compatible_version
-  case $? in
-    0) return 0;;
-    1) return 0;;
-    2) return 1;;
-  esac
-}
-
-function update_prodb() {
-  # Start to update
-  if [ ! -e prodb.tar.gz ]; then
-    echo "File prodb.tar.gz does not exist"
-    exit 1
-  fi
-  tar -zxf prodb.tar.gz
-  install_jemalloc
-
-  # Check if version compatible
-  if ! is_version_compatible; then
-    echo -e "${RED}Version incompatible${NC}"
-    return 1
-  fi
-
-  echo -e "${GREEN}Start to update ProDB...${NC}"
-  # Stop the service if running
-  if pidof prodbs &> /dev/null; then
-    if ((${service_mod}==0)); then
-      ${csudo}systemctl stop prodbs || :
-    elif ((${service_mod}==1)); then
-      ${csudo}service prodbs stop || :
-    else
-      kill_process prodbs
-    fi
-    sleep 1
-  fi
-  if [ "$verMode" == "cluster" ]; then
-    if pidof nginx &> /dev/null; then
-      if ((${service_mod}==0)); then
-        ${csudo}systemctl stop nginxd || :
-      elif ((${service_mod}==1)); then
-        ${csudo}service nginxd stop || :
-      else
-        kill_process nginx
-      fi
-      sleep 1
-    fi
-  fi
-
-  install_main_path
-
-  install_log
-  install_header
-  install_lib
-#  if [ "$pagMode" != "lite" ]; then
-#    install_connector
-#  fi
-#  install_examples
-  if [ -z $1 ]; then
-    install_bin
-    install_service
-    install_config
-
-    openresty_work=false
-    if [ "$verMode" == "cluster" ]; then
-      # Check if openresty is installed
-      # Check if nginx is installed successfully
-      if type curl &> /dev/null; then
-        if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
-          echo -e "\033[44;32;1mNginx for ProDB is updated successfully!${NC}"
-          openresty_work=true
-        else
-          echo -e "\033[44;31;5mNginx for ProDB does not work! Please try again!\033[0m"
-        fi
-      fi
-    fi
-
-    #echo
-    #echo -e "\033[44;32;1mProDB is updated successfully!${NC}"
-    echo
-    echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/prodb.cfg"
-    if ((${service_mod}==0)); then
-      echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo}systemctl start prodbs${NC}"
-    elif ((${service_mod}==1)); then
-      echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo}service prodbs start${NC}"
-    else
-      echo -e "${GREEN_DARK}To start ProDB ${NC}: ./prodbs${NC}"
-    fi
-
-    if [ ${openresty_work} = 'true' ]; then
-      echo -e "${GREEN_DARK}To access ProDB ${NC}: use ${GREEN_UNDERLINE}prodbc -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
-    else
-      echo -e "${GREEN_DARK}To access ProDB ${NC}: use ${GREEN_UNDERLINE}prodbc -h $serverFqdn${NC} in shell${NC}"
-    fi
-
-    echo
-    echo -e "\033[44;32;1mProDB is updated successfully!${NC}"
-  else
-    install_bin
-    install_config
-
-    echo
-    echo -e "\033[44;32;1mProDB client is updated successfully!${NC}"
-  fi
-
-  rm -rf $(tar -tf prodb.tar.gz)
-}
-
-function install_prodb() {
-  # Start to install
-  if [ ! -e prodb.tar.gz ]; then
-    echo "File prodb.tar.gz does not exist"
-    exit 1
-  fi
-  tar -zxf prodb.tar.gz
-
-  echo -e "${GREEN}Start to install ProDB...${NC}"
-
-  install_main_path
-
-  if [ -z $1 ]; then
-    install_data
-  fi
-
-  install_log
-  install_header
-  install_lib
-  install_jemalloc
-#  if [ "$pagMode" != "lite" ]; then
-#    install_connector
-#  fi
-#  install_examples
-
-  if [ -z $1 ]; then # install service and client
-    # For installing new
-    install_bin
-    install_service
-
-    openresty_work=false
-    if [ "$verMode" == "cluster" ]; then
-      # Check if nginx is installed successfully
-      if type curl &> /dev/null; then
-        if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
-          echo -e "\033[44;32;1mNginx for ProDB is installed successfully!${NC}"
-          openresty_work=true
-        else
-          echo -e "\033[44;31;5mNginx for ProDB does not work! Please try again!\033[0m"
-        fi
-      fi
-    fi
-
-    install_config
-
-    # Ask whether to start the service
-    #echo
-    #echo -e "\033[44;32;1mProDB is installed successfully!${NC}"
-    echo
-    echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/prodb.cfg"
-    if ((${service_mod}==0)); then
-      echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo}systemctl start prodbs${NC}"
-    elif ((${service_mod}==1)); then
-      echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo}service prodbs start${NC}"
-    else
-      echo -e "${GREEN_DARK}To start ProDB ${NC}: prodbs${NC}"
-    fi
-
-    if [ ! -z "$firstEp" ]; then
-      tmpFqdn=${firstEp%%:*}
-      substr=":"
-      if [[ $firstEp =~ $substr ]];then
-        tmpPort=${firstEp#*:}
-      else
-        tmpPort=""
-      fi
-      if [[ "$tmpPort" != "" ]];then
-        echo -e "${GREEN_DARK}To access ProDB ${NC}: prodbc -h $tmpFqdn -P $tmpPort${GREEN_DARK} to log in to the cluster, then${NC}"
-      else
-        echo -e "${GREEN_DARK}To access ProDB ${NC}: prodbc -h $tmpFqdn${GREEN_DARK} to log in to the cluster, then${NC}"
-      fi
-      echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
-      echo
-    elif [ ! -z "$serverFqdn" ]; then
-      echo -e "${GREEN_DARK}To access ProDB ${NC}: prodbc -h $serverFqdn${GREEN_DARK} to log in to the ProDB server${NC}"
-      echo
-    fi
-    echo -e "\033[44;32;1mProDB is installed successfully!${NC}"
-    echo
-  else # Only install client
-    install_bin
-    install_config
-
-    echo
-    echo -e "\033[44;32;1mProDB client is installed successfully!${NC}"
-  fi
-
-  rm -rf $(tar -tf prodb.tar.gz)
-}
-
-
-## ==============================Main program starts from here============================
-serverFqdn=$(hostname)
-if [ "$verType" == "server" ]; then
-  # Install server and client
-  if [ -x ${bin_dir}/prodbs ]; then
-    update_flag=1
-    update_prodb
-  else
-    install_prodb
-  fi
-elif [ "$verType" == "client" ]; then
-  interactiveFqdn=no
-  # Only install client
-  if [ -x ${bin_dir}/prodbc ]; then
-    update_flag=1
-    update_prodb client
-  else
-    install_prodb client
-  fi
-else
-  echo "please input a correct verType"
-fi
diff --git a/packaging/tools/install_tq.sh b/packaging/tools/install_tq.sh
deleted file mode 100755
index e74d5c7c4576f96608bc8a727b97aa84b9626817..0000000000000000000000000000000000000000
--- a/packaging/tools/install_tq.sh
+++ /dev/null
@@ -1,971 +0,0 @@
-#!/bin/bash
-#
-# This file is used to install the database on Linux systems. The operating system
-# is required to use systemd to manage services at boot
-
-set -e
-#set -x
-
-verMode=edge
-pagMode=full
-
-iplist=""
-serverFqdn=""
-# -----------------------Variables definition---------------------
-script_dir=$(dirname $(readlink -f "$0"))
-# Dynamic directory
-data_dir="/var/lib/tq"
-log_dir="/var/log/tq"
-
-data_link_dir="/usr/local/tq/data"
-log_link_dir="/usr/local/tq/log"
-
-cfg_install_dir="/etc/tq"
-
-bin_link_dir="/usr/bin"
-lib_link_dir="/usr/lib"
-lib64_link_dir="/usr/lib64"
-inc_link_dir="/usr/include"
-
-#install main path
-install_main_dir="/usr/local/tq"
-
-# old bin dir
-bin_dir="/usr/local/tq/bin"
-
-# v1.5 jar dir
-#v15_java_app_dir="/usr/local/lib/tq"
-
-service_config_dir="/etc/systemd/system"
-nginx_port=6060
-nginx_dir="/usr/local/nginxd"
-
-# Color setting
-RED='\033[0;31m'
-GREEN='\033[1;32m'
-GREEN_DARK='\033[0;32m'
-GREEN_UNDERLINE='\033[4;32m'
-NC='\033[0m'
-
-csudo=""
-if command -v sudo > /dev/null; then
-  csudo="sudo "
-fi
-
-update_flag=0
-
-initd_mod=0
-service_mod=2
-if pidof systemd &> /dev/null; then
-  service_mod=0
-elif $(which service &> /dev/null); then
-  service_mod=1
-  service_config_dir="/etc/init.d"
-  if $(which chkconfig &> /dev/null); then
-    initd_mod=1
-  elif $(which insserv &> /dev/null); then
-    initd_mod=2
-  elif $(which update-rc.d &> /dev/null); then
-    initd_mod=3
-  else
-    service_mod=2
-  fi
-else
-  service_mod=2
-fi
-
-
-# get the operating system type for using the corresponding init file
-# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
-#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
-if [[ -e /etc/os-release ]]; then
-  osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
-else
-  osinfo=""
-fi
-#echo "osinfo: ${osinfo}"
-os_type=0
-if echo $osinfo | grep -qwi "ubuntu" ; then
-#  echo "This is ubuntu system"
-  os_type=1
-elif echo $osinfo | grep -qwi "debian" ; then
-#  echo "This is debian system"
-  os_type=1
-elif echo $osinfo | grep -qwi "Kylin" ; then
-#  echo "This is Kylin system"
-  os_type=1
-elif echo $osinfo | grep -qwi "centos" ; then
-#  echo "This is centos system"
-  os_type=2
-elif echo $osinfo | grep -qwi "fedora" ; then
-#  echo "This is fedora system"
-  os_type=2
-else
-  echo " osinfo: ${osinfo}"
-  echo " This is an officially unverified Linux system,"
-  echo " if there are any problems with the installation and operation, "
-  echo " please feel free to contact taosdata.com for support."
-  os_type=1
-fi
-
-
-# ============================= get input parameters =================================================
-
-# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
-
-# set parameters by default value
-interactiveFqdn=yes  # [yes | no]
-verType=server       # [server | client]
-initType=systemd     # [systemd | service | ...]
-
-while getopts "hv:e:i:" arg
-do
-  case $arg in
-    e)
-      #echo "interactiveFqdn=$OPTARG"
-      interactiveFqdn=$( echo $OPTARG )
-      ;;
-    v)
-      #echo "verType=$OPTARG"
-      verType=$(echo $OPTARG)
-      ;;
-    i)
-      #echo "initType=$OPTARG"
-      initType=$(echo $OPTARG)
-      ;;
-    h)
-      echo "Usage: `basename $0` -v [server | client] -e [yes | no]"
-      exit 0
-      ;;
-    ?) #unknown option
-      echo "unknown argument"
-      exit 1
-      ;;
-  esac
-done
-
-#echo "verType=${verType} interactiveFqdn=${interactiveFqdn}"
-
-function kill_process() {
-  pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
-  if [ -n "$pid" ]; then
-    ${csudo}kill -9 $pid || :
-  fi
-}
-
-function install_main_path() {
-  #create install main dir and all sub dir
-  ${csudo}rm -rf ${install_main_dir} || :
-  ${csudo}mkdir -p ${install_main_dir}
-  ${csudo}mkdir -p ${install_main_dir}/cfg
-  ${csudo}mkdir -p ${install_main_dir}/bin
-  ${csudo}mkdir -p ${install_main_dir}/connector
-  ${csudo}mkdir -p ${install_main_dir}/driver
-  ${csudo}mkdir -p ${install_main_dir}/examples
-  ${csudo}mkdir -p ${install_main_dir}/include
-  ${csudo}mkdir -p ${install_main_dir}/init.d
-  if [ "$verMode" == "cluster" ]; then
-    ${csudo}mkdir -p ${nginx_dir}
-  fi
-}
-
-function install_bin() {
-  # Remove links
-  ${csudo}rm -f ${bin_link_dir}/tq || :
-  ${csudo}rm -f ${bin_link_dir}/tqd || :
-  ${csudo}rm -f ${bin_link_dir}/tqdemo || :
-  ${csudo}rm -f ${bin_link_dir}/rmtq || :
-  ${csudo}rm -f ${bin_link_dir}/tarbitrator || :
-  ${csudo}rm -f ${bin_link_dir}/set_core || :
-  ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || :
-
-  ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
-
-  #Make link
-  [ -x ${install_main_dir}/bin/tq ] && ${csudo}ln -s ${install_main_dir}/bin/tq ${bin_link_dir}/tq || :
-  [ -x ${install_main_dir}/bin/tqd ] && ${csudo}ln -s ${install_main_dir}/bin/tqd ${bin_link_dir}/tqd || :
-  [ -x ${install_main_dir}/bin/tqdemo ] && ${csudo}ln -s ${install_main_dir}/bin/tqdemo ${bin_link_dir}/tqdemo || :
-  [ -x ${install_main_dir}/bin/remove_tq.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_tq.sh ${bin_link_dir}/rmtq || :
-  [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
-  [ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || :
-  [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
-
-  if [ "$verMode" == "cluster" ]; then
-    ${csudo}cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo}chmod 0555 ${nginx_dir}/*
-    ${csudo}mkdir -p ${nginx_dir}/logs
-    ${csudo}chmod 777 ${nginx_dir}/sbin/nginx
-  fi
-}
-
-function install_lib() {
-  # Remove links
-  ${csudo}rm -f ${lib_link_dir}/libtaos.* || :
-  ${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
-  #${csudo}rm -rf ${v15_java_app_dir} || :
-  ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*
-
-  ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
-  ${csudo}ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
-
-  if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
-    ${csudo}ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
-    ${csudo}ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
-  fi
-
-  ${csudo}ldconfig
-}
-
-function install_header() {
-  ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
-  ${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
-  ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
-  ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
-  ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
-}
-
-function install_jemalloc() {
-  jemalloc_dir=${script_dir}/jemalloc
-
-  if [ -d ${jemalloc_dir} ]; then
-    ${csudo}/usr/bin/install -c -d /usr/local/bin
-
-    if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
-      ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
-    fi
-    if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
-      ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
-    fi
-    if [ -f ${jemalloc_dir}/bin/jeprof ]; then
-      ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
-    fi
-    if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
-      ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc
-      ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
-    fi
-    if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
-      ${csudo}/usr/bin/install -c -d /usr/local/lib
-      ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
-      ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
-      ${csudo}/usr/bin/install -c -d /usr/local/lib
-      if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
-        ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
-      fi
-      if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
-        ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
-      fi
-      if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
-        ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
-        ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
-      fi
-    fi
-    if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
-      ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc
-      ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
-    fi
-    if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
-      ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3
-      ${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
-    fi
-
-    if [ -d /etc/ld.so.conf.d ]; then
-      echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
-      ${csudo}ldconfig
-    else
-      echo "/etc/ld.so.conf.d not found!"
-    fi
-  fi
-}
-
-function add_newHostname_to_hosts() {
-  localIp="127.0.0.1"
-  OLD_IFS="$IFS"
-  IFS=" "
-  iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
-  arr=($iphost)
-  IFS="$OLD_IFS"
-  for s in "${arr[@]}"
-  do
-    if [[ "$s" == "$localIp" ]]; then
-      return
-    fi
-  done
-  ${csudo}echo "127.0.0.1 $1" >> /etc/hosts ||:
-}
-
-function set_hostname() {
-  echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:"
-  read newHostname
-  while true; do
-    if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then
-      break
-    else
-      read -p "Please enter one hostname(must not be 'localhost'):" newHostname
-    fi
-  done
-
-  ${csudo}hostname $newHostname ||:
-  retval=`echo $?`
-  if [[ $retval != 0 ]]; then
-    echo
-    echo "set hostname failed!"
-    return
-  fi
-  #echo -e -n "$(hostnamectl status --static)"
-  #echo -e -n "$(hostnamectl status --transient)"
-  #echo -e -n "$(hostnamectl status --pretty)"
-
-  #ubuntu/centos /etc/hostname
-  if [[ -e /etc/hostname ]]; then
-    ${csudo}echo $newHostname > /etc/hostname ||:
-  fi
-
-  #debian: #HOSTNAME=yourname
-  if [[ -e /etc/sysconfig/network ]]; then
-    ${csudo}sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||:
-  fi
-
-  ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/tq.cfg
-  serverFqdn=$newHostname
-
-  if [[ -e /etc/hosts ]]; then
-    add_newHostname_to_hosts $newHostname
-  fi
-}
-
-function is_correct_ipaddr() {
-  newIp=$1
-  OLD_IFS="$IFS"
-  IFS=" "
-  arr=($iplist)
-  IFS="$OLD_IFS"
-  for s in "${arr[@]}"
-  do
-    if [[ "$s" == "$newIp" ]]; then
-      return 0
-    fi
-  done
-
-  return 1
-}
-
-function set_ipAsFqdn() {
-  iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||:
-  if [ -z "$iplist" ]; then
-    iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||:
-  fi
-
-  if [ -z "$iplist" ]; then
-    echo
-    echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}"
-    localFqdn="127.0.0.1"
-    # Write the local FQDN to configuration file
-    ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/tq.cfg
-    serverFqdn=$localFqdn
-    echo
-    return
-  fi
-
-  echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:"
-  echo
-  echo -e -n "${GREEN}$iplist${NC}"
-  echo
-  echo
-  echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:"
-  read localFqdn
-  while true; do
-    if [ ! -z "$localFqdn" ]; then
-      # Check if correct ip address
-      is_correct_ipaddr $localFqdn
-      retval=`echo $?`
-      if [[ $retval != 0 ]]; then
-        read -p "Please choose an IP from local IP list:" localFqdn
-      else
-        # Write the local FQDN to configuration file
-        ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/tq.cfg
-        serverFqdn=$localFqdn
-        break
-      fi
-    else
-      read -p "Please choose an IP from local IP list:" localFqdn
-    fi
-  done
-}
-
-function local_fqdn_check() {
-  #serverFqdn=$(hostname)
-  echo
-  echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}"
-  echo
-  if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then
-    echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}"
-    echo
-
-    while true
-    do
-      read -r -p "Set hostname now? [Y/n] " input
-      if [ ! -n "$input" ]; then
-        set_hostname
-        break
-      else
-        case $input in
-          [yY][eE][sS]|[yY])
-            set_hostname
-            break
-            ;;
-
-          [nN][oO]|[nN])
-            set_ipAsFqdn
-            break
-            ;;
-
-          *)
-            echo "Invalid input..."
-            ;;
-        esac
-      fi
-    done
-  fi
-}
-
-function install_config() {
-  if [ ! -f ${cfg_install_dir}/tq.cfg ]; then
-    ${csudo}mkdir -p ${cfg_install_dir}
-    [ -f ${script_dir}/cfg/tq.cfg ] && ${csudo}cp ${script_dir}/cfg/tq.cfg ${cfg_install_dir}
-    ${csudo}chmod 644 ${cfg_install_dir}/*
-  fi
-
-  ${csudo}cp -f ${script_dir}/cfg/tq.cfg ${install_main_dir}/cfg/tq.cfg.org
-  ${csudo}ln -s ${cfg_install_dir}/tq.cfg ${install_main_dir}/cfg
-
-  [ ! -z $1 ] && return 0 || : # only install client
-
-  if ((${update_flag}==1)); then
-    return 0
-  fi
-
-  if [ "$interactiveFqdn" == "no" ]; then
-    return 0
-  fi
-
-  local_fqdn_check
-
-  #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
-  #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)"
-  #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)"
-  #FQDN_PATTERN=":[0-9]{1,5}$"
-
-  # first full-qualified domain name (FQDN) for TQ cluster system
-  echo
-  echo -e -n "${GREEN}Enter FQDN:port (like h1.taosdata.com:6030) of an existing TQ cluster node to join${NC}"
-  echo
-  echo -e -n "${GREEN}OR leave it blank to build one${NC}:"
-  read firstEp
-  while true; do
-    if [ ! -z "$firstEp" ]; then
-      # check the format of the firstEp
-      #if [[ $firstEp == $FQDN_PATTERN ]]; then
-        # Write the first FQDN to configuration file
-        ${csudo}sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/tq.cfg
-        break
-      #else
-      #  read -p "Please enter the correct FQDN:port: " firstEp
-      #fi
-    else
-      break
-    fi
-  done
-}
-
-
-function install_log() {
-  ${csudo}rm -rf ${log_dir} || :
-  ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
-
-  ${csudo}ln -s ${log_dir} ${install_main_dir}/log
-}
-
-function install_data() {
-  ${csudo}mkdir -p ${data_dir}
-
-  ${csudo}ln -s ${data_dir} ${install_main_dir}/data
-}
-
-function install_connector() {
-  ${csudo}cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
-}
-
-function install_examples() {
-  if [ -d ${script_dir}/examples ]; then
-    ${csudo}cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
-  fi
-}
-
-function clean_service_on_sysvinit() {
-  #restart_config_str="tq:2345:respawn:${service_config_dir}/tqd start"
-  #${csudo}sed -i "\|${restart_config_str}|d" /etc/inittab || :
-
-  if pidof tqd &> /dev/null; then
-    ${csudo}service tqd stop || :
-  fi
-
-  if pidof tarbitrator &> /dev/null; then
-    ${csudo}service tarbitratord stop || :
-  fi
-
-  if ((${initd_mod}==1)); then
-    if [ -e ${service_config_dir}/tqd ]; then
-      ${csudo}chkconfig --del tqd || :
-    fi
-
-    if [ -e ${service_config_dir}/tarbitratord ]; then
-      ${csudo}chkconfig --del tarbitratord || :
-    fi
-  elif ((${initd_mod}==2)); then
-    if [ -e ${service_config_dir}/tqd ]; then
-      ${csudo}insserv -r tqd || :
-    fi
-    if [ -e ${service_config_dir}/tarbitratord ]; then
-      ${csudo}insserv -r tarbitratord || :
-    fi
-  elif ((${initd_mod}==3)); then
-    if [ -e ${service_config_dir}/tqd ]; then
-      ${csudo}update-rc.d -f tqd remove || :
-    fi
-    if [ -e ${service_config_dir}/tarbitratord ]; then
-      ${csudo}update-rc.d -f tarbitratord remove || :
-    fi
-  fi
-
-  ${csudo}rm -f ${service_config_dir}/tqd || :
-  ${csudo}rm -f ${service_config_dir}/tarbitratord || :
-
-  if $(which init &> /dev/null); then
-    ${csudo}init q || :
-  fi
-}
-
-function install_service_on_sysvinit() {
-  clean_service_on_sysvinit
-  sleep 1
-
-  # Install tqd service
-
-  if ((${os_type}==1)); then
-    ${csudo}cp -f ${script_dir}/init.d/tqd.deb ${install_main_dir}/init.d/tqd
-    ${csudo}cp ${script_dir}/init.d/tqd.deb ${service_config_dir}/tqd && ${csudo}chmod a+x ${service_config_dir}/tqd
-    ${csudo}cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
-    ${csudo}cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
-  elif ((${os_type}==2)); then
-    ${csudo}cp -f ${script_dir}/init.d/tqd.rpm ${install_main_dir}/init.d/tqd
-    ${csudo}cp ${script_dir}/init.d/tqd.rpm ${service_config_dir}/tqd && ${csudo}chmod a+x ${service_config_dir}/tqd
-    ${csudo}cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
-    ${csudo}cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo}chmod a+x ${service_config_dir}/tarbitratord
-  fi
-
-  #restart_config_str="tq:2345:respawn:${service_config_dir}/tqd start"
-  #${csudo}grep -q -F "$restart_config_str" /etc/inittab || ${csudo}bash -c "echo '${restart_config_str}' >> /etc/inittab"
-
-  if ((${initd_mod}==1)); then
-    ${csudo}chkconfig --add tqd || :
-    ${csudo}chkconfig --level 2345 tqd on || :
-    ${csudo}chkconfig --add tarbitratord || :
-    ${csudo}chkconfig --level 2345 tarbitratord on || :
-  elif ((${initd_mod}==2)); then
-    ${csudo}insserv tqd || :
-    ${csudo}insserv -d tqd || :
-    ${csudo}insserv tarbitratord || :
-    ${csudo}insserv -d tarbitratord || :
-  elif ((${initd_mod}==3)); then
-    ${csudo}update-rc.d tqd defaults || :
-    ${csudo}update-rc.d tarbitratord defaults || :
-  fi
-}
-
-function clean_service_on_systemd() {
-  tqd_service_config="${service_config_dir}/tqd.service"
-  if systemctl is-active --quiet tqd; then
-    echo "TQ is running, stopping it..."
-    ${csudo}systemctl stop tqd &> /dev/null || echo &> /dev/null
-  fi
-  ${csudo}systemctl disable tqd &> /dev/null || echo &> /dev/null
-  ${csudo}rm -f ${tqd_service_config}
-
-  tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
-  if systemctl is-active --quiet tarbitratord; then
-    echo "tarbitrator is running, stopping it..."
-    ${csudo}systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
-  fi
-  ${csudo}systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
-  ${csudo}rm -f ${tarbitratord_service_config}
-
-  if [ "$verMode" == "cluster" ]; then
-    nginx_service_config="${service_config_dir}/nginxd.service"
-    if systemctl is-active --quiet nginxd; then
-      echo "Nginx for TQ is running, stopping it..."
-      ${csudo}systemctl stop nginxd &> /dev/null || echo &> /dev/null
-    fi
-    ${csudo}systemctl disable nginxd &> /dev/null || echo &> /dev/null
-    ${csudo}rm -f ${nginx_service_config}
-  fi
-}
-
-# tq:2345:respawn:/etc/init.d/tqd start
-
-function install_service_on_systemd() {
-  clean_service_on_systemd
-
-  tqd_service_config="${service_config_dir}/tqd.service"
-  ${csudo}bash -c "echo '[Unit]' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'Description=TQ server service' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'After=network-online.target' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'Wants=network-online.target' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo >> ${tqd_service_config}"
-  ${csudo}bash -c "echo '[Service]' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'Type=simple' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'ExecStart=/usr/bin/tqd' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'ExecStartPre=/usr/local/tq/bin/startPre.sh' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'StandardOutput=null' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'Restart=always' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo >> ${tqd_service_config}"
-  ${csudo}bash -c "echo '[Install]' >> ${tqd_service_config}"
-  ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tqd_service_config}"
-  ${csudo}systemctl enable tqd
-
-  tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
-  ${csudo}bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'Description=TQ arbitrator service' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
-  ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
-  #${csudo}systemctl enable tarbitratord
-
-  if [ "$verMode" == "cluster" ]; then
-    nginx_service_config="${service_config_dir}/nginxd.service"
-    ${csudo}bash -c "echo '[Unit]' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'Description=Nginx For TQ Service' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'After=network-online.target' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo >> ${nginx_service_config}"
-    ${csudo}bash -c "echo '[Service]' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'Type=forking' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'Restart=always' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo >> ${nginx_service_config}"
-    ${csudo}bash -c "echo '[Install]' >> ${nginx_service_config}"
-    ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}"
-    if ! ${csudo}systemctl enable nginxd &> /dev/null; then
-      ${csudo}systemctl daemon-reexec
-      ${csudo}systemctl enable nginxd
-    fi
-    ${csudo}systemctl start nginxd
-  fi
-}
-
-function install_service() {
-  if ((${service_mod}==0)); then
-    install_service_on_systemd
-  elif ((${service_mod}==1)); then
-    install_service_on_sysvinit
-  else
-    # must manually stop tqd
-    kill_process tqd
-  fi
-}
-
-vercomp () {
-  if [[ $1 == $2 ]]; then
-    return 0
-  fi
-  local IFS=.
-  local i ver1=($1) ver2=($2)
-  # fill empty fields in ver1 with zeros
-  for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
-    ver1[i]=0
-  done
-
-  for ((i=0; i<${#ver1[@]}; i++)); do
-    if [[ -z ${ver2[i]} ]]
-    then
-      # fill empty fields in ver2 with zeros
-      ver2[i]=0
-    fi
-    if ((10#${ver1[i]} > 10#${ver2[i]}))
-    then
-      return 1
-    fi
-    if ((10#${ver1[i]} < 10#${ver2[i]}))
-    then
-      return 2
-    fi
-  done
-  return 0
-}
-
-function is_version_compatible() {
-
-  curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6`
-
-  if [ -f ${script_dir}/driver/vercomp.txt ]; then
-    min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
-  else
-    min_compatible_version=$(${script_dir}/bin/tqd -V | head -1 | cut -d ' ' -f 5)
-  fi
-
-  vercomp $curr_version $min_compatible_version
-  case $? in
-    0) return 0;;
-    1) return 0;;
-    2) return 1;;
-  esac
-}
-
-function update_tq() {
-  # Start to update
-  if [ ! -e tq.tar.gz ]; then
-    echo "File tq.tar.gz does not exist"
-    exit 1
-  fi
-  tar -zxf tq.tar.gz
-  install_jemalloc
-
-  # Check if version compatible
-  if ! is_version_compatible; then
-    echo -e "${RED}Version incompatible${NC}"
-    return 1
-  fi
-
-  echo -e "${GREEN}Start to update TQ...${NC}"
-  # Stop the service if running
-  if pidof tqd &> /dev/null; then
-    if ((${service_mod}==0)); then
-      ${csudo}systemctl stop tqd || :
-    elif ((${service_mod}==1)); then
-      ${csudo}service tqd stop || :
-    else
-      kill_process tqd
-    fi
-    sleep 1
-  fi
-  if [ "$verMode" == "cluster" ]; then
-    if pidof nginx &> /dev/null; then
-      if ((${service_mod}==0)); then
-        ${csudo}systemctl stop nginxd || :
-      elif ((${service_mod}==1)); then
-        ${csudo}service nginxd stop || :
-      else
-        kill_process nginx
-      fi
-      sleep 1
-    fi
-  fi
-
-  install_main_path
-
-  install_log
-  install_header
-  install_lib
-  if [ "$pagMode" != "lite" ]; then
-    install_connector
-  fi
-  install_examples
-  if [ -z $1 ]; then
-    install_bin
-    install_service
-    install_config
-
-    openresty_work=false
-    if [ "$verMode" == "cluster" ]; then
-      # Check if openresty is installed
-      # Check if nginx is installed successfully
-      if type curl &> /dev/null; then
-        if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
-          echo -e "\033[44;32;1mNginx for TQ is updated successfully!${NC}"
-          openresty_work=true
-        else
-          echo -e "\033[44;31;5mNginx for TQ does not work! Please try again!\033[0m"
-        fi
-      fi
-    fi
-
-    #echo
-    #echo -e "\033[44;32;1mTQ is updated successfully!${NC}"
-    echo
-    echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/tq.cfg"
-    if ((${service_mod}==0)); then
-      echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo}systemctl start tqd${NC}"
-    elif ((${service_mod}==1)); then
-      echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo}service tqd start${NC}"
-    else
-      echo -e "${GREEN_DARK}To start TQ ${NC}: ./tqd${NC}"
-    fi
-
-    if [ ${openresty_work} = 'true' ]; then
-      echo -e "${GREEN_DARK}To access TQ ${NC}: use ${GREEN_UNDERLINE}tq -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
-    else
-      echo -e "${GREEN_DARK}To access TQ ${NC}: use ${GREEN_UNDERLINE}tq -h $serverFqdn${NC} in shell${NC}"
-    fi
-
-    echo
-    echo -e "\033[44;32;1mTQ is updated successfully!${NC}"
-  else
-    install_bin
-    install_config
-
-    echo
-    echo -e "\033[44;32;1mTQ client is updated successfully!${NC}"
-  fi
-
-  rm -rf $(tar -tf tq.tar.gz)
-}
-
-function install_tq() {
-  # Start to install
-  if [ ! -e tq.tar.gz ]; then
-    echo "File tq.tar.gz does not exist"
-    exit 1
-  fi
-  tar -zxf tq.tar.gz
-
-  echo -e "${GREEN}Start to install TQ...${NC}"
-
-  install_main_path
-
-  if [ -z $1 ]; then
-    install_data
-  fi
-
-  install_log
-  install_header
-  install_lib
-  install_jemalloc
-  if [ "$pagMode" != "lite" ]; then
-    install_connector
-  fi
-  install_examples
-
-  if [ -z $1 ]; then # install service and client
-    # For installing new
-    install_bin
-    install_service
-
-    openresty_work=false
-    if [ "$verMode" == "cluster" ]; then
-      # Check if nginx is installed successfully
-      if type curl &> /dev/null; then
-        if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
-          echo -e "\033[44;32;1mNginx for TQ is installed successfully!${NC}"
-          openresty_work=true
-        else
-          echo -e "\033[44;31;5mNginx for TQ does not work! Please try again!\033[0m"
-        fi
-      fi
-    fi
-
-    install_config
-
-    # Ask whether to start the service
-    #echo
-    #echo -e "\033[44;32;1mTQ is installed successfully!${NC}"
-    echo
-    echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/tq.cfg"
-    if ((${service_mod}==0)); then
-      echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo}systemctl start tqd${NC}"
-    elif ((${service_mod}==1)); then
-      echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo}service tqd start${NC}"
-    else
-      echo -e "${GREEN_DARK}To start TQ ${NC}: tqd${NC}"
-    fi
-
-    #if [ ${openresty_work} = 'true' ]; then
-    #  echo -e "${GREEN_DARK}To access TQ ${NC}: use ${GREEN_UNDERLINE}tq${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
-    #else
-    #  echo -e "${GREEN_DARK}To access TQ ${NC}: use ${GREEN_UNDERLINE}tq${NC} in shell${NC}"
-    #fi
-
-    if [ ! -z "$firstEp" ]; then
-      tmpFqdn=${firstEp%%:*}
-      substr=":"
-      if [[ $firstEp =~ $substr ]];then
-        tmpPort=${firstEp#*:}
-      else
-        tmpPort=""
-      fi
-      if [[ "$tmpPort" != "" ]];then
-        echo -e "${GREEN_DARK}To access TQ ${NC}: tq -h $tmpFqdn -P $tmpPort${GREEN_DARK} to log in to the cluster, then${NC}"
-      else
-        echo -e "${GREEN_DARK}To access TQ ${NC}: tq -h $tmpFqdn${GREEN_DARK} to log in to the cluster, then${NC}"
-      fi
-      echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
-      echo
-    elif [ ! -z "$serverFqdn" ]; then
-      echo -e "${GREEN_DARK}To access TQ ${NC}: tq -h $serverFqdn${GREEN_DARK} to log in to the TQ server${NC}"
-      echo
-    fi
-    echo -e "\033[44;32;1mTQ is installed successfully!${NC}"
-    echo
-  else # Only install client
-    install_bin
-    install_config
-
-    echo
-    echo -e "\033[44;32;1mTQ client is installed successfully!${NC}"
-  fi
-
-  rm -rf $(tar -tf tq.tar.gz)
-}
-
-
-## ==============================Main program starts from here============================
-serverFqdn=$(hostname)
-if [ "$verType" == "server" ]; then
-  # Install server and client
-  if [ -x ${bin_dir}/tqd ]; then
-    update_flag=1
-    update_tq
-  else
-    install_tq
-  fi
-elif [ "$verType" == "client" ]; then
-  interactiveFqdn=no
-  # Only install client
-  if [ -x ${bin_dir}/tq ]; then
-    update_flag=1
-    update_tq client
-  else
-    install_tq client
-  fi
-else
-  echo "please input a correct verType"
-fi
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
index 76310e225d15132f28006e197981b6a138b77707..a5989df363a45d8e185d9c50e2114452b1b65eb1 100755
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -13,42 +13,52 @@
 osType=$3
 verNumber=$4
 
 if [ "$osType" != "Darwin" ]; then
-    script_dir=$(dirname $(readlink -f "$0"))
+  script_dir=$(dirname $(readlink -f "$0"))
 else
-    script_dir=${source_dir}/packaging/tools
+  script_dir=${source_dir}/packaging/tools
 fi
 
 # Dynamic directory
+clientName="taos"
+serverName="taosd"
+logDir="/var/log/taos"
+dataDir="/var/lib/taos"
+configDir="/etc/taos"
+configFile="taos.cfg"
+installDir="/usr/local/taos"
+productName="TDengine"
+emailName="taosdata.com"
+uninstallScript="rmtaos"
 
 if [ "$osType" != "Darwin" ]; then
-    data_dir="/var/lib/taos"
-    log_dir="/var/log/taos"
+  data_dir=${dataDir}
+  log_dir=${logDir}
 
-    cfg_install_dir="/etc/taos"
+  cfg_install_dir=${configDir}
 
-    bin_link_dir="/usr/bin"
-    lib_link_dir="/usr/lib"
-    lib64_link_dir="/usr/lib64"
-    inc_link_dir="/usr/include"
+  bin_link_dir="/usr/bin"
+  lib_link_dir="/usr/lib"
+  lib64_link_dir="/usr/lib64"
+  inc_link_dir="/usr/include"
 
-    install_main_dir="/usr/local/taos"
+  install_main_dir=${installDir}
 
-    bin_dir="/usr/local/taos/bin"
+ bin_dir="${installDir}/bin" else - data_dir="/usr/local/var/lib/taos" - log_dir="/usr/local/var/log/taos" + data_dir="/usr/local${dataDir}" + log_dir="/usr/local${logDir}" - cfg_install_dir="/usr/local/etc/taos" + cfg_install_dir="/usr/local${configDir}" - bin_link_dir="/usr/local/bin" - lib_link_dir="/usr/local/lib" - inc_link_dir="/usr/local/include" + bin_link_dir="/usr/local/bin" + lib_link_dir="/usr/local/lib" + inc_link_dir="/usr/local/include" - install_main_dir="/usr/local/Cellar/tdengine/${verNumber}" - install_main_2_dir="/usr/local/Cellar/tdengine@${verNumber}/${verNumber}" + install_main_dir="/usr/local/Cellar/tdengine/${verNumber}" + install_main_2_dir="/usr/local/Cellar/tdengine@${verNumber}/${verNumber}" - bin_dir="/usr/local/Cellar/tdengine/${verNumber}/bin" - bin_2_dir="/usr/local/Cellar/tdengine@${verNumber}/${verNumber}/bin" + bin_dir="/usr/local/Cellar/tdengine/${verNumber}/bin" + bin_2_dir="/usr/local/Cellar/tdengine@${verNumber}/${verNumber}/bin" fi service_config_dir="/etc/systemd/system" @@ -66,592 +76,582 @@ service_mod=2 os_type=0 if [ "$osType" != "Darwin" ]; then - if command -v sudo > /dev/null; then + if command -v sudo >/dev/null; then csudo="sudo " - fi - initd_mod=0 - if pidof systemd &> /dev/null; then - service_mod=0 - elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi + fi + initd_mod=0 + if pidof systemd &>/dev/null; then + service_mod=0 + elif $(which service &>/dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &>/dev/null); then + initd_mod=1 + elif $(which insserv &>/dev/null); then + initd_mod=2 + elif $(which update-rc.d &>/dev/null); then + initd_mod=3 else - service_mod=2 - fi - - # get the operating system type for using the corresponding init file - # ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification - #osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) - osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) - #echo "osinfo: ${osinfo}" - if echo $osinfo | grep -qwi "ubuntu" ; then - echo "this is ubuntu system" - os_type=1 - elif echo $osinfo | grep -qwi "debian" ; then - echo "this is debian system" - os_type=1 - elif echo $osinfo | grep -qwi "Kylin" ; then - echo "this is Kylin system" - os_type=1 - elif echo $osinfo | grep -qwi "centos" ; then - echo "this is centos system" - os_type=2 - elif echo $osinfo | grep -qwi "fedora" ; then - echo "this is fedora system" - os_type=2 - else - echo "${osinfo}: This is an officially unverified linux system, If there are any problems with the installation and operation, " - echo "please feel free to contact taosdata.com for support." 
- os_type=1
- fi
+ service_mod=2
+ fi
+ else
+ service_mod=2
+ fi
+
+ # get the operating system type for using the corresponding init file
+ # ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
+ #osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2)
+ #echo "osinfo: ${osinfo}"
+ if echo $osinfo | grep -qwi "ubuntu"; then
+ echo "This is an Ubuntu system"
+ os_type=1
+ elif echo $osinfo | grep -qwi "debian"; then
+ echo "This is a Debian system"
+ os_type=1
+ elif echo $osinfo | grep -qwi "Kylin"; then
+ echo "This is a Kylin system"
+ os_type=1
+ elif echo $osinfo | grep -qwi "centos"; then
+ echo "This is a CentOS system"
+ os_type=2
+ elif echo $osinfo | grep -qwi "fedora"; then
+ echo "This is a Fedora system"
+ os_type=2
+ else
+ echo "${osinfo}: This is an officially unverified Linux system. If there are any problems with the installation and operation, "
+ echo "please feel free to contact ${emailName} for support."
+ os_type=1
+ fi
fi

function kill_taosadapter() {
- pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
- if [ -n "$pid" ]; then
- ${csudo}kill -9 $pid || :
- fi
+ pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo}kill -9 $pid || :
+ fi
}

function kill_taosd() {
- ps -ef | grep "taosd"
- pid=$(ps -ef | grep -w "taosd" | grep -v "grep" | awk '{print $2}')
- if [ -n "$pid" ]; then
- ${csudo}kill -9 $pid || :
- fi
+ ps -ef | grep ${serverName}
+ pid=$(ps -ef | grep -w ${serverName} | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo}kill -9 $pid || :
+ fi
}

function install_main_path() {
- #create install main dir and all sub dir
- if [ "$osType" != "Darwin" ]; then
- ${csudo}rm -rf ${install_main_dir} || :
- ${csudo}mkdir -p ${install_main_dir}
- ${csudo}mkdir -p ${install_main_dir}/cfg
- ${csudo}mkdir -p ${install_main_dir}/bin
-# ${csudo}mkdir -p ${install_main_dir}/connector
- ${csudo}mkdir -p ${install_main_dir}/driver
- ${csudo}mkdir -p ${install_main_dir}/examples
- ${csudo}mkdir -p ${install_main_dir}/include
-# ${csudo}mkdir -p ${install_main_dir}/init.d
- else
- ${csudo}rm -rf ${install_main_dir} || ${csudo}rm -rf ${install_main_2_dir} || :
- ${csudo}mkdir -p ${install_main_dir} || ${csudo}mkdir -p ${install_main_2_dir}
- ${csudo}mkdir -p ${install_main_dir}/cfg || ${csudo}mkdir -p ${install_main_2_dir}/cfg
- ${csudo}mkdir -p ${install_main_dir}/bin || ${csudo}mkdir -p ${install_main_2_dir}/bin
-# ${csudo}mkdir -p ${install_main_dir}/connector || ${csudo}mkdir -p ${install_main_2_dir}/connector
- ${csudo}mkdir -p ${install_main_dir}/driver || ${csudo}mkdir -p ${install_main_2_dir}/driver
- ${csudo}mkdir -p ${install_main_dir}/examples || ${csudo}mkdir -p ${install_main_2_dir}/examples
- ${csudo}mkdir -p ${install_main_dir}/include || ${csudo}mkdir -p ${install_main_2_dir}/include
- fi
+ #create install main dir and all sub dir
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo}rm -rf ${install_main_dir} || :
+ ${csudo}mkdir -p ${install_main_dir}
+ ${csudo}mkdir -p ${install_main_dir}/cfg
+ ${csudo}mkdir -p ${install_main_dir}/bin
+ # ${csudo}mkdir -p ${install_main_dir}/connector
+ ${csudo}mkdir -p ${install_main_dir}/driver
+ ${csudo}mkdir -p ${install_main_dir}/examples
+ ${csudo}mkdir -p ${install_main_dir}/include
+ # ${csudo}mkdir -p ${install_main_dir}/init.d
+ else
+ ${csudo}rm -rf ${install_main_dir} || ${csudo}rm -rf ${install_main_2_dir} || :
+ ${csudo}mkdir -p 
${install_main_dir} || ${csudo}mkdir -p ${install_main_2_dir} + ${csudo}mkdir -p ${install_main_dir}/cfg || ${csudo}mkdir -p ${install_main_2_dir}/cfg + ${csudo}mkdir -p ${install_main_dir}/bin || ${csudo}mkdir -p ${install_main_2_dir}/bin + # ${csudo}mkdir -p ${install_main_dir}/connector || ${csudo}mkdir -p ${install_main_2_dir}/connector + ${csudo}mkdir -p ${install_main_dir}/driver || ${csudo}mkdir -p ${install_main_2_dir}/driver + ${csudo}mkdir -p ${install_main_dir}/examples || ${csudo}mkdir -p ${install_main_2_dir}/examples + ${csudo}mkdir -p ${install_main_dir}/include || ${csudo}mkdir -p ${install_main_2_dir}/include + fi } function install_bin() { - # Remove links - ${csudo}rm -f ${bin_link_dir}/taos || : - ${csudo}rm -f ${bin_link_dir}/taosd || : - ${csudo}rm -f ${bin_link_dir}/taosadapter || : - ${csudo}rm -f ${bin_link_dir}/taosdemo || : - ${csudo}rm -f ${bin_link_dir}/taosdump || : - - if [ "$osType" != "Darwin" ]; then - ${csudo}rm -f ${bin_link_dir}/perfMonitor || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || : - ${csudo}rm -f ${bin_link_dir}/rmtaos || : - - ${csudo}cp -r ${binary_dir}/build/bin/taos ${install_main_dir}/bin || : - [ -f ${binary_dir}/build/bin/taosBenchmark ] && ${csudo}cp -r ${binary_dir}/build/bin/taosBenchmark ${install_main_dir}/bin || : - [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo || : - [ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || : - [ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || : - ${csudo}cp -r ${binary_dir}/build/bin/taosd ${install_main_dir}/bin || : - ${csudo}cp -r ${binary_dir}/build/bin/tarbitrator ${install_main_dir}/bin || : - - ${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin - ${csudo}cp -r ${script_dir}/remove.sh ${install_main_dir}/bin - ${csudo}cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin - ${csudo}cp -r ${script_dir}/run_taosd.sh ${install_main_dir}/bin - ${csudo}cp -r ${script_dir}/startPre.sh ${install_main_dir}/bin - - ${csudo}chmod 0555 ${install_main_dir}/bin/* - #Make link - [ -x ${install_main_dir}/bin/taos ] && ${csudo}ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : - [ -x ${install_main_dir}/bin/taosd ] && ${csudo}ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : - [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || : - [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : - [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : - [ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo}ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || : - [ -x ${install_main_dir}/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : - [ -x ${install_main_dir}/run_taosd.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || : - [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : - else - - ${csudo}cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin || ${csudo}cp -r ${binary_dir}/build/bin/* ${install_main_2_dir}/bin || : - 
${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin || ${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_2_dir} || : - ${csudo}cp -r ${script_dir}/remove_client.sh ${install_main_dir}/bin || ${csudo}cp -r ${script_dir}/remove_client.sh ${install_main_2_dir}/bin - ${csudo}chmod 0555 ${install_main_dir}/bin/* || ${csudo}chmod 0555 ${install_main_2_dir}/bin/* - #Make link - [ -x ${install_main_dir}/bin/taos ] || [ -x ${install_main_2_dir}/bin/taos ] && ${csudo}ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || ${csudo}ln -s ${install_main_2_dir}/bin/taos || : - [ -x ${install_main_dir}/bin/taosd ] || [ -x ${install_main_2_dir}/bin/taosd ] && ${csudo}ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || ${csudo}ln -s ${install_main_2_dir}/bin/taosd || : - [ -x ${install_main_dir}/bin/taosadapter ] || [ -x ${install_main_2_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || ${csudo}ln -s ${install_main_2_dir}/bin/taosadapter || : - [ -x ${install_main_dir}/bin/taosdump ] || [ -x ${install_main_2_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || ln -s ${install_main_2_dir}/bin/taosdump ${bin_link_dir}/taosdump || : - [ -x ${install_main_dir}/bin/taosdemo ] || [ -x ${install_main_2_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || ln -s ${install_main_2_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : - fi + # Remove links + ${csudo}rm -f ${bin_link_dir}/${clientName} || : + ${csudo}rm -f ${bin_link_dir}/${serverName} || : + ${csudo}rm -f ${bin_link_dir}/taosadapter || : + ${csudo}rm -f ${bin_link_dir}/taosdemo || : + ${csudo}rm -f ${bin_link_dir}/taosdump || : + + if [ "$osType" != "Darwin" ]; then + ${csudo}rm -f ${bin_link_dir}/perfMonitor || : + ${csudo}rm -f ${bin_link_dir}/set_core || : + ${csudo}rm -f ${bin_link_dir}/run_taosd_and_taosadapter.sh || : + ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : + + ${csudo}cp -r ${binary_dir}/build/bin/${clientName} ${install_main_dir}/bin || : + [ -f ${binary_dir}/build/bin/taosBenchmark ] && ${csudo}cp -r ${binary_dir}/build/bin/taosBenchmark ${install_main_dir}/bin || : + [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo || : + [ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || : + [ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || : + ${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || : + ${csudo}cp -r ${binary_dir}/build/bin/tarbitrator ${install_main_dir}/bin || : + + ${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin + ${csudo}cp -r ${script_dir}/remove.sh ${install_main_dir}/bin + ${csudo}cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin + ${csudo}cp -r ${script_dir}/run_taosd_and_taosadapter.sh ${install_main_dir}/bin + ${csudo}cp -r ${script_dir}/startPre.sh ${install_main_dir}/bin + + ${csudo}chmod 0555 ${install_main_dir}/bin/* + #Make link + [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || : + [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || : + [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s 
${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || : + [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : + [ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo}ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || : + [ -x ${install_main_dir}/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : + [ -x ${install_main_dir}/run_taosd_and_taosadapter.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd_and_taosadapter.sh ${bin_link_dir}/run_taosd_and_taosadapter.sh || : + [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || : + else + + ${csudo}cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin || ${csudo}cp -r ${binary_dir}/build/bin/* ${install_main_2_dir}/bin || : + ${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin || ${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_2_dir} || : + ${csudo}cp -r ${script_dir}/remove_client.sh ${install_main_dir}/bin || ${csudo}cp -r ${script_dir}/remove_client.sh ${install_main_2_dir}/bin + ${csudo}chmod 0555 ${install_main_dir}/bin/* || ${csudo}chmod 0555 ${install_main_2_dir}/bin/* + #Make link + [ -x ${install_main_dir}/bin/${clientName} ] || [ -x ${install_main_2_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || ${csudo}ln -s ${install_main_2_dir}/bin/${clientName} || : + [ -x ${install_main_dir}/bin/${serverName} ] || [ -x ${install_main_2_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || ${csudo}ln -s ${install_main_2_dir}/bin/${serverName} || : + [ -x ${install_main_dir}/bin/taosadapter ] || [ -x ${install_main_2_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || ${csudo}ln -s ${install_main_2_dir}/bin/taosadapter || : + [ -x ${install_main_dir}/bin/taosdump ] || [ -x ${install_main_2_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || ln -s ${install_main_2_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + [ -x ${install_main_dir}/bin/taosdemo ] || [ -x ${install_main_2_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || ln -s ${install_main_2_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : + fi } function install_jemalloc() { - if [ "$osType" != "Darwin" ]; then - /usr/bin/install -c -d /usr/local/bin - - if [ -f "${binary_dir}/build/bin/jemalloc-config" ]; then - ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc-config /usr/local/bin - fi - if [ -f "${binary_dir}/build/bin/jemalloc.sh" ]; then - ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc.sh /usr/local/bin - fi - if [ -f "${binary_dir}/build/bin/jeprof" ]; then - ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jeprof /usr/local/bin - fi - if [ -f "${binary_dir}/build/include/jemalloc/jemalloc.h" ]; then - ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h\ - /usr/local/include/jemalloc - fi - if [ -f "${binary_dir}/build/lib/libjemalloc.so.2" ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib - 
${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.so.2 /usr/local/lib - ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so - ${csudo}/usr/bin/install -c -d /usr/local/lib - [ -f ${binary_dir}/build/lib/libjemalloc.a ] && - ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib - [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ] && - ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib - if [ -f "${binary_dir}/build/lib/pkgconfig/jemalloc.pc" ]; then - ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig - ${csudo}/usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc\ - /usr/local/lib/pkgconfig - fi - if [ -d /etc/ld.so.conf.d ]; then - echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" - ${csudo}ldconfig - else - echo "/etc/ld.so.conf.d not found!" - fi - fi - if [ -f "${binary_dir}/build/share/doc/jemalloc/jemalloc.html" ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc - ${csudo}/usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html\ - /usr/local/share/doc/jemalloc - fi - if [ -f "${binary_dir}/build/share/man/man3/jemalloc.3" ]; then - ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3 - ${csudo}/usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3\ - /usr/local/share/man/man3 - fi + if [ "$osType" != "Darwin" ]; then + /usr/bin/install -c -d /usr/local/bin + + if [ -f "${binary_dir}/build/bin/jemalloc-config" ]; then + ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc-config /usr/local/bin + fi + if [ -f "${binary_dir}/build/bin/jemalloc.sh" ]; then + ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc.sh /usr/local/bin + fi + if [ -f "${binary_dir}/build/bin/jeprof" ]; then + ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jeprof /usr/local/bin + fi + if [ -f "${binary_dir}/build/include/jemalloc/jemalloc.h" ]; then + ${csudo}/usr/bin/install -c -d /usr/local/include/jemalloc + ${csudo}/usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h \ + /usr/local/include/jemalloc + fi + if [ -f "${binary_dir}/build/lib/libjemalloc.so.2" ]; then + ${csudo}/usr/bin/install -c -d /usr/local/lib + ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.so.2 /usr/local/lib + ${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so + ${csudo}/usr/bin/install -c -d /usr/local/lib + [ -f ${binary_dir}/build/lib/libjemalloc.a ] && + ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib + [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ] && + ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib + if [ -f "${binary_dir}/build/lib/pkgconfig/jemalloc.pc" ]; then + ${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig + ${csudo}/usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc \ + /usr/local/lib/pkgconfig + fi + if [ -d /etc/ld.so.conf.d ]; then + echo "/usr/local/lib" | ${csudo}tee /etc/ld.so.conf.d/jemalloc.conf >/dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf" + ${csudo}ldconfig + else + echo "/etc/ld.so.conf.d not found!" 
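The jemalloc branch above ends with the standard recipe for making a freshly installed shared library visible to the runtime linker: drop a one-line conf file into /etc/ld.so.conf.d and rebuild the cache. A hedged standalone sketch of that recipe; the /opt/example/lib path and the example.conf filename are illustrative, not from the script.

# Sketch: register a library directory with the dynamic linker.
libdir=/opt/example/lib # illustrative path
if [ -d /etc/ld.so.conf.d ]; then
  echo "${libdir}" | sudo tee /etc/ld.so.conf.d/example.conf >/dev/null
  sudo ldconfig # rebuild /etc/ld.so.cache so the new directory is searched
else
  echo "/etc/ld.so.conf.d not found!" >&2
fi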
+ fi + fi + if [ -f "${binary_dir}/build/share/doc/jemalloc/jemalloc.html" ]; then + ${csudo}/usr/bin/install -c -d /usr/local/share/doc/jemalloc + ${csudo}/usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html \ + /usr/local/share/doc/jemalloc + fi + if [ -f "${binary_dir}/build/share/man/man3/jemalloc.3" ]; then + ${csudo}/usr/bin/install -c -d /usr/local/share/man/man3 + ${csudo}/usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3 \ + /usr/local/share/man/man3 fi + fi } function install_avro() { - if [ "$osType" != "Darwin" ]; then - if [ -f "${binary_dir}/build/$1/libavro.so.23.0.0" ] && [ -d /usr/local/$1 ]; then - ${csudo}/usr/bin/install -c -d /usr/local/$1 - ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.so.23.0.0 /usr/local/$1 - ${csudo}ln -sf libavro.so.23.0.0 /usr/local/$1/libavro.so.23 - ${csudo}ln -sf libavro.so.23 /usr/local/$1/libavro.so - ${csudo}/usr/bin/install -c -d /usr/local/$1 - [ -f ${binary_dir}/build/$1/libavro.a ] && - ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.a /usr/local/$1 - - if [ -d /etc/ld.so.conf.d ]; then - echo "/usr/local/$1" | ${csudo}tee /etc/ld.so.conf.d/libavro.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/libavro.conf" - ${csudo}ldconfig - else - echo "/etc/ld.so.conf.d not found!" - fi - fi + if [ "$osType" != "Darwin" ]; then + if [ -f "${binary_dir}/build/$1/libavro.so.23.0.0" ] && [ -d /usr/local/$1 ]; then + ${csudo}/usr/bin/install -c -d /usr/local/$1 + ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.so.23.0.0 /usr/local/$1 + ${csudo}ln -sf libavro.so.23.0.0 /usr/local/$1/libavro.so.23 + ${csudo}ln -sf libavro.so.23 /usr/local/$1/libavro.so + ${csudo}/usr/bin/install -c -d /usr/local/$1 + [ -f ${binary_dir}/build/$1/libavro.a ] && + ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.a /usr/local/$1 + + if [ -d /etc/ld.so.conf.d ]; then + echo "/usr/local/$1" | ${csudo}tee /etc/ld.so.conf.d/libavro.conf >/dev/null || echo -e "failed to write /etc/ld.so.conf.d/libavro.conf" + ${csudo}ldconfig + else + echo "/etc/ld.so.conf.d not found!" 
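install_avro, shown around this point, wires up the conventional three-name chain for a shared library by hand. A minimal sketch of the same pattern; the destination directory is illustrative (the script derives it from its $1 argument), while the version numbers match the ones the script installs.

# Sketch: the soname chain install_avro creates.
# real file:  libavro.so.23.0.0   soname: libavro.so.23   dev link: libavro.so
dest=/usr/local/lib # illustrative destination
sudo install -m 755 libavro.so.23.0.0 "${dest}/"
sudo ln -sf libavro.so.23.0.0 "${dest}/libavro.so.23" # what binaries load at runtime
sudo ln -sf libavro.so.23 "${dest}/libavro.so"        # what the linker uses at build time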
+ fi fi + fi } function install_lib() { - # Remove links - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - if [ "$osType" != "Darwin" ]; then - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - fi - - if [ "$osType" != "Darwin" ]; then - ${csudo}cp ${binary_dir}/build/lib/libtaos.so.${verNumber} \ - ${install_main_dir}/driver \ - && ${csudo}chmod 777 ${install_main_dir}/driver/* - - ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 - ${csudo}ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - - if [ -d "${lib64_link_dir}" ]; then - ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 - ${csudo}ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so - fi - else - ${csudo}cp -Rf ${binary_dir}/build/lib/libtaos.${verNumber}.dylib \ - ${install_main_dir}/driver \ - || ${csudo}cp -Rf ${binary_dir}/build/lib/libtaos.${verNumber}.dylib \ - ${install_main_2_dir}/driver \ - && ${csudo}chmod 777 ${install_main_dir}/driver/* \ - || ${csudo}chmod 777 ${install_main_2_dir}/driver/* - - ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* \ - ${install_main_dir}/driver/libtaos.1.dylib \ - || ${csudo}ln -sf ${install_main_2_dir}/driver/libtaos.* \ - ${install_main_2_dir}/driver/libtaos.1.dylib || : - - ${csudo}ln -sf ${install_main_dir}/driver/libtaos.1.dylib \ - ${install_main_dir}/driver/libtaos.dylib \ - || ${csudo}ln -sf ${install_main_2_dir}/driver/libtaos.1.dylib \ - ${install_main_2_dir}/driver/libtaos.dylib || : - - ${csudo}ln -sf ${install_main_dir}/driver/libtaos.${verNumber}.dylib \ - ${lib_link_dir}/libtaos.1.dylib \ - || ${csudo}ln -sf ${install_main_2_dir}/driver/libtaos.${verNumber}.dylib \ - ${lib_link_dir}/libtaos.1.dylib || : - - ${csudo}ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib || : - fi - - install_jemalloc - install_avro lib - install_avro lib64 - - if [ "$osType" != "Darwin" ]; then - ${csudo}ldconfig - fi + # Remove links + ${csudo}rm -f ${lib_link_dir}/libtaos.* || : + if [ "$osType" != "Darwin" ]; then + ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : + fi + + if [ "$osType" != "Darwin" ]; then + ${csudo}cp ${binary_dir}/build/lib/libtaos.so.${verNumber} \ + ${install_main_dir}/driver && + ${csudo}chmod 777 ${install_main_dir}/driver/* + + ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo}ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [ -d "${lib64_link_dir}" ]; then + ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 + ${csudo}ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so + fi + else + ${csudo}cp -Rf ${binary_dir}/build/lib/libtaos.${verNumber}.dylib \ + ${install_main_dir}/driver || + ${csudo}cp -Rf ${binary_dir}/build/lib/libtaos.${verNumber}.dylib \ + ${install_main_2_dir}/driver && + ${csudo}chmod 777 ${install_main_dir}/driver/* || + ${csudo}chmod 777 ${install_main_2_dir}/driver/* + + ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* \ + ${install_main_dir}/driver/libtaos.1.dylib || + ${csudo}ln -sf ${install_main_2_dir}/driver/libtaos.* \ + ${install_main_2_dir}/driver/libtaos.1.dylib || : + + ${csudo}ln -sf ${install_main_dir}/driver/libtaos.1.dylib \ + ${install_main_dir}/driver/libtaos.dylib || + ${csudo}ln -sf ${install_main_2_dir}/driver/libtaos.1.dylib \ + ${install_main_2_dir}/driver/libtaos.dylib || : + + ${csudo}ln -sf ${install_main_dir}/driver/libtaos.${verNumber}.dylib \ + ${lib_link_dir}/libtaos.1.dylib || + ${csudo}ln -sf 
${install_main_2_dir}/driver/libtaos.${verNumber}.dylib \ + ${lib_link_dir}/libtaos.1.dylib || : + + ${csudo}ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib || : + fi + + install_jemalloc + install_avro lib + install_avro lib64 + + if [ "$osType" != "Darwin" ]; then + ${csudo}ldconfig + fi } function install_header() { - if [ "$osType" != "Darwin" ]; then - ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : - ${csudo}cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taosdef.h ${source_dir}/src/inc/taoserror.h \ - ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* - ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h - ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h - ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h - else - ${csudo}cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taosdef.h ${source_dir}/src/inc/taoserror.h \ - ${install_main_dir}/include \ - || ${csudo}cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taosdef.h ${source_dir}/src/inc/taoserror.h \ - ${install_main_2_dir}/include \ - && ${csudo}chmod 644 ${install_main_dir}/include/* \ - || ${csudo}chmod 644 ${install_main_2_dir}/include/* - fi + if [ "$osType" != "Darwin" ]; then + ${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || : + ${csudo}cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taosdef.h ${source_dir}/src/inc/taoserror.h \ + ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/* + ${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h + ${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h + else + ${csudo}cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taosdef.h ${source_dir}/src/inc/taoserror.h \ + ${install_main_dir}/include || + ${csudo}cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taosdef.h ${source_dir}/src/inc/taoserror.h \ + ${install_main_2_dir}/include && + ${csudo}chmod 644 ${install_main_dir}/include/* || + ${csudo}chmod 644 ${install_main_2_dir}/include/* + fi } function install_config() { - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then - ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/../cfg/taos.cfg ] && - ${csudo}cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir} - ${csudo}chmod 644 ${cfg_install_dir}/taos.cfg - ${csudo}cp -f ${script_dir}/../cfg/taos.cfg \ - ${cfg_install_dir}/taos.cfg.${verNumber} - ${csudo}ln -s ${cfg_install_dir}/taos.cfg \ - ${install_main_dir}/cfg/taos.cfg - else - ${csudo}cp -f ${script_dir}/../cfg/taos.cfg \ - ${cfg_install_dir}/taos.cfg.${verNumber} - fi + if [ ! -f ${cfg_install_dir}/${configFile} ]; then + ${csudo}mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/../cfg/${configFile} ] && + ${csudo}cp ${script_dir}/../cfg/${configFile} ${cfg_install_dir} + ${csudo}chmod 644 ${cfg_install_dir}/${configFile} + ${csudo}cp -f ${script_dir}/../cfg/${configFile} \ + ${cfg_install_dir}/${configFile}.${verNumber} + ${csudo}ln -s ${cfg_install_dir}/${configFile} \ + ${install_main_dir}/cfg/${configFile} + else + ${csudo}cp -f ${script_dir}/../cfg/${configFile} \ + ${cfg_install_dir}/${configFile}.${verNumber} + fi } function install_taosadapter_config() { - if [ ! 
-f "${cfg_install_dir}/taosadapter.toml" ]; then - ${csudo}mkdir -p ${cfg_install_dir} - [ -f ${binary_dir}/test/cfg/taosadapter.toml ] && - ${csudo}cp ${binary_dir}/test/cfg/taosadapter.toml ${cfg_install_dir} - [ -f ${cfg_install_dir}/taosadapter.toml ] && - ${csudo}chmod 644 ${cfg_install_dir}/taosadapter.toml - [ -f ${binary_dir}/test/cfg/taosadapter.toml ] && - ${csudo}cp -f ${binary_dir}/test/cfg/taosadapter.toml \ - ${cfg_install_dir}/taosadapter.toml.${verNumber} - [ -f ${cfg_install_dir}/taosadapter.toml ] && \ - ${csudo}ln -s ${cfg_install_dir}/taosadapter.toml \ - ${install_main_dir}/cfg/taosadapter.toml - else - if [ -f "${binary_dir}/test/cfg/taosadapter.toml" ]; then - ${csudo}cp -f ${binary_dir}/test/cfg/taosadapter.toml \ - ${cfg_install_dir}/taosadapter.toml.${verNumber} - fi - fi + if [ ! -f "${cfg_install_dir}/taosadapter.toml" ]; then + ${csudo}mkdir -p ${cfg_install_dir} + [ -f ${binary_dir}/test/cfg/taosadapter.toml ] && + ${csudo}cp ${binary_dir}/test/cfg/taosadapter.toml ${cfg_install_dir} + [ -f ${cfg_install_dir}/taosadapter.toml ] && + ${csudo}chmod 644 ${cfg_install_dir}/taosadapter.toml + [ -f ${binary_dir}/test/cfg/taosadapter.toml ] && + ${csudo}cp -f ${binary_dir}/test/cfg/taosadapter.toml \ + ${cfg_install_dir}/taosadapter.toml.${verNumber} + [ -f ${cfg_install_dir}/taosadapter.toml ] && + ${csudo}ln -s ${cfg_install_dir}/taosadapter.toml \ + ${install_main_dir}/cfg/taosadapter.toml + else + if [ -f "${binary_dir}/test/cfg/taosadapter.toml" ]; then + ${csudo}cp -f ${binary_dir}/test/cfg/taosadapter.toml \ + ${cfg_install_dir}/taosadapter.toml.${verNumber} + fi + fi } function install_log() { - ${csudo}rm -rf ${log_dir} || : - ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} - if [ "$osType" != "Darwin" ]; then - ${csudo}ln -s ${log_dir} ${install_main_dir}/log - else - ${csudo}ln -s ${log_dir} ${install_main_dir}/log || ${csudo}ln -s ${log_dir} ${install_main_2_dir}/log - fi + ${csudo}rm -rf ${log_dir} || : + ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} + if [ "$osType" != "Darwin" ]; then + ${csudo}ln -s ${log_dir} ${install_main_dir}/log + else + ${csudo}ln -s ${log_dir} ${install_main_dir}/log || ${csudo}ln -s ${log_dir} ${install_main_2_dir}/log + fi } function install_data() { - ${csudo}mkdir -p ${data_dir} - if [ "$osType" != "Darwin" ]; then - ${csudo}ln -s ${data_dir} ${install_main_dir}/data - else - ${csudo}ln -s ${data_dir} ${install_main_dir}/data || ${csudo}ln -s ${data_dir} ${install_main_2_dir}/data - fi + ${csudo}mkdir -p ${data_dir} + if [ "$osType" != "Darwin" ]; then + ${csudo}ln -s ${data_dir} ${install_main_dir}/data + else + ${csudo}ln -s ${data_dir} ${install_main_dir}/data || ${csudo}ln -s ${data_dir} ${install_main_2_dir}/data + fi } function install_connector() { - if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then - ${csudo}cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector - else - echo "WARNING: go connector not found, please check if want to use it!" 
- fi
- if [ "$osType" != "Darwin" ]; then
- ${csudo}cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
- ${csudo}cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo}chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
- else
- ${csudo}cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector || ${csudo}cp -rf ${source_dir}/src/connector/python ${install_main_2_dir}/connector
- ${csudo}cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo}chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
- ${csudo}cp ${binary_dir}/build/lib/*.jar ${install_main_2_dir}/connector &> /dev/null && ${csudo}chmod 777 ${install_main_2_dir}/connector/*.jar || echo &> /dev/null
- fi
+ if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then
+ ${csudo}cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector
+ else
+ echo "WARNING: go connector not found, please check whether you want to use it!"
+ fi
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo}cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
+ ${csudo}cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &>/dev/null && ${csudo}chmod 777 ${install_main_dir}/connector/*.jar || echo &>/dev/null
+ else
+ ${csudo}cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector || ${csudo}cp -rf ${source_dir}/src/connector/python ${install_main_2_dir}/connector
+ ${csudo}cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &>/dev/null && ${csudo}chmod 777 ${install_main_dir}/connector/*.jar || echo &>/dev/null
+ ${csudo}cp ${binary_dir}/build/lib/*.jar ${install_main_2_dir}/connector &>/dev/null && ${csudo}chmod 777 ${install_main_2_dir}/connector/*.jar || echo &>/dev/null
+ fi
}

function install_examples() {
- if [ "$osType" != "Darwin" ]; then
- ${csudo}cp -rf ${source_dir}/tests/examples/* ${install_main_dir}/examples
- else
- ${csudo}cp -rf ${source_dir}/tests/examples/* ${install_main_dir}/examples || ${csudo}cp -rf ${source_dir}/tests/examples/* ${install_main_2_dir}/examples
- fi
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo}cp -rf ${source_dir}/tests/examples/* ${install_main_dir}/examples
+ else
+ ${csudo}cp -rf ${source_dir}/tests/examples/* ${install_main_dir}/examples || ${csudo}cp -rf ${source_dir}/tests/examples/* ${install_main_2_dir}/examples
+ fi
}

function clean_service_on_sysvinit() {
- #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
- #${csudo}sed -i "\|${restart_config_str}|d" /etc/inittab || :
-
- if pidof taosd &> /dev/null; then
- ${csudo}service taosd stop || :
- fi
-
- if ((${initd_mod}==1)); then
- ${csudo}chkconfig --del taosd || :
- elif ((${initd_mod}==2)); then
- ${csudo}insserv -r taosd || :
- elif ((${initd_mod}==3)); then
- ${csudo}update-rc.d -f taosd remove || :
- fi
-
- ${csudo}rm -f ${service_config_dir}/taosd || :
-
- if $(which init &> /dev/null); then
- ${csudo}init q || :
- fi
+ if pidof ${serverName} &>/dev/null; then
+ ${csudo}service ${serverName} stop || :
+ fi
+
+ if ((${initd_mod} == 1)); then
+ ${csudo}chkconfig --del ${serverName} || :
+ elif ((${initd_mod} == 2)); then
+ ${csudo}insserv -r ${serverName} || :
+ elif ((${initd_mod} == 3)); then
+ ${csudo}update-rc.d -f ${serverName} remove || :
+ fi
+
+ ${csudo}rm -f ${service_config_dir}/${serverName} || :
+
+ if $(which init &>/dev/null); then
+ ${csudo}init q || :
+ fi
}

function install_service_on_sysvinit() {
- clean_service_on_sysvinit
-
- 
sleep 1 - - # Install taosd service - if ((${os_type}==1)); then -# ${csudo}cp -f ${script_dir}/../deb/taosd ${install_main_dir}/init.d - ${csudo}cp ${script_dir}/../deb/taosd ${service_config_dir} && ${csudo}chmod a+x ${service_config_dir}/taosd - elif ((${os_type}==2)); then -# ${csudo}cp -f ${script_dir}/../rpm/taosd ${install_main_dir}/init.d - ${csudo}cp ${script_dir}/../rpm/taosd ${service_config_dir} && ${csudo}chmod a+x ${service_config_dir}/taosd - fi - - #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - #${csudo}grep -q -F "$restart_config_str" /etc/inittab || ${csudo}bash -c "echo '${restart_config_str}' >> /etc/inittab" - - if ((${initd_mod}==1)); then - ${csudo}chkconfig --add taosd || : - ${csudo}chkconfig --level 2345 taosd on || : - elif ((${initd_mod}==2)); then - ${csudo}insserv taosd || : - ${csudo}insserv -d taosd || : - elif ((${initd_mod}==3)); then - ${csudo}update-rc.d taosd defaults || : - fi + clean_service_on_sysvinit + + sleep 1 + + if ((${os_type} == 1)); then + # ${csudo}cp -f ${script_dir}/../deb/${serverName} ${install_main_dir}/init.d + ${csudo}cp ${script_dir}/../deb/${serverName} ${service_config_dir} && ${csudo}chmod a+x ${service_config_dir}/${serverName} + elif ((${os_type} == 2)); then + # ${csudo}cp -f ${script_dir}/../rpm/${serverName} ${install_main_dir}/init.d + ${csudo}cp ${script_dir}/../rpm/${serverName} ${service_config_dir} && ${csudo}chmod a+x ${service_config_dir}/${serverName} + fi + + if ((${initd_mod} == 1)); then + ${csudo}chkconfig --add ${serverName} || : + ${csudo}chkconfig --level 2345 ${serverName} on || : + elif ((${initd_mod} == 2)); then + ${csudo}insserv ${serverName} || : + ${csudo}insserv -d ${serverName} || : + elif ((${initd_mod} == 3)); then + ${csudo}update-rc.d ${serverName} defaults || : + fi } function clean_service_on_systemd() { - taosd_service_config="${service_config_dir}/taosd.service" + taosd_service_config="${service_config_dir}/${serverName}.service" - if systemctl is-active --quiet taosd; then - echo "TDengine is running, stopping it..." - ${csudo}systemctl stop taosd &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable taosd &> /dev/null || echo &> /dev/null + if systemctl is-active --quiet ${serverName}; then + echo "${productName} is running, stopping it..." 
+ ${csudo}systemctl stop ${serverName} &>/dev/null || echo &>/dev/null + fi + ${csudo}systemctl disable ${serverName} &>/dev/null || echo &>/dev/null - ${csudo}rm -f ${taosd_service_config} + ${csudo}rm -f ${taosd_service_config} } -# taos:2345:respawn:/etc/init.d/taosd start - function install_service_on_systemd() { - clean_service_on_systemd - - taosd_service_config="${service_config_dir}/taosd.service" - - ${csudo}bash -c "echo '[Unit]' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}" - ${csudo}bash -c "echo >> ${taosd_service_config}" - ${csudo}bash -c "echo '[Service]' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'Type=simple' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'StandardOutput=null' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'Restart=always' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${taosd_service_config}" - ${csudo}bash -c "echo >> ${taosd_service_config}" - ${csudo}bash -c "echo '[Install]' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" - ${csudo}systemctl enable taosd + clean_service_on_systemd + + taosd_service_config="${service_config_dir}/${serverName}.service" + + ${csudo}bash -c "echo '[Unit]' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'Description=${productName} server service' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'After=network-online.target' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}" + ${csudo}bash -c "echo >> ${taosd_service_config}" + ${csudo}bash -c "echo '[Service]' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'Type=simple' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'ExecStart=/usr/bin/${serverName}' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'ExecStartPre=${installDir}/bin/startPre.sh' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'TimeoutStartSec=0' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'StandardOutput=null' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'Restart=always' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'StartLimitBurst=3' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'StartLimitInterval=60s' >> ${taosd_service_config}" + ${csudo}bash -c "echo >> ${taosd_service_config}" + ${csudo}bash -c "echo 
'[Install]' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" + ${csudo}systemctl enable ${serverName} } function install_taosadapter_service() { - if ((${service_mod}==0)); then - [ -f ${binary_dir}/test/cfg/taosadapter.service ] &&\ - ${csudo}cp ${binary_dir}/test/cfg/taosadapter.service\ - ${service_config_dir}/ || : - ${csudo}systemctl daemon-reload - fi + if ((${service_mod} == 0)); then + [ -f ${binary_dir}/test/cfg/taosadapter.service ] && + ${csudo}cp ${binary_dir}/test/cfg/taosadapter.service \ + ${service_config_dir}/ || : + ${csudo}systemctl daemon-reload + fi } function install_service() { - if ((${service_mod}==0)); then - install_service_on_systemd - elif ((${service_mod}==1)); then - install_service_on_sysvinit - else - # must manual stop taosd - kill_taosd - fi + if ((${service_mod} == 0)); then + install_service_on_systemd + elif ((${service_mod} == 1)); then + install_service_on_sysvinit + else + kill_taosd + fi } function update_TDengine() { - echo -e "${GREEN}Start to update TDengine...${NC}" - # Stop the service if running - - if pidof taosd &> /dev/null; then - if ((${service_mod}==0)); then - ${csudo}systemctl stop taosd || : - elif ((${service_mod}==1)); then - ${csudo}service taosd stop || : - else - kill_taosadapter - kill_taosd - fi - sleep 1 - fi - - install_main_path - - install_log - install_header - install_lib -# install_connector - install_examples - install_bin - - install_service - install_taosadapter_service - - install_config - install_taosadapter_config - - echo - echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" - echo - - echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" - echo -e "${GREEN_DARK}To configure Taos Adapter (if has) ${NC}: edit /etc/taos/taosadapter.toml" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo}systemctl start taosd${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo}service taosd start${NC}" + echo -e "${GREEN}Start to update ${productName}...${NC}" + # Stop the service if running + + if pidof ${serverName} &>/dev/null; then + if ((${service_mod} == 0)); then + ${csudo}systemctl stop ${serverName} || : + elif ((${service_mod} == 1)); then + ${csudo}service ${serverName} stop || : else - echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}" - echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}" + kill_taosadapter + kill_taosd fi - - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" - echo - echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" + sleep 1 + fi + + install_main_path + + install_log + install_header + install_lib + # install_connector + install_examples + install_bin + + install_service + install_taosadapter_service + + install_config + install_taosadapter_config + + echo + echo -e "\033[44;32;1m${productName} is updated successfully!${NC}" + echo + + echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" + echo -e "${GREEN_DARK}To configure Taos Adapter (if has) ${NC}: edit ${configDir}/taosadapter.toml" + if ((${service_mod} == 0)); then + echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" + elif ((${service_mod} == 1)); then + echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" + else + echo -e "${GREEN_DARK}To start Taos 
Adapter (if has)${NC}: taosadapter &${NC}" + echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}" + fi + + echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName}${NC} in shell${NC}" + echo + echo -e "\033[44;32;1m${productName} is updated successfully!${NC}" } function install_TDengine() { - # Start to install - echo -e "${GREEN}Start to install TDengine...${NC}" - - install_main_path - - install_data - install_log - install_header - install_lib -# install_connector - install_examples - install_bin - - install_service - install_taosadapter_service - - install_config - install_taosadapter_config - - # Ask if to start the service - echo - echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" - echo - echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" - echo -e "${GREEN_DARK}To configure taosadapter (if has) ${NC}: edit /etc/taos/taosadapter.toml" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo}systemctl start taosd${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo}service taosd start${NC}" - else - echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}" - echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" - fi - - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" - echo - echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" + # Start to install + echo -e "${GREEN}Start to install ${productName}...${NC}" + + install_main_path + + install_data + install_log + install_header + install_lib + # install_connector + install_examples + install_bin + + install_service + install_taosadapter_service + + install_config + install_taosadapter_config + + # Ask if to start the service + echo + echo -e "\033[44;32;1m${productName} is installed successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" + echo -e "${GREEN_DARK}To configure taosadapter (if has) ${NC}: edit ${configDir}/taosadapter.toml" + if ((${service_mod} == 0)); then + echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" + elif ((${service_mod} == 1)); then + echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" + else + echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}" + echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}" + fi + + echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName}${NC} in shell${NC}" + echo + echo -e "\033[44;32;1m${productName} is installed successfully!${NC}" } ## ==============================Main program starts from here============================ echo source directory: $1 echo binary directory: $2 if [ "$osType" != "Darwin" ]; then - if [ -x ${bin_dir}/taos ]; then - update_TDengine - else - install_TDengine - fi + if [ -x ${bin_dir}/${clientName} ]; then + update_TDengine + else + install_TDengine + fi else - if [ -x ${bin_dir}/taos ] || [ -x ${bin_2_dir}/taos ]; then - update_TDengine - else - install_TDengine - fi + if [ -x ${bin_dir}/${clientName} ] || [ -x ${bin_2_dir}/${clientName} ]; then + update_TDengine + else + install_TDengine + fi fi diff --git a/packaging/tools/makearbi.sh b/packaging/tools/makearbi.sh index d654910480e52b99e040df09e1fb9ecedbe5cad5..6dacfdd90d4499ac1f79c1eb31f9c786e5c862df 100755 --- 
a/packaging/tools/makearbi.sh
+++ b/packaging/tools/makearbi.sh
@@ -18,6 +18,8 @@ pagMode=$8
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"

+productName="TDengine"
+
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
@@ -25,9 +27,9 @@ release_dir="${top_dir}/release"

#package_name='linux'
if [ "$verMode" == "cluster" ]; then
- install_dir="${release_dir}/TDengine-enterprise-arbitrator-${version}"
+ install_dir="${release_dir}/${productName}-enterprise-arbitrator-${version}"
else
- install_dir="${release_dir}/TDengine-arbitrator-${version}"
+ install_dir="${release_dir}/${productName}-arbitrator-${version}"
fi

# Directories and files.
@@ -45,22 +47,13 @@ mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x $
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :

-cd ${release_dir}
+cd ${release_dir} # install_dir already distinguishes cluster from edge, so the old per-mode naming block below was removed

pkg_name=${install_dir}-${osType}-${cpuType}

-# if [ "$verMode" == "cluster" ]; then
-# pkg_name=${install_dir}-${osType}-${cpuType}
-# elif [ "$verMode" == "edge" ]; then
-# pkg_name=${install_dir}-${osType}-${cpuType}
-# else
-# echo "unknow verMode, nor cluster or edge"
-# exit 1
-# fi
-
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
- pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
+ pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
@@ -68,12 +61,11 @@ else
exit 1
fi

-
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
- echo "tar ${pkg_name}.tar.gz error !!!"
- exit $exitcode
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit $exitcode
fi

cd ${curr_dir}
diff --git a/packaging/tools/makearbi_jh.sh b/packaging/tools/makearbi_jh.sh
deleted file mode 100755
index 5457b163599421d0a5917156efde1c8814a6f514..0000000000000000000000000000000000000000
--- a/packaging/tools/makearbi_jh.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-#
-# Generate arbitrator's tar.gz setup package for all os system
-
-set -e
-#set -x
-
-curr_dir=$(pwd)
-compile_dir=$1
-version=$2
-build_time=$3
-cpuType=$4
-osType=$5
-verMode=$6
-verType=$7
-pagMode=$8
-
-script_dir="$(dirname $(readlink -f $0))"
-top_dir="$(readlink -f ${script_dir}/../..)"
-
-# create compressed install file.
-build_dir="${compile_dir}/build"
-code_dir="${top_dir}/src"
-release_dir="${top_dir}/release"
-
-#package_name='linux'
-if [ "$verMode" == "cluster" ]; then
- install_dir="${release_dir}/jh_iot-enterprise-arbitrator-${version}"
-else
- install_dir="${release_dir}/jh_iot-arbitrator-${version}"
-fi
-
-# Directories and files.
-bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_jh.sh"
-install_files="${script_dir}/install_arbi_jh.sh"
-
-init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
-init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
-
-# make directories.
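The staging lines that follow, like their counterparts in every makearbi variant, rely on the cmd && cmd || : pattern so that a missing optional input cannot abort a set -e script. A standalone sketch of the idiom with illustrative paths:

set -e
# Sketch: stage optional files without tripping set -e.
mkdir -p stage/init.d && cp ../deb/tarbitratord stage/init.d/tarbitratord.deb || :
mkdir -p stage/init.d && cp ../rpm/tarbitratord stage/init.d/tarbitratord.rpm || :
# "|| :" forces the list's overall status to 0, so packaging continues
# even when one init-script variant is absent from the tree.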
-mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_jh.sh || : -mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : -mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : -mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : - -cd ${release_dir} - -if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -else - echo "unknow verMode, nor cluster or edge" - exit 1 -fi - -if [ "$verType" == "beta" ]; then - pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} -else - echo "unknow verType, nor stabel or beta" - exit 1 -fi - -tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -exitcode=$? -if [ "$exitcode" != "0" ]; then - echo "tar ${pkg_name}.tar.gz error !!!" - exit $exitcode -fi - -cd ${curr_dir} diff --git a/packaging/tools/makearbi_kh.sh b/packaging/tools/makearbi_kh.sh deleted file mode 100755 index c7fa40eb4f1fc4003e6a584bdc5c4534616754d6..0000000000000000000000000000000000000000 --- a/packaging/tools/makearbi_kh.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash -# -# Generate arbitrator's tar.gz setup package for all os system - -set -e -#set -x - -curr_dir=$(pwd) -compile_dir=$1 -version=$2 -build_time=$3 -cpuType=$4 -osType=$5 -verMode=$6 -verType=$7 -pagMode=$8 - -script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -f ${script_dir}/../..)" - -# create compressed install file. -build_dir="${compile_dir}/build" -code_dir="${top_dir}/src" -release_dir="${top_dir}/release" - -#package_name='linux' -if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/KingHistorian-enterprise-arbitrator-${version}" -else - install_dir="${release_dir}/KingHistorian-arbitrator-${version}" -fi - -# Directories and files. -bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_kh.sh" -install_files="${script_dir}/install_arbi_kh.sh" - -init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord -init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord - -# make directories. -mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_kh.sh || : -#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || : -mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : -mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : -mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : - -cd ${release_dir} - -if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -else - echo "unknow verMode, nor cluster or edge" - exit 1 -fi - -if [ "$verType" == "beta" ]; then - pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} -else - echo "unknow verType, nor stabel or beta" - exit 1 -fi - -tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -exitcode=$? -if [ "$exitcode" != "0" ]; then - echo "tar ${pkg_name}.tar.gz error !!!" 
- exit $exitcode -fi - -cd ${curr_dir} diff --git a/packaging/tools/makearbi_power.sh b/packaging/tools/makearbi_power.sh deleted file mode 100755 index a942a7860dd4fd0a6590fceadc00abfc19815414..0000000000000000000000000000000000000000 --- a/packaging/tools/makearbi_power.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash -# -# Generate arbitrator's tar.gz setup package for all os system - -set -e -#set -x - -curr_dir=$(pwd) -compile_dir=$1 -version=$2 -build_time=$3 -cpuType=$4 -osType=$5 -verMode=$6 -verType=$7 -pagMode=$8 - -script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -f ${script_dir}/../..)" - -# create compressed install file. -build_dir="${compile_dir}/build" -code_dir="${top_dir}/src" -release_dir="${top_dir}/release" - -#package_name='linux' -if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/PowerDB-enterprise-arbitrator-${version}" -else - install_dir="${release_dir}/PowerDB-arbitrator-${version}" -fi - -# Directories and files. -bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_power.sh" -install_files="${script_dir}/install_arbi_power.sh" - -init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord -init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord - -# make directories. -mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_power.sh || : -#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || : -mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : -mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : -mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : - -cd ${release_dir} - -if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -else - echo "unknow verMode, nor cluster or edge" - exit 1 -fi - -if [ "$verType" == "beta" ]; then - pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} -else - echo "unknow verType, nor stabel or beta" - exit 1 -fi - -tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -exitcode=$? -if [ "$exitcode" != "0" ]; then - echo "tar ${pkg_name}.tar.gz error !!!" - exit $exitcode -fi - -cd ${curr_dir} diff --git a/packaging/tools/makearbi_pro.sh b/packaging/tools/makearbi_pro.sh deleted file mode 100755 index c432e97d4762da7a5a68672c46e118f76c59ae20..0000000000000000000000000000000000000000 --- a/packaging/tools/makearbi_pro.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash -# -# Generate arbitrator's tar.gz setup package for all os system - -set -e -#set -x - -curr_dir=$(pwd) -compile_dir=$1 -version=$2 -build_time=$3 -cpuType=$4 -osType=$5 -verMode=$6 -verType=$7 -pagMode=$8 - -script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -f ${script_dir}/../..)" - -# create compressed install file. -build_dir="${compile_dir}/build" -code_dir="${top_dir}/src" -release_dir="${top_dir}/release" - -#package_name='linux' -if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/ProDB-enterprise-arbitrator-${version}" -else - install_dir="${release_dir}/ProDB-arbitrator-${version}" -fi - -# Directories and files. 
-bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_pro.sh" -install_files="${script_dir}/install_arbi_pro.sh" - -init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord -init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord - -# make directories. -mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_pro.sh || : -#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || : -mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : -mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : -mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : - -cd ${release_dir} - -if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -else - echo "unknow verMode, nor cluster or edge" - exit 1 -fi - -if [ "$verType" == "beta" ]; then - pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} -else - echo "unknow verType, nor stabel or beta" - exit 1 -fi - -tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -exitcode=$? -if [ "$exitcode" != "0" ]; then - echo "tar ${pkg_name}.tar.gz error !!!" - exit $exitcode -fi - -cd ${curr_dir} diff --git a/packaging/tools/makearbi_tq.sh b/packaging/tools/makearbi_tq.sh deleted file mode 100755 index 3460696b08c11815a68edc12a61d53f2651d699a..0000000000000000000000000000000000000000 --- a/packaging/tools/makearbi_tq.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash -# -# Generate arbitrator's tar.gz setup package for all os system - -set -e -#set -x - -curr_dir=$(pwd) -compile_dir=$1 -version=$2 -build_time=$3 -cpuType=$4 -osType=$5 -verMode=$6 -verType=$7 -pagMode=$8 - -script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -f ${script_dir}/../..)" - -# create compressed install file. -build_dir="${compile_dir}/build" -code_dir="${top_dir}/src" -release_dir="${top_dir}/release" - -#package_name='linux' -if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/TQ-enterprise-arbitrator-${version}" -else - install_dir="${release_dir}/TQ-arbitrator-${version}" -fi - -# Directories and files. -bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_tq.sh" -install_files="${script_dir}/install_arbi_tq.sh" - -init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord -init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord - -# make directories. 
-mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_tq.sh || : -#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || : -mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : -mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : -mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : - -cd ${release_dir} - -if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -else - echo "unknow verMode, nor cluster or edge" - exit 1 -fi - -if [ "$verType" == "beta" ]; then - pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} -else - echo "unknow verType, nor stabel or beta" - exit 1 -fi - -tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -exitcode=$? -if [ "$exitcode" != "0" ]; then - echo "tar ${pkg_name}.tar.gz error !!!" - exit $exitcode -fi - -cd ${curr_dir} diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index 92d3f8a89cf3d985ca9149fdb9d910949285d5d8..3f0ce19eb9c7d4d2b41a723c437514433775d19b 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -14,14 +14,19 @@ verMode=$6 verType=$7 pagMode=$8 +productName="TDengine" +clientName="taos" +configFile="taos.cfg" +tarName="taos.tar.gz" + if [ "$osType" != "Darwin" ]; then - script_dir="$(dirname $(readlink -f $0))" - top_dir="$(readlink -f ${script_dir}/../..)" + script_dir="$(dirname $(readlink -f $0))" + top_dir="$(readlink -f ${script_dir}/../..)" else - script_dir=`dirname $0` - cd ${script_dir} - script_dir="$(pwd)" - top_dir=${script_dir}/../.. + script_dir=$(dirname $0) + cd ${script_dir} + script_dir="$(pwd)" + top_dir=${script_dir}/../.. fi # create compressed install file. @@ -32,29 +37,27 @@ release_dir="${top_dir}/release" #package_name='linux' if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/TDengine-enterprise-client-${version}" + install_dir="${release_dir}/${productName}-enterprise-client-${version}" else - install_dir="${release_dir}/TDengine-client-${version}" + install_dir="${release_dir}/${productName}-client-${version}" fi # Directories and files. if [ "$osType" != "Darwin" ]; then if [ "$pagMode" == "lite" ]; then - #strip ${build_dir}/bin/taosd - strip ${build_dir}/bin/taos - bin_files="${build_dir}/bin/taos \ + strip ${build_dir}/bin/${clientName} + bin_files="${build_dir}/bin/${clientName} \ ${script_dir}/remove_client.sh" else - bin_files="${build_dir}/bin/taos \ + bin_files="${build_dir}/bin/${clientName} \ ${script_dir}/remove_client.sh \ ${script_dir}/set_core.sh \ ${script_dir}/get_client.sh" - #${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb" fi lib_files="${build_dir}/lib/libtaos.so.${version}" else - bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh" + bin_files="${build_dir}/bin/${clientName} ${script_dir}/remove_client.sh" lib_files="${build_dir}/lib/libtaos.${version}.dylib" fi @@ -70,77 +73,77 @@ install_files="${script_dir}/install_client.sh" # make directories. 
mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile} mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* if [ -f ${build_dir}/bin/jemalloc-config ]; then - mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} - cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin - if [ -f ${build_dir}/bin/jemalloc.sh ]; then - cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin - fi - if [ -f ${build_dir}/bin/jeprof ]; then - cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin - fi - if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then - cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc - fi - if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then - cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib - ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so - fi - if [ -f ${build_dir}/lib/libjemalloc.a ]; then - cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then - cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then - cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig - fi - if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then - cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc - fi - if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then - cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 - fi + mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} + cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin + if [ -f ${build_dir}/bin/jemalloc.sh ]; then + cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/bin/jeprof ]; then + cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then + cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc + fi + if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then + cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib + ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so + fi + if [ -f ${build_dir}/lib/libjemalloc.a ]; then + cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then + cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then + cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig + fi + if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then + cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc + fi + if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then + cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 + fi fi cd ${install_dir} if [ "$osType" != "Darwin" ]; then - tar -zcv -f taos.tar.gz * --remove-files || : + tar -zcv -f ${tarName} * --remove-files || : else - tar -zcv -f taos.tar.gz * || : - mv taos.tar.gz .. - rm -rf ./* - mv ../taos.tar.gz . 
+ tar -zcv -f ${tarName} * || : + mv ${tarName} .. + rm -rf ./* + mv ../${tarName} . fi cd ${curr_dir} cp ${install_files} ${install_dir} if [ "$osType" == "Darwin" ]; then - sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh >> install_client_temp.sh - mv install_client_temp.sh ${install_dir}/install_client.sh + sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh >>install_client_temp.sh + mv install_client_temp.sh ${install_dir}/install_client.sh fi if [ "$pagMode" == "lite" ]; then - sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client.sh >> install_client_temp.sh - mv install_client_temp.sh ${install_dir}/install_client.sh + sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client.sh >>install_client_temp.sh + mv install_client_temp.sh ${install_dir}/install_client.sh fi chmod a+x ${install_dir}/install_client.sh # Copy example code mkdir -p ${install_dir}/examples examples_dir="${top_dir}/tests/examples" -cp -r ${examples_dir}/c ${install_dir}/examples +cp -r ${examples_dir}/c ${install_dir}/examples if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - cp -r ${examples_dir}/JDBC ${install_dir}/examples + cp -r ${examples_dir}/JDBC ${install_dir}/examples cp -r ${examples_dir}/matlab ${install_dir}/examples cp -r ${examples_dir}/python ${install_dir}/examples - cp -r ${examples_dir}/R ${install_dir}/examples - cp -r ${examples_dir}/go ${install_dir}/examples + cp -r ${examples_dir}/R ${install_dir}/examples + cp -r ${examples_dir}/go ${install_dir}/examples cp -r ${examples_dir}/nodejs ${install_dir}/examples - cp -r ${examples_dir}/C# ${install_dir}/examples + cp -r ${examples_dir}/C# ${install_dir}/examples fi # Copy driver mkdir -p ${install_dir}/driver @@ -152,15 +155,15 @@ mkdir -p ${install_dir}/connector if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ "$osType" != "Darwin" ]; then - cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: + cp ${build_dir}/lib/*.jar ${install_dir}/connector || : fi if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then cp -r ${connector_dir}/go ${install_dir}/connector else echo "WARNING: go connector not found, please check if want to use it!" fi - cp -r ${connector_dir}/python ${install_dir}/connector - cp -r ${connector_dir}/nodejs ${install_dir}/connector + cp -r ${connector_dir}/python ${install_dir}/connector + cp -r ${connector_dir}/nodejs ${install_dir}/connector fi # Copy release note # cp ${script_dir}/release_note ${install_dir} @@ -195,12 +198,12 @@ if [ "$pagMode" == "lite" ]; then fi if [ "$osType" != "Darwin" ]; then - tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : else - tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || : - mv "$(basename ${pkg_name}).tar.gz" .. - rm -rf ./* - mv ../"$(basename ${pkg_name}).tar.gz" . + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || : + mv "$(basename ${pkg_name}).tar.gz" .. + rm -rf ./* + mv ../"$(basename ${pkg_name}).tar.gz" . 
fi cd ${curr_dir} diff --git a/packaging/tools/makeclient_jh.sh b/packaging/tools/makeclient_jh.sh deleted file mode 100755 index bfbdcfc578bc7f8dfb15fef302d9817014ff3bef..0000000000000000000000000000000000000000 --- a/packaging/tools/makeclient_jh.sh +++ /dev/null @@ -1,180 +0,0 @@ -#!/bin/bash -# -# Generate tar.gz package for linux client in all os system -set -e -#set -x - -curr_dir=$(pwd) -compile_dir=$1 -version=$2 -build_time=$3 -cpuType=$4 -osType=$5 -verMode=$6 -verType=$7 -pagMode=$8 - -if [ "$osType" != "Darwin" ]; then - script_dir="$(dirname $(readlink -f $0))" - top_dir="$(readlink -f ${script_dir}/../..)" -else - script_dir=`dirname $0` - cd ${script_dir} - script_dir="$(pwd)" - top_dir=${script_dir}/../.. -fi - -# create compressed install file. -build_dir="${compile_dir}/build" -code_dir="${top_dir}/src" -release_dir="${top_dir}/release" - -#package_name='linux' - -if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/jh_iot-enterprise-client-${version}" -else - install_dir="${release_dir}/jh_iot-client-${version}" -fi - -# Directories and files. - -if [ "$osType" != "Darwin" ]; then - lib_files="${build_dir}/lib/libtaos.so.${version}" -else - bin_files="${build_dir}/bin/taos ${script_dir}/remove_client_jh.sh" - lib_files="${build_dir}/lib/libtaos.${version}.dylib" -fi - -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" -if [ "$verMode" == "cluster" ]; then - cfg_dir="${top_dir}/../enterprise/packaging/cfg" -else - cfg_dir="${top_dir}/packaging/cfg" -fi - -install_files="${script_dir}/install_client_jh.sh" - -# make directories. -mkdir -p ${install_dir} -mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg - -sed -i '/dataDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg -sed -i '/logDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg -sed -i "s/TDengine/jh_iot/g" ${install_dir}/cfg/taos.cfg - -mkdir -p ${install_dir}/bin -if [ "$osType" != "Darwin" ]; then - if [ "$pagMode" == "lite" ]; then - strip ${build_dir}/bin/taos - cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos - cp ${script_dir}/remove_client_jh.sh ${install_dir}/bin - else - cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos - cp ${script_dir}/remove_client_jh.sh ${install_dir}/bin - cp ${build_dir}/bin/taosdemo ${install_dir}/bin/jhdemo - cp ${build_dir}/bin/taosdump ${install_dir}/bin/jh_taosdump - cp ${script_dir}/set_core.sh ${install_dir}/bin - cp ${script_dir}/get_client.sh ${install_dir}/bin - #cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin - fi -else - cp ${bin_files} ${install_dir}/bin -fi -chmod a+x ${install_dir}/bin/* || : - -if [ -f ${build_dir}/bin/jemalloc-config ]; then - mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} - cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin - if [ -f ${build_dir}/bin/jemalloc.sh ]; then - cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin - fi - if [ -f ${build_dir}/bin/jeprof ]; then - cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin - fi - if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then - cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc - fi - if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then - cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib - ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so - fi - if [ -f ${build_dir}/lib/libjemalloc.a ]; then - cp 
${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then - cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then - cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig - fi - if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then - cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc - fi - if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then - cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 - fi -fi - -cd ${install_dir} - -if [ "$osType" != "Darwin" ]; then - tar -zcv -f jh_taos.tar.gz * --remove-files || : -else - tar -zcv -f jh_taos.tar.gz * || : - mv jh_taos.tar.gz .. - rm -rf ./* - mv ../jh_taos.tar.gz . -fi - -cd ${curr_dir} -cp ${install_files} ${install_dir} -if [ "$osType" == "Darwin" ]; then - sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_jh.sh >> install_client_jh_temp.sh - mv install_client_jh_temp.sh ${install_dir}/install_client_jh.sh -fi -if [ "$pagMode" == "lite" ]; then - sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_jh.sh >> install_client_jh_temp.sh - mv install_client_jh_temp.sh ${install_dir}/install_client_jh.sh -fi -chmod a+x ${install_dir}/install_client_jh.sh - -# Copy driver -mkdir -p ${install_dir}/driver -cp ${lib_files} ${install_dir}/driver - -cd ${release_dir} - -if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -else - echo "unknow verMode, nor cluster or edge" - exit 1 -fi - -if [ "$pagMode" == "lite" ]; then - pkg_name=${pkg_name}-Lite -fi - -if [ "$verType" == "beta" ]; then - pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} -else - echo "unknow verType, nor stable or beta" - exit 1 -fi - -if [ "$osType" != "Darwin" ]; then - tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -else - tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || : - mv "$(basename ${pkg_name}).tar.gz" .. - rm -rf ./* - mv ../"$(basename ${pkg_name}).tar.gz" . -fi - -cd ${curr_dir} diff --git a/packaging/tools/makeclient_kh.sh b/packaging/tools/makeclient_kh.sh deleted file mode 100755 index fe171664f62b07152c876846e8a64cc41b8c2eed..0000000000000000000000000000000000000000 --- a/packaging/tools/makeclient_kh.sh +++ /dev/null @@ -1,180 +0,0 @@ -#!/bin/bash -# -# Generate tar.gz package for linux client in all os system -set -e -#set -x - -curr_dir=$(pwd) -compile_dir=$1 -version=$2 -build_time=$3 -cpuType=$4 -osType=$5 -verMode=$6 -verType=$7 -pagMode=$8 - -if [ "$osType" != "Darwin" ]; then - script_dir="$(dirname $(readlink -f $0))" - top_dir="$(readlink -f ${script_dir}/../..)" -else - script_dir=`dirname $0` - cd ${script_dir} - script_dir="$(pwd)" - top_dir=${script_dir}/../.. -fi - -# create compressed install file. -build_dir="${compile_dir}/build" -code_dir="${top_dir}/src" -release_dir="${top_dir}/release" - -#package_name='linux' - -if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/KingHistorian-enterprise-client-${version}" -else - install_dir="${release_dir}/KingHistorian-client-${version}" -fi - -# Directories and files. 
- -if [ "$osType" != "Darwin" ]; then - lib_files="${build_dir}/lib/libtaos.so.${version}" -else - bin_files="${build_dir}/bin/taos ${script_dir}/remove_client_kh.sh" - lib_files="${build_dir}/lib/libtaos.${version}.dylib" -fi - -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" -if [ "$verMode" == "cluster" ]; then - cfg_dir="${top_dir}/../enterprise/packaging/cfg" -else - cfg_dir="${top_dir}/packaging/cfg" -fi - -install_files="${script_dir}/install_client_kh.sh" - -# make directories. -mkdir -p ${install_dir} -mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/kinghistorian.cfg - -sed -i '/dataDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg -sed -i '/logDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg -sed -i "s/TDengine/KingHistorian/g" ${install_dir}/cfg/kinghistorian.cfg - -mkdir -p ${install_dir}/bin -if [ "$osType" != "Darwin" ]; then - if [ "$pagMode" == "lite" ]; then - strip ${build_dir}/bin/taos - cp ${build_dir}/bin/taos ${install_dir}/bin/khclient - cp ${script_dir}/remove_client_kh.sh ${install_dir}/bin - else - cp ${build_dir}/bin/taos ${install_dir}/bin/khclient - cp ${script_dir}/remove_client_kh.sh ${install_dir}/bin - cp ${build_dir}/bin/taosdemo ${install_dir}/bin/khdemo - cp ${build_dir}/bin/taosdump ${install_dir}/bin/khdump - cp ${script_dir}/set_core.sh ${install_dir}/bin - cp ${script_dir}/get_client.sh ${install_dir}/bin - #cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin - fi -else - cp ${bin_files} ${install_dir}/bin -fi -chmod a+x ${install_dir}/bin/* || : - -if [ -f ${build_dir}/bin/jemalloc-config ]; then - mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} - cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin - if [ -f ${build_dir}/bin/jemalloc.sh ]; then - cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin - fi - if [ -f ${build_dir}/bin/jeprof ]; then - cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin - fi - if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then - cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc - fi - if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then - cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib - ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so - fi - if [ -f ${build_dir}/lib/libjemalloc.a ]; then - cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then - cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then - cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig - fi - if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then - cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc - fi - if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then - cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 - fi -fi - -cd ${install_dir} - -if [ "$osType" != "Darwin" ]; then - tar -zcv -f kinghistorian.tar.gz * --remove-files || : -else - tar -zcv -f kinghistorian.tar.gz * || : - mv kinghistorian.tar.gz .. - rm -rf ./* - mv ../kinghistorian.tar.gz . 
-fi - -cd ${curr_dir} -cp ${install_files} ${install_dir} -if [ "$osType" == "Darwin" ]; then - sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_kh.sh >> install_client_kh_temp.sh - mv install_client_kh_temp.sh ${install_dir}/install_client_kh.sh -fi -if [ "$pagMode" == "lite" ]; then - sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_kh.sh >> install_client_kh_temp.sh - mv install_client_kh_temp.sh ${install_dir}/install_client_kh.sh -fi -chmod a+x ${install_dir}/install_client_kh.sh - -# Copy driver -mkdir -p ${install_dir}/driver -cp ${lib_files} ${install_dir}/driver - -cd ${release_dir} - -if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -else - echo "unknow verMode, nor cluster or edge" - exit 1 -fi - -if [ "$pagMode" == "lite" ]; then - pkg_name=${pkg_name}-Lite -fi - -if [ "$verType" == "beta" ]; then - pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} -else - echo "unknow verType, nor stable or beta" - exit 1 -fi - -if [ "$osType" != "Darwin" ]; then - tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -else - tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || : - mv "$(basename ${pkg_name}).tar.gz" .. - rm -rf ./* - mv ../"$(basename ${pkg_name}).tar.gz" . -fi - -cd ${curr_dir} diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh deleted file mode 100755 index 0931d1350197bf31cfaa9f8a87cd1fe50d28ced3..0000000000000000000000000000000000000000 --- a/packaging/tools/makeclient_power.sh +++ /dev/null @@ -1,264 +0,0 @@ -#!/bin/bash -# -# Generate tar.gz package for linux client in all os system -set -e -#set -x - -curr_dir=$(pwd) -compile_dir=$1 -version=$2 -build_time=$3 -cpuType=$4 -osType=$5 -verMode=$6 -verType=$7 -pagMode=$8 - -if [ "$osType" != "Darwin" ]; then - script_dir="$(dirname $(readlink -f $0))" - top_dir="$(readlink -f ${script_dir}/../..)" -else - script_dir=`dirname $0` - cd ${script_dir} - script_dir="$(pwd)" - top_dir=${script_dir}/../.. -fi - -# create compressed install file. -build_dir="${compile_dir}/build" -code_dir="${top_dir}/src" -release_dir="${top_dir}/release" - -#package_name='linux' - -if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/PowerDB-enterprise-client-${version}" -else - install_dir="${release_dir}/PowerDB-client-${version}" -fi - -# Directories and files. - -if [ "$osType" != "Darwin" ]; then -# if [ "$pagMode" == "lite" ]; then -# strip ${build_dir}/bin/powerd -# strip ${build_dir}/bin/power -# bin_files="${build_dir}/bin/power ${script_dir}/remove_client_power.sh" -# else -# bin_files="${build_dir}/bin/power ${build_dir}/bin/powerdemo ${script_dir}/remove_client_power.sh ${script_dir}/set_core.sh" -# fi - lib_files="${build_dir}/lib/libtaos.so.${version}" -else - bin_files="${build_dir}/bin/power ${script_dir}/remove_client_power.sh" - lib_files="${build_dir}/lib/libtaos.${version}.dylib" -fi - -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" -if [ "$verMode" == "cluster" ]; then - cfg_dir="${top_dir}/../enterprise/packaging/cfg" -else - cfg_dir="${top_dir}/packaging/cfg" -fi - -install_files="${script_dir}/install_client_power.sh" - -# make directories. 
-mkdir -p ${install_dir} -mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg - -if [ -f ${build_dir}/bin/jemalloc-config ]; then - mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} - cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin - if [ -f ${build_dir}/bin/jemalloc.sh ]; then - cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin - fi - if [ -f ${build_dir}/bin/jeprof ]; then - cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin - fi - if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then - cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc - fi - if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then - cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib - ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so - fi - if [ -f ${build_dir}/lib/libjemalloc.a ]; then - cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then - cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then - cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig - fi - if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then - cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc - fi - if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then - cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 - fi -fi - -sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg -sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg -sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/taos.cfg - -mkdir -p ${install_dir}/bin -if [ "$osType" != "Darwin" ]; then - if [ "$pagMode" == "lite" ]; then - strip ${build_dir}/bin/taos - cp ${build_dir}/bin/taos ${install_dir}/bin/power - cp ${script_dir}/remove_client_power.sh ${install_dir}/bin - else - cp ${build_dir}/bin/taos ${install_dir}/bin/power - cp ${script_dir}/remove_client_power.sh ${install_dir}/bin - cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo - cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump - cp ${script_dir}/set_core.sh ${install_dir}/bin - cp ${script_dir}/get_client.sh ${install_dir}/bin - #cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin - fi -else - cp ${bin_files} ${install_dir}/bin -fi -chmod a+x ${install_dir}/bin/* || : - -if [ -f ${build_dir}/bin/jemalloc-config ]; then - mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} - cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin - if [ -f ${build_dir}/bin/jemalloc.sh ]; then - cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin - fi - if [ -f ${build_dir}/bin/jeprof ]; then - cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin - fi - if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then - cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc - fi - if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then - cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib - ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so - fi - if [ -f ${build_dir}/lib/libjemalloc.a ]; then - cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then 
- cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then - cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig - fi - if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then - cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc - fi - if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then - cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 - fi -fi - -cd ${install_dir} - -if [ "$osType" != "Darwin" ]; then - tar -zcv -f power.tar.gz * --remove-files || : -else - tar -zcv -f power.tar.gz * || : - mv power.tar.gz .. - rm -rf ./* - mv ../power.tar.gz . -fi - -cd ${curr_dir} -cp ${install_files} ${install_dir} -if [ "$osType" == "Darwin" ]; then - sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_power.sh >> install_client_power_temp.sh - mv install_client_power_temp.sh ${install_dir}/install_client_power.sh -fi -if [ "$pagMode" == "lite" ]; then - sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_power.sh >> install_client_power_temp.sh - mv install_client_power_temp.sh ${install_dir}/install_client_power.sh -fi -chmod a+x ${install_dir}/install_client_power.sh - -# Copy example code -mkdir -p ${install_dir}/examples -examples_dir="${top_dir}/tests/examples" -cp -r ${examples_dir}/c ${install_dir}/examples -sed -i '/passwd/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c -sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c - -if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - cp -r ${examples_dir}/JDBC ${install_dir}/examples - cp -r ${examples_dir}/matlab ${install_dir}/examples - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/matlab/TDengineDemo.m - cp -r ${examples_dir}/python ${install_dir}/examples - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/python/read_example.py - cp -r ${examples_dir}/R ${install_dir}/examples - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt - cp -r ${examples_dir}/go ${install_dir}/examples - sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/taosdemo.go -fi -# Copy driver -mkdir -p ${install_dir}/driver -cp ${lib_files} ${install_dir}/driver - -# Copy connector -connector_dir="${code_dir}/connector" -mkdir -p ${install_dir}/connector - -if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - if [ "$osType" != "Darwin" ]; then - cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: - fi - if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then - cp -r ${connector_dir}/go ${install_dir}/connector - else - echo "WARNING: go connector not found, please check if want to use it!" 
- fi - cp -r ${connector_dir}/python ${install_dir}/connector - - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py - - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py - - sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py -fi -# Copy release note -# cp ${script_dir}/release_note ${install_dir} - -# exit 1 - -cd ${release_dir} - -if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -else - echo "unknow verMode, nor cluster or edge" - exit 1 -fi - -if [ "$pagMode" == "lite" ]; then - pkg_name=${pkg_name}-Lite -fi - -if [ "$verType" == "beta" ]; then - pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} -else - echo "unknow verType, nor stable or beta" - exit 1 -fi - -if [ "$osType" != "Darwin" ]; then - tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -else - tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || : - mv "$(basename ${pkg_name}).tar.gz" .. - rm -rf ./* - mv ../"$(basename ${pkg_name}).tar.gz" . -fi - -cd ${curr_dir} diff --git a/packaging/tools/makeclient_pro.sh b/packaging/tools/makeclient_pro.sh deleted file mode 100755 index 023c16cf820481fcc16bb26f31e6acf58d8edbc1..0000000000000000000000000000000000000000 --- a/packaging/tools/makeclient_pro.sh +++ /dev/null @@ -1,220 +0,0 @@ -#!/bin/bash -# -# Generate tar.gz package for linux client in all os system -set -e -#set -x - -curr_dir=$(pwd) -compile_dir=$1 -version=$2 -build_time=$3 -cpuType=$4 -osType=$5 -verMode=$6 -verType=$7 -pagMode=$8 - -if [ "$osType" != "Darwin" ]; then - script_dir="$(dirname $(readlink -f $0))" - top_dir="$(readlink -f ${script_dir}/../..)" -else - script_dir=`dirname $0` - cd ${script_dir} - script_dir="$(pwd)" - top_dir=${script_dir}/../.. -fi - -# create compressed install file. -build_dir="${compile_dir}/build" -code_dir="${top_dir}/src" -release_dir="${top_dir}/release" - -#package_name='linux' - -if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/ProDB-enterprise-client-${version}" -else - install_dir="${release_dir}/ProDB-client-${version}" -fi - -# Directories and files. - -if [ "$osType" != "Darwin" ]; then - lib_files="${build_dir}/lib/libtaos.so.${version}" -else - bin_files="${build_dir}/bin/taos ${script_dir}/remove_client_pro.sh" - lib_files="${build_dir}/lib/libtaos.${version}.dylib" -fi - -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" -if [ "$verMode" == "cluster" ]; then - cfg_dir="${top_dir}/../enterprise/packaging/cfg" -else - cfg_dir="${top_dir}/packaging/cfg" -fi - -install_files="${script_dir}/install_client_pro.sh" - -# make directories. 
-mkdir -p ${install_dir} -mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/prodb.cfg - -sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg -sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg -sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/prodb.cfg - -mkdir -p ${install_dir}/bin -if [ "$osType" != "Darwin" ]; then - if [ "$pagMode" == "lite" ]; then - strip ${build_dir}/bin/taos - cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc - cp ${script_dir}/remove_client_pro.sh ${install_dir}/bin - else - cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc - cp ${script_dir}/remove_client_pro.sh ${install_dir}/bin - cp ${build_dir}/bin/taosdemo ${install_dir}/bin/prodemo - cp ${build_dir}/bin/taosdump ${install_dir}/bin/prodump - cp ${script_dir}/set_core.sh ${install_dir}/bin - cp ${script_dir}/get_client.sh ${install_dir}/bin - #cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin - fi -else - cp ${bin_files} ${install_dir}/bin -fi -chmod a+x ${install_dir}/bin/* || : - -if [ -f ${build_dir}/bin/jemalloc-config ]; then - mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} - cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin - if [ -f ${build_dir}/bin/jemalloc.sh ]; then - cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin - fi - if [ -f ${build_dir}/bin/jeprof ]; then - cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin - fi - if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then - cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc - fi - if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then - cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib - ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so - fi - if [ -f ${build_dir}/lib/libjemalloc.a ]; then - cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then - cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then - cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig - fi - if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then - cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc - fi - if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then - cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 - fi -fi - -cd ${install_dir} - -if [ "$osType" != "Darwin" ]; then - tar -zcv -f prodb.tar.gz * --remove-files || : -else - tar -zcv -f prodb.tar.gz * || : - mv prodb.tar.gz .. - rm -rf ./* - mv ../prodb.tar.gz . 
-fi - -cd ${curr_dir} -cp ${install_files} ${install_dir} -if [ "$osType" == "Darwin" ]; then - sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_pro.sh >> install_client_prodb_temp.sh - mv install_client_prodb_temp.sh ${install_dir}/install_client_pro.sh -fi -if [ "$pagMode" == "lite" ]; then - sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_pro.sh >> install_client_prodb_temp.sh - mv install_client_prodb_temp.sh ${install_dir}/install_client_pro.sh -fi -chmod a+x ${install_dir}/install_client_pro.sh - -# Copy example code -mkdir -p ${install_dir}/examples -examples_dir="${top_dir}/tests/examples" -cp -r ${examples_dir}/c ${install_dir}/examples -sed -i '/passwd/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c -sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c - -if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - cp -r ${examples_dir}/JDBC ${install_dir}/examples - cp -r ${examples_dir}/matlab ${install_dir}/examples - mv ${install_dir}/examples/matlab/TDengineDemo.m ${install_dir}/examples/matlab/ProDBDemo.m - sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/matlab/ProDBDemo.m - cp -r ${examples_dir}/python ${install_dir}/examples - sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/python/read_example.py - cp -r ${examples_dir}/R ${install_dir}/examples - sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/R/command.txt - cp -r ${examples_dir}/go ${install_dir}/examples - mv ${install_dir}/examples/go/taosdemo.go ${install_dir}/examples/go/prodemo.go - sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/go/prodemo.go -fi -# Copy driver -mkdir -p ${install_dir}/driver -cp ${lib_files} ${install_dir}/driver - -# Copy connector -connector_dir="${code_dir}/connector" -mkdir -p ${install_dir}/connector - -if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - if [ "$osType" != "Darwin" ]; then - cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: - fi - if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then - cp -r ${connector_dir}/go ${install_dir}/connector - else - echo "WARNING: go connector not found, please check if want to use it!" - fi - cp -r ${connector_dir}/python ${install_dir}/connector - mv ${install_dir}/connector/python/taos ${install_dir}/connector/python/prodb - sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/cinterface.py - sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/subscription.py - sed -i '/self._password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/connection.py -fi - -cd ${release_dir} - -if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -else - echo "unknow verMode, nor cluster or edge" - exit 1 -fi - -if [ "$pagMode" == "lite" ]; then - pkg_name=${pkg_name}-Lite -fi - -if [ "$verType" == "beta" ]; then - pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} -else - echo "unknow verType, nor stable or beta" - exit 1 -fi - -if [ "$osType" != "Darwin" ]; then - tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -else - tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || : - mv "$(basename ${pkg_name}).tar.gz" .. - rm -rf ./* - mv ../"$(basename ${pkg_name}).tar.gz" . 
-fi - -cd ${curr_dir} diff --git a/packaging/tools/makeclient_tq.sh b/packaging/tools/makeclient_tq.sh deleted file mode 100755 index d554a05f6af40b79362d397071026591cf6714d4..0000000000000000000000000000000000000000 --- a/packaging/tools/makeclient_tq.sh +++ /dev/null @@ -1,224 +0,0 @@ -#!/bin/bash -# -# Generate tar.gz package for linux client in all os system -set -e -#set -x - -curr_dir=$(pwd) -compile_dir=$1 -version=$2 -build_time=$3 -cpuType=$4 -osType=$5 -verMode=$6 -verType=$7 -pagMode=$8 - -if [ "$osType" != "Darwin" ]; then - script_dir="$(dirname $(readlink -f $0))" - top_dir="$(readlink -f ${script_dir}/../..)" -else - script_dir=`dirname $0` - cd ${script_dir} - script_dir="$(pwd)" - top_dir=${script_dir}/../.. -fi - -# create compressed install file. -build_dir="${compile_dir}/build" -code_dir="${top_dir}/src" -release_dir="${top_dir}/release" - -#package_name='linux' - -if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/TQ-enterprise-client-${version}" -else - install_dir="${release_dir}/TQ-client-${version}" -fi - -# Directories and files. - -if [ "$osType" != "Darwin" ]; then - lib_files="${build_dir}/lib/libtaos.so.${version}" -else - bin_files="${build_dir}/bin/tq ${script_dir}/remove_client_tq.sh" - lib_files="${build_dir}/lib/libtaos.${version}.dylib" -fi - -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" -if [ "$verMode" == "cluster" ]; then - cfg_dir="${top_dir}/../enterprise/packaging/cfg" -else - cfg_dir="${top_dir}/packaging/cfg" -fi - -install_files="${script_dir}/install_client_tq.sh" - -# make directories. -mkdir -p ${install_dir} -mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/tq.cfg - -sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg -sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg -sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/tq.cfg - -mkdir -p ${install_dir}/bin -if [ "$osType" != "Darwin" ]; then - if [ "$pagMode" == "lite" ]; then - strip ${build_dir}/bin/taos - cp ${build_dir}/bin/taos ${install_dir}/bin/tq - cp ${script_dir}/remove_client_tq.sh ${install_dir}/bin - else - cp ${build_dir}/bin/taos ${install_dir}/bin/tq - cp ${script_dir}/remove_client_tq.sh ${install_dir}/bin - cp ${build_dir}/bin/taosdemo ${install_dir}/bin/tqdemo - cp ${build_dir}/bin/taosdump ${install_dir}/bin/tqdump - cp ${script_dir}/set_core.sh ${install_dir}/bin - cp ${script_dir}/get_client.sh ${install_dir}/bin - #cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin - fi -else - cp ${bin_files} ${install_dir}/bin -fi -chmod a+x ${install_dir}/bin/* || : - -if [ -f ${build_dir}/bin/jemalloc-config ]; then - mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} - cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin - if [ -f ${build_dir}/bin/jemalloc.sh ]; then - cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin - fi - if [ -f ${build_dir}/bin/jeprof ]; then - cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin - fi - if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then - cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc - fi - if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then - cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib - ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so - fi - if [ -f ${build_dir}/lib/libjemalloc.a ]; then - cp 
${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then - cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib - fi - if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then - cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig - fi - if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then - cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc - fi - if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then - cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 - fi -fi - -cd ${install_dir} - -if [ "$osType" != "Darwin" ]; then - tar -zcv -f tq.tar.gz * --remove-files || : -else - tar -zcv -f tq.tar.gz * || : - mv tq.tar.gz .. - rm -rf ./* - mv ../tq.tar.gz . -fi - -cd ${curr_dir} -cp ${install_files} ${install_dir} -if [ "$osType" == "Darwin" ]; then - sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_tq.sh >> install_client_tq_temp.sh - mv install_client_tq_temp.sh ${install_dir}/install_client_tq.sh -fi -if [ "$pagMode" == "lite" ]; then - sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_tq.sh >> install_client_tq_temp.sh - mv install_client_tq_temp.sh ${install_dir}/install_client_tq.sh -fi -chmod a+x ${install_dir}/install_client_tq.sh - -# Copy example code -mkdir -p ${install_dir}/examples -examples_dir="${top_dir}/tests/examples" -cp -r ${examples_dir}/c ${install_dir}/examples -sed -i '/passwd/ {s/taosdata/tqueue/g}' ${install_dir}/examples/c/*.c -sed -i '/root/ {s/taosdata/tqueue/g}' ${install_dir}/examples/c/*.c - -if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - cp -r ${examples_dir}/JDBC ${install_dir}/examples - cp -r ${examples_dir}/matlab ${install_dir}/examples - sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/matlab/TDengineDemo.m - cp -r ${examples_dir}/python ${install_dir}/examples - sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/python/read_example.py - cp -r ${examples_dir}/R ${install_dir}/examples - sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/R/command.txt - cp -r ${examples_dir}/go ${install_dir}/examples - sed -i '/root/ {s/taosdata/tqueue/g}' ${install_dir}/examples/go/taosdemo.go -fi -# Copy driver -mkdir -p ${install_dir}/driver -cp ${lib_files} ${install_dir}/driver - -# Copy connector -connector_dir="${code_dir}/connector" -mkdir -p ${install_dir}/connector - -if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - if [ "$osType" != "Darwin" ]; then - cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: - fi - if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then - cp -r ${connector_dir}/go ${install_dir}/connector - else - echo "WARNING: go connector not found, please check if want to use it!" 
- fi - cp -r ${connector_dir}/python ${install_dir}/connector - - sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/cinterface.py - - sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/subscription.py - - sed -i '/self._password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/connection.py -fi -# Copy release note -# cp ${script_dir}/release_note ${install_dir} - -# exit 1 - -cd ${release_dir} - -if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -else - echo "unknow verMode, nor cluster or edge" - exit 1 -fi - -if [ "$pagMode" == "lite" ]; then - pkg_name=${pkg_name}-Lite -fi - -if [ "$verType" == "beta" ]; then - pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} -else - echo "unknow verType, nor stable or beta" - exit 1 -fi - -if [ "$osType" != "Darwin" ]; then - tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -else - tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || : - mv "$(basename ${pkg_name}).tar.gz" .. - rm -rf ./* - mv ../"$(basename ${pkg_name}).tar.gz" . -fi - -cd ${curr_dir} diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index e3c1ab269abadad8b75d31a0a3c9dfd1336d36f2..55332c96c21b52155aa13e572d52f523e7831012 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -19,6 +19,12 @@ versionComp=$9 script_dir="$(dirname $(readlink -f $0))" top_dir="$(readlink -f ${script_dir}/../..)" +productName="TDengine" +serverName="taosd" +clientName="taos" +configFile="taos.cfg" +tarName="taos.tar.gz" + # create compressed install file. 
build_dir="${compile_dir}/build" code_dir="${top_dir}/src" @@ -26,9 +32,9 @@ release_dir="${top_dir}/release" #package_name='linux' if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/TDengine-enterprise-server-${version}" + install_dir="${release_dir}/${productName}-enterprise-server-${version}" else - install_dir="${release_dir}/TDengine-server-${version}" + install_dir="${release_dir}/${productName}-server-${version}" fi if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then @@ -45,19 +51,19 @@ fi # Directories and files if [ "$pagMode" == "lite" ]; then - strip ${build_dir}/bin/taosd - strip ${build_dir}/bin/taos + strip ${build_dir}/bin/${serverName} + strip ${build_dir}/bin/${clientName} # lite version doesn't include taosadapter, which will lead to no restful interface - bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh ${script_dir}/startPre.sh" + bin_files="${build_dir}/bin/${serverName} ${build_dir}/bin/${clientName} ${script_dir}/remove.sh ${script_dir}/startPre.sh" taostools_bin_files="" else - bin_files="${build_dir}/bin/taosd \ - ${build_dir}/bin/taos \ + bin_files="${build_dir}/bin/${serverName} \ + ${build_dir}/bin/${clientName} \ ${build_dir}/bin/taosadapter \ ${build_dir}/bin/tarbitrator\ ${script_dir}/remove.sh \ ${script_dir}/set_core.sh \ - ${script_dir}/run_taosd.sh \ + ${script_dir}/run_taosd_and_taosadapter.sh \ ${script_dir}/startPre.sh \ ${script_dir}/taosd-dump-cfg.gdb" @@ -76,13 +82,6 @@ fi install_files="${script_dir}/install.sh" nginx_dir="${code_dir}/../../enterprise/src/plugins/web" -# Init file -#init_dir=${script_dir}/deb -#if [ $package_type = "centos" ]; then -# init_dir=${script_dir}/rpm -#fi -#init_files=${init_dir}/taosd -# temp use rpm's taosd. TODO: later modify according to os type init_file_deb=${script_dir}/../deb/taosd init_file_rpm=${script_dir}/../rpm/taosd init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord @@ -91,7 +90,7 @@ init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord # make directories. 
mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/${configFile} ${install_dir}/cfg/${configFile} if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then @@ -102,19 +101,21 @@ if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then cp ${compile_dir}/test/cfg/taosadapter.service ${install_dir}/cfg || : fi -if [ -f "${cfg_dir}/taosd.service" ]; then - cp ${cfg_dir}/taosd.service ${install_dir}/cfg || : +if [ -f "${cfg_dir}/${serverName}.service" ]; then + cp ${cfg_dir}/${serverName}.service ${install_dir}/cfg || : fi + if [ -f "${cfg_dir}/tarbitratord.service" ]; then cp ${cfg_dir}/tarbitratord.service ${install_dir}/cfg || : fi + if [ -f "${cfg_dir}/nginxd.service" ]; then cp ${cfg_dir}/nginxd.service ${install_dir}/cfg || : fi mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : -mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/taosd.deb -mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taosd.rpm +mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${serverName}.deb +mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : @@ -123,9 +124,6 @@ if [ -n "${taostools_bin_files}" ]; then mkdir -p ${taostools_install_dir}/bin \ && cp ${taostools_bin_files} ${taostools_install_dir}/bin \ && chmod a+x ${taostools_install_dir}/bin/* || : -# [ -f ${taostools_install_dir}/bin/taosBenchmark ] && \ -# ln -sf ${taostools_install_dir}/bin/taosBenchmark \ -# ${taostools_install_dir}/bin/taosdemo if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh ]; then cp ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh \ @@ -202,10 +200,10 @@ if [ "$verMode" == "cluster" ]; then fi cd ${install_dir} -tar -zcv -f taos.tar.gz * --remove-files || : +tar -zcv -f ${tarName} * --remove-files || : exitcode=$? if [ "$exitcode" != "0" ]; then - echo "tar taos.tar.gz error !!!" + echo "tar ${tarName} error !!!" exit $exitcode fi diff --git a/packaging/tools/makepkg_jh.sh b/packaging/tools/makepkg_jh.sh deleted file mode 100755 index fdc7ba5c79f66bdc5d9567999c644c2a3b4ad1f1..0000000000000000000000000000000000000000 --- a/packaging/tools/makepkg_jh.sh +++ /dev/null @@ -1,161 +0,0 @@ -#!/bin/bash -# -# Generate tar.gz package for all os system - -set -e -#set -x - -curr_dir=$(pwd) -compile_dir=$1 -version=$2 -build_time=$3 -cpuType=$4 -osType=$5 -verMode=$6 -verType=$7 -pagMode=$8 -versionComp=$9 - -script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -f ${script_dir}/../..)" - -# create compressed install file. 
-build_dir="${compile_dir}/build" -code_dir="${top_dir}/src" -release_dir="${top_dir}/release" - -# package_name='linux' -if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/jh_iot-enterprise-server-${version}" -else - install_dir="${release_dir}/jh_iot-server-${version}" -fi - -lib_files="${build_dir}/lib/libtaos.so.${version}" -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" -if [ "$verMode" == "cluster" ]; then - cfg_dir="${top_dir}/../enterprise/packaging/cfg" -else - cfg_dir="${top_dir}/packaging/cfg" -fi -install_files="${script_dir}/install_jh.sh" -nginx_dir="${code_dir}/../../enterprise/src/plugins/web" - -# make directories. -mkdir -p ${install_dir} -mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg -mkdir -p ${install_dir}/bin - -# bin -if [ "$pagMode" == "lite" ]; then - strip ${build_dir}/bin/taosd - strip ${build_dir}/bin/taos -else - cp ${build_dir}/bin/taosdemo ${install_dir}/bin/jhdemo - cp ${build_dir}/bin/taosdump ${install_dir}/bin/jh_taosdump - cp ${build_dir}/bin/tarbitrator ${install_dir}/bin - cp ${script_dir}/set_core.sh ${install_dir}/bin - cp ${script_dir}/run_taosd.sh ${install_dir}/bin - cp ${script_dir}/get_client.sh ${install_dir}/bin - cp ${script_dir}/startPre.sh ${install_dir}/bin - cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin -fi -cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos -cp ${build_dir}/bin/taosd ${install_dir}/bin/jh_taosd -cp ${script_dir}/remove_jh.sh ${install_dir}/bin -chmod a+x ${install_dir}/bin/* || : - -# cluster -if [ "$verMode" == "cluster" ]; then - sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_jh.sh >> remove_jh_temp.sh - mv remove_jh_temp.sh ${install_dir}/bin/remove_jh.sh - - mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd - cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png - rm -rf ${install_dir}/nginxd/png - - # replace the OEM name - sed -i -e 's/www.taosdata.com/www.jhict.com/g' $(grep -r 'www.taosdata.com' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") - sed -i -e 's/2017/2021/g' $(grep -r '2017' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") - sed -i -e 's/TAOS Data/Jinheng Technology/g' $(grep -r 'TAOS Data' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") - sed -i -e 's/taosd/jh_taosd/g' `grep -r 'taosd' ${install_dir}/nginxd | grep -E '*\.js\s*.*' | sed -r -e 's/(.*\.js):\s*(.*)/\1/g' | sort | uniq` - sed -i -e 's/taosd<\/th>/jh_taosd<\/th>/g' ${install_dir}/nginxd/admin/monitor.html - sed -i -e "s/data:\['taosd', 'system'\],/data:\['jh_taosd', 'system'\],/g" ${install_dir}/nginxd/admin/monitor.html - sed -i -e "s/name: 'taosd',/name: 'jh_taosd',/g" ${install_dir}/nginxd/admin/monitor.html - sed -i "s/TDengine/jh_iot/g" ${install_dir}/nginxd/admin/*.html - sed -i "s/TDengine/jh_iot/g" ${install_dir}/nginxd/admin/js/*.js - - if [ "$cpuType" == "aarch64" ]; then - cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ - elif [ "$cpuType" == "aarch32" ]; then - cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/ - fi - rm -rf ${install_dir}/nginxd/sbin/arm -fi - -sed -i '/dataDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg -sed -i '/logDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg -sed -i "s/TDengine/jh_iot/g" ${install_dir}/cfg/taos.cfg -sed -i "s/support@taosdata.com/jhkj@njsteel.com.cn/g" 
${install_dir}/cfg/taos.cfg -sed -i "s/taos client/client/g" ${install_dir}/cfg/taos.cfg -sed -i "s/taosd/server/g" ${install_dir}/cfg/taos.cfg - -cd ${install_dir} -tar -zcv -f jh_taos.tar.gz * --remove-files || : -exitcode=$? -if [ "$exitcode" != "0" ]; then - echo "tar jh_taos.tar.gz error !!!" - exit $exitcode -fi - -cd ${curr_dir} -cp ${install_files} ${install_dir} -if [ "$verMode" == "cluster" ]; then - sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_jh.sh >> install_jh_temp.sh - mv install_jh_temp.sh ${install_dir}/install_jh.sh -fi -if [ "$pagMode" == "lite" ]; then - sed -e "s/pagMode=full/pagMode=lite/g" -e "s/taos_history/jh_taos_history/g" ${install_dir}/install.sh >> install_jh_temp.sh - mv install_jh_temp.sh ${install_dir}/install_jh.sh -fi - -sed -i "/install_connector$/d" ${install_dir}/install_jh.sh -sed -i "/install_examples$/d" ${install_dir}/install_jh.sh -chmod a+x ${install_dir}/install_jh.sh - -# Copy driver -mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt - -cd ${release_dir} - -if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -else - echo "unknow verMode, nor cluster or edge" - exit 1 -fi - -if [ "$pagMode" == "lite" ]; then - pkg_name=${pkg_name}-Lite -fi - -if [ "$verType" == "beta" ]; then - pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} -else - echo "unknow verType, nor stabel or beta" - exit 1 -fi - -tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -exitcode=$? -if [ "$exitcode" != "0" ]; then - echo "tar ${pkg_name}.tar.gz error !!!" - exit $exitcode -fi - -cd ${curr_dir} diff --git a/packaging/tools/makepkg_kh.sh b/packaging/tools/makepkg_kh.sh deleted file mode 100755 index 35efd2e0d7d4b5dad8bc2b360910eca69118bc6c..0000000000000000000000000000000000000000 --- a/packaging/tools/makepkg_kh.sh +++ /dev/null @@ -1,161 +0,0 @@ -#!/bin/bash -# -# Generate tar.gz package for all os system - -set -e -#set -x - -curr_dir=$(pwd) -compile_dir=$1 -version=$2 -build_time=$3 -cpuType=$4 -osType=$5 -verMode=$6 -verType=$7 -pagMode=$8 -versionComp=$9 - -script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -f ${script_dir}/../..)" - -# create compressed install file. -build_dir="${compile_dir}/build" -code_dir="${top_dir}/src" -release_dir="${top_dir}/release" - -# package_name='linux' -if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/KingHistorian-enterprise-server-${version}" -else - install_dir="${release_dir}/KingHistorian-server-${version}" -fi - -lib_files="${build_dir}/lib/libtaos.so.${version}" -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" -if [ "$verMode" == "cluster" ]; then - cfg_dir="${top_dir}/../enterprise/packaging/cfg" -else - cfg_dir="${top_dir}/packaging/cfg" -fi -install_files="${script_dir}/install_kh.sh" -nginx_dir="${code_dir}/../../enterprise/src/plugins/web" - -# make directories. 
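For orientation, every one of these removed OEM scripts ends with the same verMode/verType/pagMode naming ladder (it appears below for KingHistorian and again for PowerDB, ProDB, and TQ). A hypothetical trace, with illustrative values only:

    version=2.3.0.0; osType=Linux; cpuType=x64        # illustrative, not from the diff
    install_dir="KingHistorian-server-${version}"     # edge; cluster inserts -enterprise-
    pkg_name="${install_dir}-${osType}-${cpuType}"
    # pagMode=lite appends -Lite, verType=beta appends -beta, giving e.g.
    # KingHistorian-server-2.3.0.0-Linux-x64.tar.gz after the final tar step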
-mkdir -p ${install_dir} -mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/kinghistorian.cfg -mkdir -p ${install_dir}/bin - -# bin -if [ "$pagMode" == "lite" ]; then - strip ${build_dir}/bin/taosd - strip ${build_dir}/bin/taos -else - cp ${build_dir}/bin/taosdemo ${install_dir}/bin/khdemo - cp ${build_dir}/bin/taosdump ${install_dir}/bin/khdump - cp ${build_dir}/bin/tarbitrator ${install_dir}/bin - cp ${script_dir}/set_core.sh ${install_dir}/bin - cp ${script_dir}/run_taosd.sh ${install_dir}/bin - cp ${script_dir}/get_client.sh ${install_dir}/bin - cp ${script_dir}/startPre.sh ${install_dir}/bin - cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin -fi -cp ${build_dir}/bin/taos ${install_dir}/bin/khclient -cp ${build_dir}/bin/taosd ${install_dir}/bin/khserver -cp ${script_dir}/remove_kh.sh ${install_dir}/bin -chmod a+x ${install_dir}/bin/* || : - -# cluster -if [ "$verMode" == "cluster" ]; then - sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_kh.sh >> remove_kh_temp.sh - mv remove_kh_temp.sh ${install_dir}/bin/remove_kh.sh - - mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd - cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png - rm -rf ${install_dir}/nginxd/png - - # replace the OEM name, add by yangzy@2021-09-22 - sed -i -e 's/www.taosdata.com/www.wellintech.com/g' $(grep -r 'www.taosdata.com' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") - sed -i -e 's/2017/2021/g' $(grep -r '2017' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") - sed -i -e 's/TAOS Data/Wellintech/g' $(grep -r 'TAOS Data' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") - sed -i -e 's/taosd/khserver/g' `grep -r 'taosd' ${install_dir}/nginxd | grep -E '*\.js\s*.*' | sed -r -e 's/(.*\.js):\s*(.*)/\1/g' | sort | uniq` - sed -i -e 's/taosd<\/th>/khserver<\/th>/g' ${install_dir}/nginxd/admin/monitor.html - sed -i -e "s/data:\['taosd', 'system'\],/data:\['khserver', 'system'\],/g" ${install_dir}/nginxd/admin/monitor.html - sed -i -e "s/name: 'taosd',/name: 'khserver',/g" ${install_dir}/nginxd/admin/monitor.html - sed -i "s/TDengine/KingHistorian/g" ${install_dir}/nginxd/admin/*.html - sed -i "s/TDengine/KingHistorian/g" ${install_dir}/nginxd/admin/js/*.js - - if [ "$cpuType" == "aarch64" ]; then - cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ - elif [ "$cpuType" == "aarch32" ]; then - cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/ - fi - rm -rf ${install_dir}/nginxd/sbin/arm -fi - -sed -i '/dataDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg -sed -i '/logDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg -sed -i "s/TDengine/KingHistorian/g" ${install_dir}/cfg/kinghistorian.cfg -sed -i "s/support@taosdata.com/support@wellintech.com/g" ${install_dir}/cfg/kinghistorian.cfg -sed -i "s/taos client/khclient/g" ${install_dir}/cfg/kinghistorian.cfg -sed -i "s/taosd/khserver/g" ${install_dir}/cfg/kinghistorian.cfg - -cd ${install_dir} -tar -zcv -f kinghistorian.tar.gz * --remove-files || : -exitcode=$? -if [ "$exitcode" != "0" ]; then - echo "tar kinghistorian.tar.gz error !!!" 
- exit $exitcode -fi - -cd ${curr_dir} -cp ${install_files} ${install_dir} -if [ "$verMode" == "cluster" ]; then - sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_kh.sh >> install_kh_temp.sh - mv install_kh_temp.sh ${install_dir}/install_kh.sh -fi -if [ "$pagMode" == "lite" ]; then - sed -e "s/pagMode=full/pagMode=lite/g" -e "s/taos_history/kh_history/g" ${install_dir}/install.sh >> install_kh_temp.sh - mv install_kh_temp.sh ${install_dir}/install_kh.sh -fi - -sed -i "/install_connector$/d" ${install_dir}/install_kh.sh -sed -i "/install_examples$/d" ${install_dir}/install_kh.sh -chmod a+x ${install_dir}/install_kh.sh - -# Copy driver -mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt - -cd ${release_dir} - -if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -else - echo "unknow verMode, nor cluster or edge" - exit 1 -fi - -if [ "$pagMode" == "lite" ]; then - pkg_name=${pkg_name}-Lite -fi - -if [ "$verType" == "beta" ]; then - pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} -else - echo "unknow verType, nor stabel or beta" - exit 1 -fi - -tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -exitcode=$? -if [ "$exitcode" != "0" ]; then - echo "tar ${pkg_name}.tar.gz error !!!" - exit $exitcode -fi - -cd ${curr_dir} diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh deleted file mode 100755 index ab2a662b118b9a6a62dbf87fc54d78358ab1861f..0000000000000000000000000000000000000000 --- a/packaging/tools/makepkg_power.sh +++ /dev/null @@ -1,204 +0,0 @@ -#!/bin/bash -# -# Generate tar.gz package for all os system - -set -e -#set -x - -curr_dir=$(pwd) -compile_dir=$1 -version=$2 -build_time=$3 -cpuType=$4 -osType=$5 -verMode=$6 -verType=$7 -pagMode=$8 -versionComp=$9 - -script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -f ${script_dir}/../..)" - -# create compressed install file. -build_dir="${compile_dir}/build" -code_dir="${top_dir}/src" -release_dir="${top_dir}/release" - -#package_name='linux' -if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/PowerDB-enterprise-server-${version}" -else - install_dir="${release_dir}/PowerDB-server-${version}" -fi - -lib_files="${build_dir}/lib/libtaos.so.${version}" -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" -if [ "$verMode" == "cluster" ]; then - cfg_dir="${top_dir}/../enterprise/packaging/cfg" -else - cfg_dir="${top_dir}/packaging/cfg" -fi -install_files="${script_dir}/install_power.sh" -nginx_dir="${code_dir}/../../enterprise/src/plugins/web" - -init_file_deb=${script_dir}/../deb/powerd -init_file_rpm=${script_dir}/../rpm/powerd -init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord -init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord - -# make directories. 
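All of the makepkg_*.sh variants read the same nine positional parameters bound at the top of each script (compile_dir, version, build_time, cpuType, osType, verMode, verType, pagMode, versionComp). A hypothetical invocation, with illustrative values only:

    ./makepkg_power.sh /home/user/TDengine/debug 2.3.0.0 20211101 x64 Linux edge stable full 2.0.0.0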
-mkdir -p ${install_dir} -mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/power.cfg - -#mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : -mkdir -p ${install_dir}/bin -if [ "$pagMode" == "lite" ]; then - strip ${build_dir}/bin/taosd - strip ${build_dir}/bin/taos -# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${script_dir}/remove_power.sh" - cp ${build_dir}/bin/taos ${install_dir}/bin/power - cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd - cp ${script_dir}/remove_power.sh ${install_dir}/bin -else -# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh ${script_dir}/set_core.sh" - cp ${build_dir}/bin/taos ${install_dir}/bin/power - cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd - cp ${build_dir}/bin/taosadapter ${install_dir}/bin/taosadapter ||: - cp ${script_dir}/remove_power.sh ${install_dir}/bin - cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo - cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump - cp ${build_dir}/bin/tarbitrator ${install_dir}/bin - cp ${script_dir}/set_core.sh ${install_dir}/bin - cp ${script_dir}/run_taosd.sh ${install_dir}/bin - cp ${script_dir}/get_client.sh ${install_dir}/bin - cp ${script_dir}/startPre.sh ${install_dir}/bin - cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin -fi -chmod a+x ${install_dir}/bin/* || : - -mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/powerd.deb -mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/powerd.rpm -mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : -mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : - -if [ "$verMode" == "cluster" ]; then - sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_power.sh >> remove_power_temp.sh - mv remove_power_temp.sh ${install_dir}/bin/remove_power.sh - - mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd - cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png - rm -rf ${install_dir}/nginxd/png - - sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/*.html - sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/js/*.js - - sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/power.cfg - sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/power.cfg - sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/power.cfg - - if [ "$cpuType" == "aarch64" ]; then - cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ - elif [ "$cpuType" == "aarch32" ]; then - cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/ - fi - rm -rf ${install_dir}/nginxd/sbin/arm -fi - -cd ${install_dir} -tar -zcv -f power.tar.gz * --remove-files || : -exitcode=$? -if [ "$exitcode" != "0" ]; then - echo "tar power.tar.gz error !!!" 
- exit $exitcode -fi - -cd ${curr_dir} -cp ${install_files} ${install_dir} -if [ "$verMode" == "cluster" ]; then - sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_power.sh >> install_power_temp.sh - mv install_power_temp.sh ${install_dir}/install_power.sh -fi -if [ "$pagMode" == "lite" ]; then - sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install.sh >> install_power_temp.sh - mv install_power_temp.sh ${install_dir}/install_power.sh -fi -chmod a+x ${install_dir}/install_power.sh - -# Copy example code -mkdir -p ${install_dir}/examples -examples_dir="${top_dir}/tests/examples" -cp -r ${examples_dir}/c ${install_dir}/examples -sed -i '/passwd/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c -sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c - -if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - cp -r ${examples_dir}/JDBC ${install_dir}/examples - cp -r ${examples_dir}/matlab ${install_dir}/examples - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/matlab/TDengineDemo.m - cp -r ${examples_dir}/python ${install_dir}/examples - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/python/read_example.py - cp -r ${examples_dir}/R ${install_dir}/examples - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt - cp -r ${examples_dir}/go ${install_dir}/examples - sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/taosdemo.go -fi -# Copy driver -mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt - -# Copy connector -connector_dir="${code_dir}/connector" -mkdir -p ${install_dir}/connector -if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: - - if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then - cp -r ${connector_dir}/go ${install_dir}/connector - else - echo "WARNING: go connector not found, please check if want to use it!" - fi - cp -r ${connector_dir}/python ${install_dir}/connector/ - - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py - - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py - - sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py -fi -# Copy release note -# cp ${script_dir}/release_note ${install_dir} - -# exit 1 - -cd ${release_dir} - -if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -else - echo "unknow verMode, nor cluster or edge" - exit 1 -fi - -if [ "$pagMode" == "lite" ]; then - pkg_name=${pkg_name}-Lite -fi - -if [ "$verType" == "beta" ]; then - pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} -else - echo "unknow verType, nor stabel or beta" - exit 1 -fi - -tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -exitcode=$? -if [ "$exitcode" != "0" ]; then - echo "tar ${pkg_name}.tar.gz error !!!" 
- exit $exitcode -fi - -cd ${curr_dir} diff --git a/packaging/tools/makepkg_pro.sh b/packaging/tools/makepkg_pro.sh deleted file mode 100755 index ea370b2d95437b13f932b1925a4de8d1073cd294..0000000000000000000000000000000000000000 --- a/packaging/tools/makepkg_pro.sh +++ /dev/null @@ -1,162 +0,0 @@ -#!/bin/bash -# -# Generate tar.gz package for all os system - -set -e -#set -x - -curr_dir=$(pwd) -compile_dir=$1 -version=$2 -build_time=$3 -cpuType=$4 -osType=$5 -verMode=$6 -verType=$7 -pagMode=$8 -versionComp=$9 - -script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -f ${script_dir}/../..)" - -# create compressed install file. -build_dir="${compile_dir}/build" -code_dir="${top_dir}/src" -release_dir="${top_dir}/release" - -# package_name='linux' -if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/ProDB-enterprise-server-${version}" -else - install_dir="${release_dir}/ProDB-server-${version}" -fi - -lib_files="${build_dir}/lib/libtaos.so.${version}" -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" -if [ "$verMode" == "cluster" ]; then - cfg_dir="${top_dir}/../enterprise/packaging/cfg" -else - cfg_dir="${top_dir}/packaging/cfg" -fi -install_files="${script_dir}/install_pro.sh" -nginx_dir="${code_dir}/../../enterprise/src/plugins/web" - -# make directories. -mkdir -p ${install_dir} -mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/prodb.cfg -mkdir -p ${install_dir}/bin - -# bin -if [ "$pagMode" == "lite" ]; then - strip ${build_dir}/bin/taosd - strip ${build_dir}/bin/taos -else - cp ${build_dir}/bin/taosdemo ${install_dir}/bin/prodemo - cp ${build_dir}/bin/taosdump ${install_dir}/bin/prodump - cp ${build_dir}/bin/tarbitrator ${install_dir}/bin - cp ${script_dir}/set_core.sh ${install_dir}/bin - cp ${script_dir}/run_taosd.sh ${install_dir}/bin - cp ${script_dir}/get_client.sh ${install_dir}/bin - cp ${script_dir}/startPre.sh ${install_dir}/bin - cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin -fi -cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc -cp ${build_dir}/bin/taosd ${install_dir}/bin/prodbs -cp ${build_dir}/bin/taosadapter ${install_dir}/bin/taosadapter ||: -cp ${script_dir}/remove_pro.sh ${install_dir}/bin -chmod a+x ${install_dir}/bin/* || : - -# cluster -if [ "$verMode" == "cluster" ]; then - sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_pro.sh >> remove_prodb_temp.sh - mv remove_prodb_temp.sh ${install_dir}/bin/remove_pro.sh - - mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd - cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png - rm -rf ${install_dir}/nginxd/png - - # replace the OEM name, add by yangzy@2021-09-22 - sed -i -e 's/www.taosdata.com/www.hanatech.com.cn/g' $(grep -r 'www.taosdata.com' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") - sed -i -e 's/TAOS Data/Hanatech/g' $(grep -r 'TAOS Data' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") - sed -i -e 's/taosd/prodbs/g' `grep -r 'taosd' ${install_dir}/nginxd | grep -E '*\.js\s*.*' | sed -r -e 's/(.*\.js):\s*(.*)/\1/g' | sort | uniq` - - sed -i -e 's/taosd<\/th>/prodbs<\/th>/g' ${install_dir}/nginxd/admin/monitor.html - sed -i -e "s/data:\['taosd', 'system'\],/data:\['prodbs', 'system'\],/g" ${install_dir}/nginxd/admin/monitor.html - sed -i -e "s/name: 'taosd',/name: 'prodbs',/g" ${install_dir}/nginxd/admin/monitor.html - sed -i "s/TDengine/ProDB/g" 
${install_dir}/nginxd/admin/*.html - sed -i "s/TDengine/ProDB/g" ${install_dir}/nginxd/admin/js/*.js - - if [ "$cpuType" == "aarch64" ]; then - cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ - elif [ "$cpuType" == "aarch32" ]; then - cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/ - fi - rm -rf ${install_dir}/nginxd/sbin/arm -fi - -sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg -sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg -sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/prodb.cfg -sed -i "s/support@taosdata.com/support@hanatech.com.cn/g" ${install_dir}/cfg/prodb.cfg -sed -i "s/taos client/prodbc/g" ${install_dir}/cfg/prodb.cfg -sed -i "s/taosd/prodbs/g" ${install_dir}/cfg/prodb.cfg - -cd ${install_dir} -tar -zcv -f prodb.tar.gz * --remove-files || : -exitcode=$? -if [ "$exitcode" != "0" ]; then - echo "tar prodb.tar.gz error !!!" - exit $exitcode -fi - -cd ${curr_dir} -cp ${install_files} ${install_dir} -if [ "$verMode" == "cluster" ]; then - sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_pro.sh >> install_prodb_temp.sh - mv install_prodb_temp.sh ${install_dir}/install_pro.sh -fi -if [ "$pagMode" == "lite" ]; then - sed -e "s/pagMode=full/pagMode=lite/g" -e "s/taos_history/prodb_history/g" ${install_dir}/install.sh >> install_prodb_temp.sh - mv install_prodb_temp.sh ${install_dir}/install_pro.sh -fi - -sed -i "/install_connector$/d" ${install_dir}/install_pro.sh -sed -i "/install_examples$/d" ${install_dir}/install_pro.sh -chmod a+x ${install_dir}/install_pro.sh - -# Copy driver -mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt - -cd ${release_dir} - -if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -else - echo "unknow verMode, nor cluster or edge" - exit 1 -fi - -if [ "$pagMode" == "lite" ]; then - pkg_name=${pkg_name}-Lite -fi - -if [ "$verType" == "beta" ]; then - pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} -else - echo "unknow verType, nor stabel or beta" - exit 1 -fi - -tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -exitcode=$? -if [ "$exitcode" != "0" ]; then - echo "tar ${pkg_name}.tar.gz error !!!" - exit $exitcode -fi - -cd ${curr_dir} diff --git a/packaging/tools/makepkg_tq.sh b/packaging/tools/makepkg_tq.sh deleted file mode 100755 index ff406421eaacf290e2e9269c358176fee20a80bb..0000000000000000000000000000000000000000 --- a/packaging/tools/makepkg_tq.sh +++ /dev/null @@ -1,186 +0,0 @@ -#!/bin/bash -# -# Generate tar.gz package for all os system - -set -e -#set -x - -curr_dir=$(pwd) -compile_dir=$1 -version=$2 -build_time=$3 -cpuType=$4 -osType=$5 -verMode=$6 -verType=$7 -pagMode=$8 -versionComp=$9 - -script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -f ${script_dir}/../..)" - -# create compressed install file. 
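The OEM rebranding these scripts perform is a set of in-place sed passes over the staged config (and, for cluster builds, the nginx admin pages). A minimal standalone equivalent of the config pass, using the TQ names from the script below and a hypothetical file path:

    cfg=/tmp/tq.cfg                          # hypothetical staged copy of taos.cfg
    sed -i 's/TDengine/TQ/g'       "$cfg"    # rebrand the product name
    sed -i '/dataDir/ s/taos/tq/g' "$cfg"    # rename paths, dataDir lines only
    sed -i '/logDir/  s/taos/tq/g' "$cfg"    # rename paths, logDir lines only

The address prefix (/dataDir/, /logDir/) is what keeps the taos-to-tq substitution from touching unrelated settings.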
-build_dir="${compile_dir}/build" -code_dir="${top_dir}/src" -release_dir="${top_dir}/release" - -#package_name='linux' -if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/TQ-enterprise-server-${version}" -else - install_dir="${release_dir}/TQ-server-${version}" -fi - -lib_files="${build_dir}/lib/libtaos.so.${version}" -header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h" -if [ "$verMode" == "cluster" ]; then - cfg_dir="${top_dir}/../enterprise/packaging/cfg" -else - cfg_dir="${top_dir}/packaging/cfg" -fi -install_files="${script_dir}/install_tq.sh" -nginx_dir="${code_dir}/../../enterprise/src/plugins/web" - -# make directories. -mkdir -p ${install_dir} -mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/tq.cfg - -mkdir -p ${install_dir}/bin -if [ "$pagMode" == "lite" ]; then - strip ${build_dir}/bin/taosd - strip ${build_dir}/bin/taos - cp ${build_dir}/bin/taos ${install_dir}/bin/tq - cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd - cp ${script_dir}/remove_tq.sh ${install_dir}/bin -else - cp ${build_dir}/bin/taos ${install_dir}/bin/tq - cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd - cp ${script_dir}/remove_tq.sh ${install_dir}/bin - cp ${build_dir}/bin/taosadapter ${install_dir}/bin/taosadapter ||: - cp ${build_dir}/bin/taosdemo ${install_dir}/bin/tqdemo - cp ${build_dir}/bin/tarbitrator ${install_dir}/bin - cp ${script_dir}/set_core.sh ${install_dir}/bin - cp ${script_dir}/run_taosd.sh ${install_dir}/bin - cp ${script_dir}/get_client.sh ${install_dir}/bin - cp ${script_dir}/startPre.sh ${install_dir}/bin - cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin -fi -chmod a+x ${install_dir}/bin/* || : - -if [ "$verMode" == "cluster" ]; then - sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_tq.sh >> remove_tq_temp.sh - mv remove_tq_temp.sh ${install_dir}/bin/remove_tq.sh - - mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd - cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png - rm -rf ${install_dir}/nginxd/png - - sed -i "s/TDengine/TQ/g" ${install_dir}/nginxd/admin/*.html - sed -i "s/TDengine/TQ/g" ${install_dir}/nginxd/admin/js/*.js - - sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg - sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg - sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/tq.cfg - - if [ "$cpuType" == "aarch64" ]; then - cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ - elif [ "$cpuType" == "aarch32" ]; then - cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/ - fi - rm -rf ${install_dir}/nginxd/sbin/arm -fi - -cd ${install_dir} -tar -zcv -f tq.tar.gz * --remove-files || : -exitcode=$? -if [ "$exitcode" != "0" ]; then - echo "tar tq.tar.gz error !!!" 
- exit $exitcode -fi - -cd ${curr_dir} -cp ${install_files} ${install_dir} -if [ "$verMode" == "cluster" ]; then - sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_tq.sh >> install_tq_temp.sh - mv install_tq_temp.sh ${install_dir}/install_tq.sh -fi -if [ "$pagMode" == "lite" ]; then - sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install.sh >> install_tq_temp.sh - mv install_tq_temp.sh ${install_dir}/install_tq.sh -fi -chmod a+x ${install_dir}/install_tq.sh - -# Copy example code -mkdir -p ${install_dir}/examples -examples_dir="${top_dir}/tests/examples" -cp -r ${examples_dir}/c ${install_dir}/examples -sed -i '/passwd/ {s/taosdata/tqueue/g}' ${install_dir}/examples/c/*.c -sed -i '/root/ {s/taosdata/tqueue/g}' ${install_dir}/examples/c/*.c - -if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - cp -r ${examples_dir}/JDBC ${install_dir}/examples - cp -r ${examples_dir}/matlab ${install_dir}/examples - sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/matlab/TDengineDemo.m - cp -r ${examples_dir}/python ${install_dir}/examples - sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/python/read_example.py - cp -r ${examples_dir}/R ${install_dir}/examples - sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/R/command.txt - cp -r ${examples_dir}/go ${install_dir}/examples - sed -i '/root/ {s/taosdata/tqueue/g}' ${install_dir}/examples/go/taosdemo.go -fi -# Copy driver -mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt - -# Copy connector -connector_dir="${code_dir}/connector" -mkdir -p ${install_dir}/connector -if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then - cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: - - if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then - cp -r ${connector_dir}/go ${install_dir}/connector - else - echo "WARNING: go connector not found, please check if want to use it!" - fi - cp -r ${connector_dir}/python ${install_dir}/connector/ - - sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/cinterface.py - - sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/subscription.py - - sed -i '/self._password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/connection.py -fi - -cd ${release_dir} - -if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${osType}-${cpuType} -else - echo "unknow verMode, nor cluster or edge" - exit 1 -fi - -if [ "$pagMode" == "lite" ]; then - pkg_name=${pkg_name}-Lite -fi - -if [ "$verType" == "beta" ]; then - pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} -else - echo "unknow verType, nor stabel or beta" - exit 1 -fi - -tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -exitcode=$? -if [ "$exitcode" != "0" ]; then - echo "tar ${pkg_name}.tar.gz error !!!" 
- exit $exitcode -fi - -cd ${curr_dir} diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 7fc2f984570f65a0b0381e5e54510e0fef667c6a..e0da436a2ec3e4217d531bdc3a4c85a4152bc071 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -427,8 +427,8 @@ function install_service_on_systemd() { ${csudo}bash -c "echo '[Unit]' >> ${taosd_service_config}" ${csudo}bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}" - ${csudo}bash -c "echo 'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'After=network-online.target' >> ${taosd_service_config}" + ${csudo}bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}" ${csudo}bash -c "echo >> ${taosd_service_config}" ${csudo}bash -c "echo '[Service]' >> ${taosd_service_config}" ${csudo}bash -c "echo 'Type=simple' >> ${taosd_service_config}" diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index b9bd1c163809c1d2dabfff6f85ffaa765378cede..2eff41fe41df4c0dd01283f4e4d972148b0003ac 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -11,201 +11,200 @@ RED='\033[0;31m' GREEN='\033[1;32m' NC='\033[0m' +installDir="/usr/local/taos" +serverName="taosd" +clientName="taos" +uninstallScript="rmtaos" +productName="TDengine" + #install main path -install_main_dir="/usr/local/taos" -data_link_dir="/usr/local/taos/data" -log_link_dir="/usr/local/taos/log" -cfg_link_dir="/usr/local/taos/cfg" +install_main_dir=${installDir} +data_link_dir=${installDir}/data +log_link_dir=${installDir}/log +cfg_link_dir=${installDir}/cfg bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" install_nginxd_dir="/usr/local/nginxd" -# v1.5 jar dir -#v15_java_app_dir="/usr/local/lib/taos" - service_config_dir="/etc/systemd/system" -taos_service_name="taosd" +taos_service_name=${serverName} taosadapter_service_name="taosadapter" tarbitrator_service_name="tarbitratord" nginx_service_name="nginxd" csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " +if command -v sudo >/dev/null; then + csudo="sudo " fi initd_mod=0 service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else +if pidof systemd &>/dev/null; then + service_mod=0 +elif $(which service &>/dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &>/dev/null); then + initd_mod=1 + elif $(which insserv &>/dev/null); then + initd_mod=2 + elif $(which update-rc.d &>/dev/null); then + initd_mod=3 + else service_mod=2 + fi +else + service_mod=2 fi function kill_taosadapter() { pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : + ${csudo}kill -9 $pid || : fi } function kill_taosd() { - pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') + pid=$(ps -ef | grep ${serverName} | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : + ${csudo}kill -9 $pid || : fi } function kill_tarbitrator() { pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; 
then - ${csudo}kill -9 $pid || : + ${csudo}kill -9 $pid || : fi } function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/taos || : - ${csudo}rm -f ${bin_link_dir}/taosd || : - ${csudo}rm -f ${bin_link_dir}/taosadapter || : - ${csudo}rm -f ${bin_link_dir}/taosdemo || : - ${csudo}rm -f ${bin_link_dir}/taosdump || : - ${csudo}rm -f ${bin_link_dir}/rmtaos || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || : + # Remove link + ${csudo}rm -f ${bin_link_dir}/${clientName} || : + ${csudo}rm -f ${bin_link_dir}/${serverName} || : + ${csudo}rm -f ${bin_link_dir}/taosadapter || : + ${csudo}rm -f ${bin_link_dir}/taosdemo || : + ${csudo}rm -f ${bin_link_dir}/taosdump || : + ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : + ${csudo}rm -f ${bin_link_dir}/tarbitrator || : + ${csudo}rm -f ${bin_link_dir}/set_core || : + ${csudo}rm -f ${bin_link_dir}/run_taosd_and_taosadapter.sh || : } function clean_lib() { - # Remove link - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - #${csudo}rm -rf ${v15_java_app_dir} || : + # Remove link + ${csudo}rm -f ${lib_link_dir}/libtaos.* || : + ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : + #${csudo}rm -rf ${v15_java_app_dir} || : } function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taosdef.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : + # Remove link + ${csudo}rm -f ${inc_link_dir}/taos.h || : + ${csudo}rm -f ${inc_link_dir}/taosdef.h || : + ${csudo}rm -f ${inc_link_dir}/taoserror.h || : } function clean_config() { - # Remove link - ${csudo}rm -f ${cfg_link_dir}/* || : + # Remove link + ${csudo}rm -f ${cfg_link_dir}/* || : } function clean_log() { - # Remove link - ${csudo}rm -rf ${log_link_dir} || : + # Remove link + ${csudo}rm -rf ${log_link_dir} || : } function clean_service_on_systemd() { - taosd_service_config="${service_config_dir}/${taos_service_name}.service" - if systemctl is-active --quiet ${taos_service_name}; then - echo "TDengine taosd is running, stopping it..." - ${csudo}systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${taosd_service_config} + taosd_service_config="${service_config_dir}/${taos_service_name}.service" + if systemctl is-active --quiet ${taos_service_name}; then + echo "${productName} ${serverName} is running, stopping it..." + ${csudo}systemctl stop ${taos_service_name} &>/dev/null || echo &>/dev/null + fi + ${csudo}systemctl disable ${taos_service_name} &>/dev/null || echo &>/dev/null + ${csudo}rm -f ${taosd_service_config} - taosadapter_service_config="${service_config_dir}/taosadapter.service" - if systemctl is-active --quiet ${taosadapter_service_name}; then - echo "TDengine taosAdapter is running, stopping it..." - ${csudo}systemctl stop ${taosadapter_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${taosadapter_service_name} &> /dev/null || echo &> /dev/null - [ -f ${taosadapter_service_config} ] && ${csudo}rm -f ${taosadapter_service_config} + taosadapter_service_config="${service_config_dir}/taosadapter.service" + if systemctl is-active --quiet ${taosadapter_service_name}; then + echo "${productName} taosAdapter is running, stopping it..." 
+ ${csudo}systemctl stop ${taosadapter_service_name} &>/dev/null || echo &>/dev/null + fi + ${csudo}systemctl disable ${taosadapter_service_name} &>/dev/null || echo &>/dev/null + [ -f ${taosadapter_service_config} ] && ${csudo}rm -f ${taosadapter_service_config} - tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" - if systemctl is-active --quiet ${tarbitrator_service_name}; then - echo "TDengine tarbitrator is running, stopping it..." - ${csudo}systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${tarbitratord_service_config} - - if [ "$verMode" == "cluster" ]; then - nginx_service_config="${service_config_dir}/${nginx_service_name}.service" - if [ -d ${install_nginxd_dir} ]; then - if systemctl is-active --quiet ${nginx_service_name}; then - echo "Nginx for TDengine is running, stopping it..." - ${csudo}systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${nginx_service_config} - fi + tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" + if systemctl is-active --quiet ${tarbitrator_service_name}; then + echo "${productName} tarbitrator is running, stopping it..." + ${csudo}systemctl stop ${tarbitrator_service_name} &>/dev/null || echo &>/dev/null + fi + ${csudo}systemctl disable ${tarbitrator_service_name} &>/dev/null || echo &>/dev/null + ${csudo}rm -f ${tarbitratord_service_config} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/${nginx_service_name}.service" + if [ -d ${install_nginxd_dir} ]; then + if systemctl is-active --quiet ${nginx_service_name}; then + echo "Nginx for ${productName} is running, stopping it..." + ${csudo}systemctl stop ${nginx_service_name} &>/dev/null || echo &>/dev/null + fi + ${csudo}systemctl disable ${nginx_service_name} &>/dev/null || echo &>/dev/null + ${csudo}rm -f ${nginx_service_config} fi + fi } function clean_service_on_sysvinit() { - #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - #${csudo}sed -i "\|${restart_config_str}|d" /etc/inittab || : + if pidof ${serverName} &>/dev/null; then + echo "${productName} ${serverName} is running, stopping it..." + ${csudo}service ${serverName} stop || : + fi - if pidof taosd &> /dev/null; then - echo "TDengine taosd is running, stopping it..." - ${csudo}service taosd stop || : - fi + if pidof tarbitrator &>/dev/null; then + echo "${productName} tarbitrator is running, stopping it..." + ${csudo}service tarbitratord stop || : + fi - if pidof tarbitrator &> /dev/null; then - echo "TDengine tarbitrator is running, stopping it..." 
- ${csudo}service tarbitratord stop || : + if ((${initd_mod} == 1)); then + if [ -e ${service_config_dir}/${serverName} ]; then + ${csudo}chkconfig --del ${serverName} || : fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/taosd ]; then - ${csudo}chkconfig --del taosd || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/taosd ]; then - ${csudo}insserv -r taosd || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/taosd ]; then - ${csudo}update-rc.d -f taosd remove || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo}chkconfig --del tarbitratord || : + fi + elif ((${initd_mod} == 2)); then + if [ -e ${service_config_dir}/${serverName} ]; then + ${csudo}insserv -r ${serverName} || : fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo}insserv -r tarbitratord || : + fi + elif ((${initd_mod} == 3)); then + if [ -e ${service_config_dir}/${serverName} ]; then + ${csudo}update-rc.d -f ${serverName} remove || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo}update-rc.d -f tarbitratord remove || : + fi + fi - ${csudo}rm -f ${service_config_dir}/taosd || : - ${csudo}rm -f ${service_config_dir}/tarbitratord || : + ${csudo}rm -f ${service_config_dir}/${serverName} || : + ${csudo}rm -f ${service_config_dir}/tarbitratord || : - if $(which init &> /dev/null); then - ${csudo}init q || : - fi + if $(which init &>/dev/null); then + ${csudo}init q || : + fi } function clean_service() { - if ((${service_mod}==0)); then - clean_service_on_systemd - elif ((${service_mod}==1)); then - clean_service_on_sysvinit - else - # must manual stop taosd - kill_taosadapter - kill_taosd - kill_tarbitrator - fi + if ((${service_mod} == 0)); then + clean_service_on_systemd + elif ((${service_mod} == 1)); then + clean_service_on_sysvinit + else + kill_taosadapter + kill_taosd + kill_tarbitrator + fi } # Stop service and disable booting start. 
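The clean_service dispatch above keys off the init-system probe near the top of remove.sh. Condensed to its essentials (behaviorally equivalent for practical purposes; the full script additionally refines initd_mod for chkconfig, insserv, or update-rc.d):

    if pidof systemd &>/dev/null; then
      service_mod=0      # systemd: systemctl stop/disable the units
    elif which service &>/dev/null; then
      service_mod=1      # SysV: remove init scripts per initd_mod
    else
      service_mod=2      # no service manager found: kill the processes directly
    fi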
@@ -221,7 +220,7 @@ clean_log # Remove link configuration file clean_config # Remove data link directory -${csudo}rm -rf ${data_link_dir} || : +${csudo}rm -rf ${data_link_dir} || : ${csudo}rm -rf ${install_main_dir} ${csudo}rm -rf ${install_nginxd_dir} @@ -231,16 +230,16 @@ else osinfo="" fi -if echo $osinfo | grep -qwi "ubuntu" ; then -# echo "this is ubuntu system" - ${csudo}dpkg --force-all -P tdengine > /dev/null 2>&1 || : -elif echo $osinfo | grep -qwi "debian" ; then -# echo "this is debian system" - ${csudo}dpkg --force-all -P tdengine > /dev/null 2>&1 || : -elif echo $osinfo | grep -qwi "centos" ; then -# echo "this is centos system" - ${csudo}rpm -e --noscripts tdengine > /dev/null 2>&1 || : +if echo $osinfo | grep -qwi "ubuntu"; then + # echo "this is ubuntu system" + ${csudo}dpkg --force-all -P tdengine >/dev/null 2>&1 || : +elif echo $osinfo | grep -qwi "debian"; then + # echo "this is debian system" + ${csudo}dpkg --force-all -P tdengine >/dev/null 2>&1 || : +elif echo $osinfo | grep -qwi "centos"; then + # echo "this is centos system" + ${csudo}rpm -e --noscripts tdengine >/dev/null 2>&1 || : fi -echo -e "${GREEN}TDengine is removed successfully!${NC}" +echo -e "${GREEN}${productName} is removed successfully!${NC}" echo diff --git a/packaging/tools/remove_arbi_jh.sh b/packaging/tools/remove_arbi_jh.sh deleted file mode 100755 index 018bf793d9df80ed5076fa15e3714975e2c3ea80..0000000000000000000000000000000000000000 --- a/packaging/tools/remove_arbi_jh.sh +++ /dev/null @@ -1,130 +0,0 @@ -#!/bin/bash -# -# Script to stop the service and uninstall jh_iot's arbitrator - -set -e -#set -x - -verMode=edge - -RED='\033[0;31m' -GREEN='\033[1;32m' -NC='\033[0m' - -#install main path -install_main_dir="/usr/local/tarbitrator" -bin_link_dir="/usr/bin" - -service_config_dir="/etc/systemd/system" -tarbitrator_service_name="tarbitratord" -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : -} - -function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : -} - -function clean_log() { - # Remove link - ${csudo}rm -rf /arbitrator.log || : -} - -function clean_service_on_systemd() { - tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" - - if systemctl is-active --quiet ${tarbitrator_service_name}; then - echo "jh_iot's tarbitrator is running, stopping it..." - ${csudo}systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - - ${csudo}rm -f ${tarbitratord_service_config} -} - -function clean_service_on_sysvinit() { - if pidof tarbitrator &> /dev/null; then - echo "jh_iot's tarbitrator is running, stopping it..." 
- ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function clean_service() { - if ((${service_mod}==0)); then - clean_service_on_systemd - elif ((${service_mod}==1)); then - clean_service_on_sysvinit - else - # must manual stop - kill_tarbitrator - fi -} - -# Stop service and disable booting start. -clean_service -# Remove binary file and links -clean_bin -# Remove header file. -##clean_header -# Remove log file -clean_log - -${csudo}rm -rf ${install_main_dir} - -echo -e "${GREEN}jh_iot's arbitrator is removed successfully!${NC}" -echo diff --git a/packaging/tools/remove_arbi_kh.sh b/packaging/tools/remove_arbi_kh.sh deleted file mode 100755 index 2d270ea0bef735e1c307970e4d5b45d6eca56622..0000000000000000000000000000000000000000 --- a/packaging/tools/remove_arbi_kh.sh +++ /dev/null @@ -1,130 +0,0 @@ -#!/bin/bash -# -# Script to stop the service and uninstall KingHistorian's arbitrator - -set -e -#set -x - -verMode=edge - -RED='\033[0;31m' -GREEN='\033[1;32m' -NC='\033[0m' - -#install main path -install_main_dir="/usr/local/tarbitrator" -bin_link_dir="/usr/bin" - -service_config_dir="/etc/systemd/system" -tarbitrator_service_name="tarbitratord" -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : -} - -function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : -} - -function clean_log() { - # Remove link - ${csudo}rm -rf /arbitrator.log || : -} - -function clean_service_on_systemd() { - tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" - - if systemctl is-active --quiet ${tarbitrator_service_name}; then - echo "KingHistorian's tarbitrator is running, stopping it..." - ${csudo}systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - - ${csudo}rm -f ${tarbitratord_service_config} -} - -function clean_service_on_sysvinit() { - if pidof tarbitrator &> /dev/null; then - echo "KingHistorian's tarbitrator is running, stopping it..." 
- ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function clean_service() { - if ((${service_mod}==0)); then - clean_service_on_systemd - elif ((${service_mod}==1)); then - clean_service_on_sysvinit - else - # must manual stop - kill_tarbitrator - fi -} - -# Stop service and disable booting start. -clean_service -# Remove binary file and links -clean_bin -# Remove header file. -##clean_header -# Remove log file -clean_log - -${csudo}rm -rf ${install_main_dir} - -echo -e "${GREEN}KingHistorian's arbitrator is removed successfully!${NC}" -echo diff --git a/packaging/tools/remove_arbi_power.sh b/packaging/tools/remove_arbi_power.sh deleted file mode 100755 index 459ca481e32954e8472f5f1c108a597fd59ff129..0000000000000000000000000000000000000000 --- a/packaging/tools/remove_arbi_power.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash -# -# Script to stop the service and uninstall PowerDB's arbitrator - -set -e -#set -x - -verMode=edge - -RED='\033[0;31m' -GREEN='\033[1;32m' -NC='\033[0m' - -#install main path -install_main_dir="/usr/local/tarbitrator" -bin_link_dir="/usr/bin" -#inc_link_dir="/usr/include" - -service_config_dir="/etc/systemd/system" -tarbitrator_service_name="tarbitratord" -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} -function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : -} - -function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taosdef.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : -} - -function clean_log() { - # Remove link - ${csudo}rm -rf /arbitrator.log || : -} - -function clean_service_on_systemd() { - tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" - - if systemctl is-active --quiet ${tarbitrator_service_name}; then - echo "PowerDB tarbitrator is running, stopping it..." - ${csudo}systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - - ${csudo}rm -f ${tarbitratord_service_config} -} - -function clean_service_on_sysvinit() { - if pidof tarbitrator &> /dev/null; then - echo "PowerDB's tarbitrator is running, stopping it..." 
- ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function clean_service() { - if ((${service_mod}==0)); then - clean_service_on_systemd - elif ((${service_mod}==1)); then - clean_service_on_sysvinit - else - # must manual stop - kill_tarbitrator - fi -} - -# Stop service and disable booting start. -clean_service -# Remove binary file and links -clean_bin -# Remove header file. -##clean_header -# Remove log file -clean_log - -${csudo}rm -rf ${install_main_dir} - -echo -e "${GREEN}PowerDB's arbitrator is removed successfully!${NC}" -echo diff --git a/packaging/tools/remove_arbi_pro.sh b/packaging/tools/remove_arbi_pro.sh deleted file mode 100755 index 4a5edff1c6f9b006085977a2c3bf9c9e4fa24326..0000000000000000000000000000000000000000 --- a/packaging/tools/remove_arbi_pro.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash -# -# Script to stop the service and uninstall ProDB's arbitrator - -set -e -#set -x - -verMode=edge - -RED='\033[0;31m' -GREEN='\033[1;32m' -NC='\033[0m' - -#install main path -install_main_dir="/usr/local/tarbitrator" -bin_link_dir="/usr/bin" - -service_config_dir="/etc/systemd/system" -tarbitrator_service_name="tarbitratord" -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : -} - -function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taosdef.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : -} - -function clean_log() { - # Remove link - ${csudo}rm -rf /arbitrator.log || : -} - -function clean_service_on_systemd() { - tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" - - if systemctl is-active --quiet ${tarbitrator_service_name}; then - echo "ProDB tarbitrator is running, stopping it..." - ${csudo}systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - - ${csudo}rm -f ${tarbitratord_service_config} -} - -function clean_service_on_sysvinit() { - if pidof tarbitrator &> /dev/null; then - echo "ProDB's tarbitrator is running, stopping it..." 
- ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function clean_service() { - if ((${service_mod}==0)); then - clean_service_on_systemd - elif ((${service_mod}==1)); then - clean_service_on_sysvinit - else - # must manual stop - kill_tarbitrator - fi -} - -# Stop service and disable booting start. -clean_service -# Remove binary file and links -clean_bin -# Remove header file. -##clean_header -# Remove log file -clean_log - -${csudo}rm -rf ${install_main_dir} - -echo -e "${GREEN}ProDB's arbitrator is removed successfully!${NC}" -echo diff --git a/packaging/tools/remove_arbi_tq.sh b/packaging/tools/remove_arbi_tq.sh deleted file mode 100755 index f1b5bed44080f0631689098c6990a5cecdd186f1..0000000000000000000000000000000000000000 --- a/packaging/tools/remove_arbi_tq.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash -# -# Script to stop the service and uninstall TQ's arbitrator - -set -e -#set -x - -verMode=edge - -RED='\033[0;31m' -GREEN='\033[1;32m' -NC='\033[0m' - -#install main path -install_main_dir="/usr/local/tarbitrator" -bin_link_dir="/usr/bin" -#inc_link_dir="/usr/include" - -service_config_dir="/etc/systemd/system" -tarbitrator_service_name="tarbitratord" -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} -function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : -} - -function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taosdef.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : -} - -function clean_log() { - # Remove link - ${csudo}rm -rf /arbitrator.log || : -} - -function clean_service_on_systemd() { - tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" - - if systemctl is-active --quiet ${tarbitrator_service_name}; then - echo "TQ tarbitrator is running, stopping it..." - ${csudo}systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - - ${csudo}rm -f ${tarbitratord_service_config} -} - -function clean_service_on_sysvinit() { - if pidof tarbitrator &> /dev/null; then - echo "TQ's tarbitrator is running, stopping it..." 
- ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function clean_service() { - if ((${service_mod}==0)); then - clean_service_on_systemd - elif ((${service_mod}==1)); then - clean_service_on_sysvinit - else - # must manual stop - kill_tarbitrator - fi -} - -# Stop service and disable booting start. -clean_service -# Remove binary file and links -clean_bin -# Remove header file. -##clean_header -# Remove log file -clean_log - -${csudo}rm -rf ${install_main_dir} - -echo -e "${GREEN}TQ's arbitrator is removed successfully!${NC}" -echo diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index b7e5a327bfd3b7d81520737b252111889660084d..f2cbccb45f738c058236e5625a86fc40c161f488 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -8,38 +8,37 @@ RED='\033[0;31m' GREEN='\033[1;32m' NC='\033[0m' +installDir="/usr/local/taos" +clientName="taos" +uninstallScript="rmtaos" + #install main path -install_main_dir="/usr/local/taos" +install_main_dir=${installDir} -log_link_dir="/usr/local/taos/log" -cfg_link_dir="/usr/local/taos/cfg" +log_link_dir=${installDir}/log +cfg_link_dir=${installDir}/cfg bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" - -# v1.5 jar dir -#v15_java_app_dir="/usr/local/lib/taos" - csudo="" if command -v sudo > /dev/null; then csudo="sudo " fi function kill_client() { - #pid=$(ps -ef | grep "taos" | grep -v "grep" | awk '{print $2}') - if [ -n "$(pidof taos)" ]; then + if [ -n "$(pidof ${clientName})" ]; then ${csudo}kill -9 $pid || : fi } function clean_bin() { # Remove link - ${csudo}rm -f ${bin_link_dir}/taos || : + ${csudo}rm -f ${bin_link_dir}/${clientName} || : ${csudo}rm -f ${bin_link_dir}/taosdemo || : ${csudo}rm -f ${bin_link_dir}/taosdump || : - ${csudo}rm -f ${bin_link_dir}/rmtaos || : + ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : ${csudo}rm -f ${bin_link_dir}/set_core || : } diff --git a/packaging/tools/remove_client_jh.sh b/packaging/tools/remove_client_jh.sh deleted file mode 100755 index 491339fb450f9017004f312032ff5f2dbcaa4164..0000000000000000000000000000000000000000 --- a/packaging/tools/remove_client_jh.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/bash -# -# Script to stop the client and uninstall database, but retain the config and log files. 
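One caveat in these kill_client() helpers (both the surviving remove_client.sh above and the deleted per-brand client scripts below): the guard checks pidof, but $pid is never assigned inside the function, so the kill -9 silently does nothing behind its `|| :`. A minimal corrected sketch, reusing the scripts' own ${clientName} and ${csudo} variables (bash assumed):

function kill_client() {
  local pid
  pid=$(pidof "${clientName}") || return 0  # nothing running, nothing to kill
  ${csudo}kill -9 ${pid} || :
}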
-set -e -# set -x - -RED='\033[0;31m' -GREEN='\033[1;32m' -NC='\033[0m' - -#install main path -install_main_dir="/usr/local/jh_taos" - -log_link_dir="/usr/local/jh_taos/log" -cfg_link_dir="/usr/local/jh_taos/cfg" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -lib64_link_dir="/usr/lib64" -inc_link_dir="/usr/include" - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -function kill_client() { - if [ -n "$(pidof jh_taos)" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/jh_taos || : - ${csudo}rm -f ${bin_link_dir}/jhdemo || : - ${csudo}rm -f ${bin_link_dir}/jh_taosdump || : - ${csudo}rm -f ${bin_link_dir}/rmjh || : - ${csudo}rm -f ${bin_link_dir}/set_core || : -} - -function clean_lib() { - # Remove link - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : -} - -function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : -} - -function clean_config() { - # Remove link - ${csudo}rm -f ${cfg_link_dir}/* || : -} - -function clean_log() { - # Remove link - ${csudo}rm -rf ${log_link_dir} || : -} - -# Stop client. -kill_client -# Remove binary file and links -clean_bin -# Remove header file. -clean_header -# Remove lib file -clean_lib -# Remove link log directory -clean_log -# Remove link configuration file -clean_config - -${csudo}rm -rf ${install_main_dir} - -echo -e "${GREEN}jh_iot client is removed successfully!${NC}" -echo diff --git a/packaging/tools/remove_client_kh.sh b/packaging/tools/remove_client_kh.sh deleted file mode 100755 index 56cf49ca820fcc3077c2253d89955048c5ffa273..0000000000000000000000000000000000000000 --- a/packaging/tools/remove_client_kh.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/bash -# -# Script to stop the client and uninstall database, but retain the config and log files. -set -e -# set -x - -RED='\033[0;31m' -GREEN='\033[1;32m' -NC='\033[0m' - -#install main path -install_main_dir="/usr/local/kinghistorian" - -log_link_dir="/usr/local/kinghistorian/log" -cfg_link_dir="/usr/local/kinghistorian/cfg" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -lib64_link_dir="/usr/lib64" -inc_link_dir="/usr/include" - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -function kill_client() { - if [ -n "$(pidof khclient)" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/khclient || : - ${csudo}rm -f ${bin_link_dir}/khdemo || : - ${csudo}rm -f ${bin_link_dir}/khdump || : - ${csudo}rm -f ${bin_link_dir}/rmkh || : - ${csudo}rm -f ${bin_link_dir}/set_core || : -} - -function clean_lib() { - # Remove link - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : -} - -function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : -} - -function clean_config() { - # Remove link - ${csudo}rm -f ${cfg_link_dir}/* || : -} - -function clean_log() { - # Remove link - ${csudo}rm -rf ${log_link_dir} || : -} - -# Stop client. -kill_client -# Remove binary file and links -clean_bin -# Remove header file. 
-clean_header -# Remove lib file -clean_lib -# Remove link log directory -clean_log -# Remove link configuration file -clean_config - -${csudo}rm -rf ${install_main_dir} - -echo -e "${GREEN}KingHistorian client is removed successfully!${NC}" -echo diff --git a/packaging/tools/remove_client_power.sh b/packaging/tools/remove_client_power.sh deleted file mode 100755 index f16e7813ef894b23b01b89b365ad2038b5b1bb40..0000000000000000000000000000000000000000 --- a/packaging/tools/remove_client_power.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash -# -# Script to stop the client and uninstall database, but retain the config and log files. -set -e -# set -x - -RED='\033[0;31m' -GREEN='\033[1;32m' -NC='\033[0m' - -#install main path -install_main_dir="/usr/local/power" - -log_link_dir="/usr/local/power/log" -cfg_link_dir="/usr/local/power/cfg" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -lib64_link_dir="/usr/lib64" -inc_link_dir="/usr/include" - - -# v1.5 jar dir -#v15_java_app_dir="/usr/local/lib/power" - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -function kill_client() { - #pid=$(ps -ef | grep "power" | grep -v "grep" | awk '{print $2}') - if [ -n "$(pidof power)" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/power || : - ${csudo}rm -f ${bin_link_dir}/powerdemo || : - ${csudo}rm -f ${bin_link_dir}/powerdump || : - ${csudo}rm -f ${bin_link_dir}/rmpower || : - ${csudo}rm -f ${bin_link_dir}/set_core || : -} - -function clean_lib() { - # Remove link - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - #${csudo}rm -rf ${v15_java_app_dir} || : -} - -function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taosdef.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : -} - -function clean_config() { - # Remove link - ${csudo}rm -f ${cfg_link_dir}/* || : -} - -function clean_log() { - # Remove link - ${csudo}rm -rf ${log_link_dir} || : -} - -# Stop client. -kill_client -# Remove binary file and links -clean_bin -# Remove header file. -clean_header -# Remove lib file -clean_lib -# Remove link log directory -clean_log -# Remove link configuration file -clean_config - -${csudo}rm -rf ${install_main_dir} - -echo -e "${GREEN}PowerDB client is removed successfully!${NC}" -echo diff --git a/packaging/tools/remove_client_pro.sh b/packaging/tools/remove_client_pro.sh deleted file mode 100755 index 3ed0cef1e98896f8160ae3d5a0c5e89bfc01500f..0000000000000000000000000000000000000000 --- a/packaging/tools/remove_client_pro.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/bash -# -# Script to stop the client and uninstall database, but retain the config and log files. 
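All of these uninstall scripts run under set -e, which is why nearly every cleanup command carries the `|| :` suffix: the no-op fallback keeps a failure (a unit that is not loaded, a link that is absent) from aborting the whole uninstall. A two-line illustration of the idiom, assuming the ${csudo} prefix defined in the scripts:

set -e
${csudo}systemctl stop tarbitratord || :  # a failed stop must not abort the uninstall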
-set -e -# set -x - -RED='\033[0;31m' -GREEN='\033[1;32m' -NC='\033[0m' - -#install main path -install_main_dir="/usr/local/ProDB" - -log_link_dir="/usr/local/ProDB/log" -cfg_link_dir="/usr/local/ProDB/cfg" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -lib64_link_dir="/usr/lib64" -inc_link_dir="/usr/include" - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -function kill_client() { - if [ -n "$(pidof prodbc)" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/prodbc || : - ${csudo}rm -f ${bin_link_dir}/prodemo || : - ${csudo}rm -f ${bin_link_dir}/prodump || : - ${csudo}rm -f ${bin_link_dir}/rmprodb || : - ${csudo}rm -f ${bin_link_dir}/set_core || : -} - -function clean_lib() { - # Remove link - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : -} - -function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taosdef.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : -} - -function clean_config() { - # Remove link - ${csudo}rm -f ${cfg_link_dir}/* || : -} - -function clean_log() { - # Remove link - ${csudo}rm -rf ${log_link_dir} || : -} - -# Stop client. -kill_client -# Remove binary file and links -clean_bin -# Remove header file. -clean_header -# Remove lib file -clean_lib -# Remove link log directory -clean_log -# Remove link configuration file -clean_config - -${csudo}rm -rf ${install_main_dir} - -echo -e "${GREEN}ProDB client is removed successfully!${NC}" -echo diff --git a/packaging/tools/remove_client_tq.sh b/packaging/tools/remove_client_tq.sh deleted file mode 100755 index a35eb9d46e4e33783b47fa33703764c10ee87b9b..0000000000000000000000000000000000000000 --- a/packaging/tools/remove_client_tq.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash -# -# Script to stop the client and uninstall database, but retain the config and log files. -set -e -# set -x - -RED='\033[0;31m' -GREEN='\033[1;32m' -NC='\033[0m' - -#install main path -install_main_dir="/usr/local/tq" - -log_link_dir="/usr/local/tq/log" -cfg_link_dir="/usr/local/tq/cfg" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -lib64_link_dir="/usr/lib64" -inc_link_dir="/usr/include" - - -# v1.5 jar dir -#v15_java_app_dir="/usr/local/lib/tq" - -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -function kill_client() { - #pid=$(ps -ef | grep "tq" | grep -v "grep" | awk '{print $2}') - if [ -n "$(pidof tq)" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/tq || : - ${csudo}rm -f ${bin_link_dir}/tqdemo || : - ${csudo}rm -f ${bin_link_dir}/tqdump || : - ${csudo}rm -f ${bin_link_dir}/rmtq || : - ${csudo}rm -f ${bin_link_dir}/set_core || : -} - -function clean_lib() { - # Remove link - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - #${csudo}rm -rf ${v15_java_app_dir} || : -} - -function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taosdef.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : -} - -function clean_config() { - # Remove link - ${csudo}rm -f ${cfg_link_dir}/* || : -} - -function clean_log() { - # Remove link - ${csudo}rm -rf ${log_link_dir} || : -} - -# Stop client. -kill_client -# Remove binary file and links -clean_bin -# Remove header file. 
-clean_header -# Remove lib file -clean_lib -# Remove link log directory -clean_log -# Remove link configuration file -clean_config - -${csudo}rm -rf ${install_main_dir} - -echo -e "${GREEN}TQ client is removed successfully!${NC}" -echo diff --git a/packaging/tools/remove_jh.sh b/packaging/tools/remove_jh.sh deleted file mode 100755 index 7b3abff42ad662160cf984adf73abeb51116a422..0000000000000000000000000000000000000000 --- a/packaging/tools/remove_jh.sh +++ /dev/null @@ -1,210 +0,0 @@ -#!/bin/bash -# -# Script to stop the service and uninstall jh_taos, but retain the config, data and log files. - -set -e -#set -x - -verMode=edge - -RED='\033[0;31m' -GREEN='\033[1;32m' -NC='\033[0m' - -#install main path -install_main_dir="/usr/local/jh_taos" -data_link_dir="/usr/local/jh_taos/data" -log_link_dir="/usr/local/jh_taos/log" -cfg_link_dir="/usr/local/jh_taos/cfg" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -lib64_link_dir="/usr/lib64" -inc_link_dir="/usr/include" -install_nginxd_dir="/usr/local/nginxd" - -service_config_dir="/etc/systemd/system" -service_name="jh_taosd" -tarbitrator_service_name="tarbitratord" -nginx_service_name="nginxd" -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - -function kill_process() { - pid=$(ps -ef | grep "jh_taosd" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/jh_taos || : - ${csudo}rm -f ${bin_link_dir}/jh_taosd || : - ${csudo}rm -f ${bin_link_dir}/jhdemo || : - ${csudo}rm -f ${bin_link_dir}/jh_taosdump || : - ${csudo}rm -f ${bin_link_dir}/rmjh || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || : -} - -function clean_lib() { - # Remove link - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : -} - -function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : -} - -function clean_config() { - # Remove link - ${csudo}rm -f ${cfg_link_dir}/* || : -} - -function clean_log() { - # Remove link - ${csudo}rm -rf ${log_link_dir} || : -} - -function clean_service_on_systemd() { - service_config="${service_config_dir}/${service_name}.service" - if systemctl is-active --quiet ${service_name}; then - echo "jh_iot's jh_taosd is running, stopping it..." - ${csudo}systemctl stop ${service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${service_name} &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${service_config} - - tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" - if systemctl is-active --quiet ${tarbitrator_service_name}; then - echo "jh_iot's tarbitrator is running, stopping it..." 
- ${csudo}systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${tarbitratord_service_config} - - if [ "$verMode" == "cluster" ]; then - nginx_service_config="${service_config_dir}/${nginx_service_name}.service" - if [ -d ${bin_dir}/web ]; then - if systemctl is-active --quiet ${nginx_service_name}; then - echo "Nginx for jh_iot is running, stopping it..." - ${csudo}systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null - - ${csudo}rm -f ${nginx_service_config} - fi - fi -} - -function clean_service_on_sysvinit() { - if pidof jh_taosd &> /dev/null; then - echo "jh_iot's jh_taosd is running, stopping it..." - ${csudo}service jh_taosd stop || : - fi - - if pidof tarbitrator &> /dev/null; then - echo "jh_iot's tarbitrator is running, stopping it..." - ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/jh_taosd ]; then - ${csudo}chkconfig --del jh_taosd || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/jh_taosd ]; then - ${csudo}insserv -r jh_taosd || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/jh_taosd ]; then - ${csudo}update-rc.d -f jh_taosd remove || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/jh_taosd || : - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function clean_service() { - if ((${service_mod}==0)); then - clean_service_on_systemd - elif ((${service_mod}==1)); then - clean_service_on_sysvinit - else - kill_process - kill_tarbitrator - fi -} - -# Stop service and disable booting start. -clean_service -# Remove binary file and links -clean_bin -# Remove header file. -clean_header -# Remove lib file -clean_lib -# Remove link log directory -clean_log -# Remove link configuration file -clean_config -# Remove data link directory -${csudo}rm -rf ${data_link_dir} || : - -${csudo}rm -rf ${install_main_dir} -${csudo}rm -rf ${install_nginxd_dir} -if [[ -e /etc/os-release ]]; then - osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -else - osinfo="" -fi - -echo -e "${GREEN}jh_iot is removed successfully!${NC}" -echo diff --git a/packaging/tools/remove_kh.sh b/packaging/tools/remove_kh.sh deleted file mode 100755 index 0c830455c7ef4950384f456ed55a7dd9ca706bfb..0000000000000000000000000000000000000000 --- a/packaging/tools/remove_kh.sh +++ /dev/null @@ -1,210 +0,0 @@ -#!/bin/bash -# -# Script to stop the service and uninstall kinghistorian, but retain the config, data and log files. 
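The server-side remove scripts, this one included, repeat the same init-system probe verbatim. A consolidated sketch of that ladder, simplified to omit the chkconfig/insserv/update-rc.d sub-probe, using command -v as the conventional spelling of the scripts' $(which ... &> /dev/null) test (encoding as in the scripts: 0 = systemd, 1 = sysvinit via service(8), 2 = manual kill):

detect_service_mod() {
  if pidof systemd &> /dev/null; then
    echo 0   # systemd units under /etc/systemd/system
  elif command -v service &> /dev/null; then
    echo 1   # sysvinit scripts under /etc/init.d
  else
    echo 2   # no service manager; fall back to kill -9
  fi
}
service_mod=$(detect_service_mod)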
- -set -e -#set -x - -verMode=edge - -RED='\033[0;31m' -GREEN='\033[1;32m' -NC='\033[0m' - -#install main path -install_main_dir="/usr/local/kinghistorian" -data_link_dir="/usr/local/kinghistorian/data" -log_link_dir="/usr/local/kinghistorian/log" -cfg_link_dir="/usr/local/kinghistorian/cfg" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -lib64_link_dir="/usr/lib64" -inc_link_dir="/usr/include" -install_nginxd_dir="/usr/local/nginxd" - -service_config_dir="/etc/systemd/system" -service_name="khserver" -tarbitrator_service_name="tarbitratord" -nginx_service_name="nginxd" -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - -function kill_process() { - pid=$(ps -ef | grep "khserver" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/khclient || : - ${csudo}rm -f ${bin_link_dir}/khserver || : - ${csudo}rm -f ${bin_link_dir}/khdemo || : - ${csudo}rm -f ${bin_link_dir}/khdump || : - ${csudo}rm -f ${bin_link_dir}/rmkh || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || : -} - -function clean_lib() { - # Remove link - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : -} - -function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : -} - -function clean_config() { - # Remove link - ${csudo}rm -f ${cfg_link_dir}/* || : -} - -function clean_log() { - # Remove link - ${csudo}rm -rf ${log_link_dir} || : -} - -function clean_service_on_systemd() { - service_config="${service_config_dir}/${service_name}.service" - if systemctl is-active --quiet ${service_name}; then - echo "KingHistorian's khserver is running, stopping it..." - ${csudo}systemctl stop ${service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${service_name} &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${service_config} - - tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" - if systemctl is-active --quiet ${tarbitrator_service_name}; then - echo "KingHistorian's tarbitrator is running, stopping it..." - ${csudo}systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${tarbitratord_service_config} - - if [ "$verMode" == "cluster" ]; then - nginx_service_config="${service_config_dir}/${nginx_service_name}.service" - if [ -d ${bin_dir}/web ]; then - if systemctl is-active --quiet ${nginx_service_name}; then - echo "Nginx for KingHistorian is running, stopping it..." 
- ${csudo}systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null - - ${csudo}rm -f ${nginx_service_config} - fi - fi -} - -function clean_service_on_sysvinit() { - if pidof khserver &> /dev/null; then - echo "KingHistorian's khserver is running, stopping it..." - ${csudo}service khserver stop || : - fi - - if pidof tarbitrator &> /dev/null; then - echo "KingHistorian's tarbitrator is running, stopping it..." - ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/khserver ]; then - ${csudo}chkconfig --del khserver || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/khserver ]; then - ${csudo}insserv -r khserver || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/khserver ]; then - ${csudo}update-rc.d -f khserver remove || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/khserver || : - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function clean_service() { - if ((${service_mod}==0)); then - clean_service_on_systemd - elif ((${service_mod}==1)); then - clean_service_on_sysvinit - else - kill_process - kill_tarbitrator - fi -} - -# Stop service and disable booting start. -clean_service -# Remove binary file and links -clean_bin -# Remove header file. -clean_header -# Remove lib file -clean_lib -# Remove link log directory -clean_log -# Remove link configuration file -clean_config -# Remove data link directory -${csudo}rm -rf ${data_link_dir} || : - -${csudo}rm -rf ${install_main_dir} -${csudo}rm -rf ${install_nginxd_dir} -if [[ -e /etc/os-release ]]; then - osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -else - osinfo="" -fi - -echo -e "${GREEN}KingHistorian is removed successfully!${NC}" -echo diff --git a/packaging/tools/remove_power.sh b/packaging/tools/remove_power.sh deleted file mode 100755 index 4216147bdf63b3e102d94a668abafd1de18c5ac1..0000000000000000000000000000000000000000 --- a/packaging/tools/remove_power.sh +++ /dev/null @@ -1,225 +0,0 @@ -#!/bin/bash -# -# Script to stop the service and uninstall PowerDB, but retain the config, data and log files. 
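The PID lookup repeated through these scripts, ps -ef | grep ... | grep -v grep | awk '{print $2}', can be written more directly with pgrep where it is available; a sketch against the powerd daemon handled below (-x matches the exact process name, so no self-grep filtering is needed):

pid=$(pgrep -x powerd || :)
if [ -n "$pid" ]; then
  ${csudo}kill -9 $pid || :
fi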
- -set -e -#set -x - -verMode=edge - -RED='\033[0;31m' -GREEN='\033[1;32m' -NC='\033[0m' - -#install main path -install_main_dir="/usr/local/power" -data_link_dir="/usr/local/power/data" -log_link_dir="/usr/local/power/log" -cfg_link_dir="/usr/local/power/cfg" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -lib64_link_dir="/usr/lib64" -inc_link_dir="/usr/include" -install_nginxd_dir="/usr/local/nginxd" - -# v1.5 jar dir -#v15_java_app_dir="/usr/local/lib/power" - -service_config_dir="/etc/systemd/system" -power_service_name="powerd" -tarbitrator_service_name="tarbitratord" -nginx_service_name="nginxd" -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - -function kill_powerd() { - pid=$(ps -ef | grep "powerd" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} -function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/power || : - ${csudo}rm -f ${bin_link_dir}/powerd || : - ${csudo}rm -f ${bin_link_dir}/powerdemo || : - ${csudo}rm -f ${bin_link_dir}/powerdump || : - ${csudo}rm -f ${bin_link_dir}/rmpower || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || : -} - -function clean_lib() { - # Remove link - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - #${csudo}rm -rf ${v15_java_app_dir} || : -} - -function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taosdef.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : -} - -function clean_config() { - # Remove link - ${csudo}rm -f ${cfg_link_dir}/* || : -} - -function clean_log() { - # Remove link - ${csudo}rm -rf ${log_link_dir} || : -} - -function clean_service_on_systemd() { - power_service_config="${service_config_dir}/${power_service_name}.service" - if systemctl is-active --quiet ${power_service_name}; then - echo "PowerDB powerd is running, stopping it..." - ${csudo}systemctl stop ${power_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${power_service_name} &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${power_service_config} - - tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" - if systemctl is-active --quiet ${tarbitrator_service_name}; then - echo "PowerDB tarbitrator is running, stopping it..." - ${csudo}systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${tarbitratord_service_config} - - if [ "$verMode" == "cluster" ]; then - nginx_service_config="${service_config_dir}/${nginx_service_name}.service" - if [ -d ${bin_dir}/web ]; then - if systemctl is-active --quiet ${nginx_service_name}; then - echo "Nginx for PowerDB is running, stopping it..." 
- ${csudo}systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null - - ${csudo}rm -f ${nginx_service_config} - fi - fi -} - -function clean_service_on_sysvinit() { - if pidof powerd &> /dev/null; then - echo "PowerDB powerd is running, stopping it..." - ${csudo}service powerd stop || : - fi - - if pidof tarbitrator &> /dev/null; then - echo "PowerDB tarbitrator is running, stopping it..." - ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/powerd ]; then - ${csudo}chkconfig --del powerd || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/powerd ]; then - ${csudo}insserv -r powerd || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/powerd ]; then - ${csudo}update-rc.d -f powerd remove || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/powerd || : - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function clean_service() { - if ((${service_mod}==0)); then - clean_service_on_systemd - elif ((${service_mod}==1)); then - clean_service_on_sysvinit - else - kill_powerd - kill_tarbitrator - fi -} - -# Stop service and disable booting start. -clean_service -# Remove binary file and links -clean_bin -# Remove header file. -clean_header -# Remove lib file -clean_lib -# Remove link log directory -clean_log -# Remove link configuration file -clean_config -# Remove data link directory -${csudo}rm -rf ${data_link_dir} || : - -${csudo}rm -rf ${install_main_dir} -${csudo}rm -rf ${install_nginxd_dir} -if [[ -e /etc/os-release ]]; then - osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -else - osinfo="" -fi - -#if echo $osinfo | grep -qwi "ubuntu" ; then -## echo "this is ubuntu system" -# ${csudo}rm -f /var/lib/dpkg/info/tdengine* || : -#elif echo $osinfo | grep -qwi "debian" ; then -## echo "this is debian system" -# ${csudo}rm -f /var/lib/dpkg/info/tdengine* || : -#elif echo $osinfo | grep -qwi "centos" ; then -## echo "this is centos system" -# ${csudo}rpm -e --noscripts tdengine || : -#fi - -echo -e "${GREEN}PowerDB is removed successfully!${NC}" -echo diff --git a/packaging/tools/remove_pro.sh b/packaging/tools/remove_pro.sh deleted file mode 100755 index f9e8069bb5bb14b44520d0bea3edf2225e6d6b06..0000000000000000000000000000000000000000 --- a/packaging/tools/remove_pro.sh +++ /dev/null @@ -1,211 +0,0 @@ -#!/bin/bash -# -# Script to stop the service and uninstall ProDB, but retain the config, data and log files. 
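On systemd hosts each script performs the same stop/disable/remove sequence once per unit (prodbs, tarbitratord, and nginxd in the script below). A hedged helper capturing that sequence with the surrounding variable names; the `|| :` fallbacks stand in for the scripts' `|| echo &> /dev/null`:

stop_and_disable() {
  local unit=$1
  if systemctl is-active --quiet "${unit}"; then
    ${csudo}systemctl stop "${unit}" &> /dev/null || :
  fi
  ${csudo}systemctl disable "${unit}" &> /dev/null || :
  ${csudo}rm -f "${service_config_dir}/${unit}.service"
}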
- -set -e -#set -x - -verMode=edge - -RED='\033[0;31m' -GREEN='\033[1;32m' -NC='\033[0m' - -#install main path -install_main_dir="/usr/local/ProDB" -data_link_dir="/usr/local/ProDB/data" -log_link_dir="/usr/local/ProDB/log" -cfg_link_dir="/usr/local/ProDB/cfg" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -lib64_link_dir="/usr/lib64" -inc_link_dir="/usr/include" -install_nginxd_dir="/usr/local/nginxd" - -service_config_dir="/etc/systemd/system" -prodb_service_name="prodbs" -tarbitrator_service_name="tarbitratord" -nginx_service_name="nginxd" -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - -function kill_prodbs() { - pid=$(ps -ef | grep "prodbs" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/prodbc || : - ${csudo}rm -f ${bin_link_dir}/prodbs || : - ${csudo}rm -f ${bin_link_dir}/prodemo || : - ${csudo}rm -f ${bin_link_dir}/prodump || : - ${csudo}rm -f ${bin_link_dir}/rmprodb || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || : -} - -function clean_lib() { - # Remove link - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : -} - -function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taosdef.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : -} - -function clean_config() { - # Remove link - ${csudo}rm -f ${cfg_link_dir}/* || : -} - -function clean_log() { - # Remove link - ${csudo}rm -rf ${log_link_dir} || : -} - -function clean_service_on_systemd() { - prodb_service_config="${service_config_dir}/${prodb_service_name}.service" - if systemctl is-active --quiet ${prodb_service_name}; then - echo "ProDB prodbs is running, stopping it..." - ${csudo}systemctl stop ${prodb_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${prodb_service_name} &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${prodb_service_config} - - tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" - if systemctl is-active --quiet ${tarbitrator_service_name}; then - echo "ProDB tarbitrator is running, stopping it..." - ${csudo}systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${tarbitratord_service_config} - - if [ "$verMode" == "cluster" ]; then - nginx_service_config="${service_config_dir}/${nginx_service_name}.service" - if [ -d ${bin_dir}/web ]; then - if systemctl is-active --quiet ${nginx_service_name}; then - echo "Nginx for ProDB is running, stopping it..." 
- ${csudo}systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null - - ${csudo}rm -f ${nginx_service_config} - fi - fi -} - -function clean_service_on_sysvinit() { - if pidof prodbs &> /dev/null; then - echo "ProDB prodbs is running, stopping it..." - ${csudo}service prodbs stop || : - fi - - if pidof tarbitrator &> /dev/null; then - echo "ProDB tarbitrator is running, stopping it..." - ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/prodbs ]; then - ${csudo}chkconfig --del prodbs || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/prodbs ]; then - ${csudo}insserv -r prodbs || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/prodbs ]; then - ${csudo}update-rc.d -f prodbs remove || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/prodbs || : - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function clean_service() { - if ((${service_mod}==0)); then - clean_service_on_systemd - elif ((${service_mod}==1)); then - clean_service_on_sysvinit - else - kill_prodbs - kill_tarbitrator - fi -} - -# Stop service and disable booting start. -clean_service -# Remove binary file and links -clean_bin -# Remove header file. -clean_header -# Remove lib file -clean_lib -# Remove link log directory -clean_log -# Remove link configuration file -clean_config -# Remove data link directory -${csudo}rm -rf ${data_link_dir} || : - -${csudo}rm -rf ${install_main_dir} -${csudo}rm -rf ${install_nginxd_dir} -if [[ -e /etc/os-release ]]; then - osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -else - osinfo="" -fi - -echo -e "${GREEN}ProDB is removed successfully!${NC}" -echo diff --git a/packaging/tools/remove_tq.sh b/packaging/tools/remove_tq.sh deleted file mode 100755 index e9a071f7d6b6bd5ae89c12cf8db5de014dbc2d52..0000000000000000000000000000000000000000 --- a/packaging/tools/remove_tq.sh +++ /dev/null @@ -1,217 +0,0 @@ -#!/bin/bash -# -# Script to stop the service and uninstall TQ, but retain the config, data and log files. 
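A subtlety shared by all of these scripts: `if $(which init &> /dev/null); then` expands to an empty command, and the shell then reports the exit status of the command substitution itself, so the guard works, though only by that obscure rule. The equivalent, more readable test:

if command -v init &> /dev/null; then
  ${csudo}init q || :  # ask init to re-read its configuration after the entries are removed
fi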
- -set -e -#set -x - -verMode=edge - -RED='\033[0;31m' -GREEN='\033[1;32m' -NC='\033[0m' - -#install main path -install_main_dir="/usr/local/tq" -data_link_dir="/usr/local/tq/data" -log_link_dir="/usr/local/tq/log" -cfg_link_dir="/usr/local/tq/cfg" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -lib64_link_dir="/usr/lib64" -inc_link_dir="/usr/include" -install_nginxd_dir="/usr/local/nginxd" - -# v1.5 jar dir -#v15_java_app_dir="/usr/local/lib/tq" - -service_config_dir="/etc/systemd/system" -tq_service_name="tqd" -tarbitrator_service_name="tarbitratord" -nginx_service_name="nginxd" -csudo="" -if command -v sudo > /dev/null; then - csudo="sudo " -fi - -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 - else - service_mod=2 - fi -else - service_mod=2 -fi - -function kill_tqd() { - pid=$(ps -ef | grep "tqd" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} - -function kill_tarbitrator() { - pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo}kill -9 $pid || : - fi -} -function clean_bin() { - # Remove link - ${csudo}rm -f ${bin_link_dir}/tq || : - ${csudo}rm -f ${bin_link_dir}/tqd || : - ${csudo}rm -f ${bin_link_dir}/tqdemo || : - ${csudo}rm -f ${bin_link_dir}/tqdump || : - ${csudo}rm -f ${bin_link_dir}/rmtq || : - ${csudo}rm -f ${bin_link_dir}/tarbitrator || : - ${csudo}rm -f ${bin_link_dir}/set_core || : - ${csudo}rm -f ${bin_link_dir}/run_taosd.sh || : -} - -function clean_lib() { - # Remove link - ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - #${csudo}rm -rf ${v15_java_app_dir} || : -} - -function clean_header() { - # Remove link - ${csudo}rm -f ${inc_link_dir}/taos.h || : - ${csudo}rm -f ${inc_link_dir}/taosdef.h || : - ${csudo}rm -f ${inc_link_dir}/taoserror.h || : -} - -function clean_config() { - # Remove link - ${csudo}rm -f ${cfg_link_dir}/* || : -} - -function clean_log() { - # Remove link - ${csudo}rm -rf ${log_link_dir} || : -} - -function clean_service_on_systemd() { - tq_service_config="${service_config_dir}/${tq_service_name}.service" - if systemctl is-active --quiet ${tq_service_name}; then - echo "TQ tqd is running, stopping it..." - ${csudo}systemctl stop ${tq_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${tq_service_name} &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${tq_service_config} - - tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" - if systemctl is-active --quiet ${tarbitrator_service_name}; then - echo "TQ tarbitrator is running, stopping it..." - ${csudo}systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null - ${csudo}rm -f ${tarbitratord_service_config} - - if [ "$verMode" == "cluster" ]; then - nginx_service_config="${service_config_dir}/${nginx_service_name}.service" - if [ -d ${bin_dir}/web ]; then - if systemctl is-active --quiet ${nginx_service_name}; then - echo "Nginx for TQ is running, stopping it..." 
- ${csudo}systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null - fi - ${csudo}systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null - - ${csudo}rm -f ${nginx_service_config} - fi - fi -} - -function clean_service_on_sysvinit() { - #restart_config_str="tq:2345:respawn:${service_config_dir}/tqd start" - #${csudo}sed -i "\|${restart_config_str}|d" /etc/inittab || : - - if pidof tqd &> /dev/null; then - echo "TQ tqd is running, stopping it..." - ${csudo}service tqd stop || : - fi - - if pidof tarbitrator &> /dev/null; then - echo "TQ tarbitrator is running, stopping it..." - ${csudo}service tarbitratord stop || : - fi - - if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/tqd ]; then - ${csudo}chkconfig --del tqd || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}chkconfig --del tarbitratord || : - fi - elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/tqd ]; then - ${csudo}insserv -r tqd || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}insserv -r tarbitratord || : - fi - elif ((${initd_mod}==3)); then - if [ -e ${service_config_dir}/tqd ]; then - ${csudo}update-rc.d -f tqd remove || : - fi - if [ -e ${service_config_dir}/tarbitratord ]; then - ${csudo}update-rc.d -f tarbitratord remove || : - fi - fi - - ${csudo}rm -f ${service_config_dir}/tqd || : - ${csudo}rm -f ${service_config_dir}/tarbitratord || : - - if $(which init &> /dev/null); then - ${csudo}init q || : - fi -} - -function clean_service() { - if ((${service_mod}==0)); then - clean_service_on_systemd - elif ((${service_mod}==1)); then - clean_service_on_sysvinit - else - kill_tqd - kill_tarbitrator - fi -} - -# Stop service and disable booting start. -clean_service -# Remove binary file and links -clean_bin -# Remove header file. 
-clean_header -# Remove lib file -clean_lib -# Remove link log directory -clean_log -# Remove link configuration file -clean_config -# Remove data link directory -${csudo}rm -rf ${data_link_dir} || : - -${csudo}rm -rf ${install_main_dir} -${csudo}rm -rf ${install_nginxd_dir} -if [[ -e /etc/os-release ]]; then - osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -else - osinfo="" -fi - -echo -e "${GREEN}TQ is removed successfully!${NC}" -echo diff --git a/packaging/tools/run_taosd.sh b/packaging/tools/run_taosd_and_taosadapter.sh similarity index 100% rename from packaging/tools/run_taosd.sh rename to packaging/tools/run_taosd_and_taosadapter.sh diff --git a/packaging/tools/startPre.sh b/packaging/tools/startPre.sh index 8a0ab11a4d37ffb9ad244faa2946cbbf10ce2026..341e88ab2eaccd2eb8d1b985194ed687cd7c4479 100755 --- a/packaging/tools/startPre.sh +++ b/packaging/tools/startPre.sh @@ -4,13 +4,16 @@ # set -e # set -x -taosd=/etc/systemd/system/taosd.service -line=`grep StartLimitBurst ${taosd}` +serverName="taosd" +logDir="/var/log/taos" + +taosd=/etc/systemd/system/${serverName}.service +line=$(grep StartLimitBurst ${taosd}) num=${line##*=} #echo "burst num: ${num}" -startSeqFile=/var/log/taos/.startSeq -recordFile=/var/log/taos/.startRecord +startSeqFile=${logDir}/.startSeq +recordFile=${logDir}/.startRecord startSeq=0 @@ -20,31 +23,29 @@ else startSeq=$(cat ${startSeqFile}) fi -nextSeq=`expr $startSeq + 1` -echo "${nextSeq}" > ${startSeqFile} +nextSeq=$(expr $startSeq + 1) +echo "${nextSeq}" >${startSeqFile} curTime=$(date "+%Y-%m-%d %H:%M:%S") -echo "startSeq:${startSeq} startPre.sh exec ${curTime}, burstCnt:${num}" >> ${recordFile} - +echo "startSeq:${startSeq} startPre.sh exec ${curTime}, burstCnt:${num}" >>${recordFile} -coreFlag=`ulimit -c` -echo "coreFlag: ${coreFlag}" >> ${recordFile} +coreFlag=$(ulimit -c) +echo "coreFlag: ${coreFlag}" >>${recordFile} -if [ ${coreFlag} = "0" ];then +if [ ${coreFlag} = "0" ]; then #echo "core is 0" - if [ ${num} != "20" ];then + if [ ${num} != "20" ]; then sed -i "s/^.*StartLimitBurst.*$/StartLimitBurst=20/" ${taosd} systemctl daemon-reload - echo "modify burst count from ${num} to 20" >> ${recordFile} + echo "modify burst count from ${num} to 20" >>${recordFile} fi fi -if [ ${coreFlag} = "unlimited" ];then +if [ ${coreFlag} = "unlimited" ]; then #echo "core is unlimited" - if [ ${num} != "3" ];then + if [ ${num} != "3" ]; then sed -i "s/^.*StartLimitBurst.*$/StartLimitBurst=3/" ${taosd} systemctl daemon-reload - echo "modify burst count from ${num} to 3" >> ${recordFile} + echo "modify burst count from ${num} to 3" >>${recordFile} fi fi - diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 2ddae0f903a6c42235343a6dd526d37e53147734..036a95fe15b0062fe5daff336cb4e6bda85b34b6 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -313,7 +313,12 @@ static void tscAsyncResultCallback(SSchedMsg *pMsg) { } assert(pSql->res.code != TSDB_CODE_SUCCESS); - tscError("0x%"PRIx64" async result callback, code:%s", pSql->self, tstrerror(pSql->res.code)); + if (tsShortcutFlag) { + tscDebug("0x%" PRIx64 " async result callback, code:%s", pSql->self, tstrerror(pSql->res.code)); + pSql->res.code = TSDB_CODE_SUCCESS; + } else { + tscError("0x%" PRIx64 " async result callback, code:%s", pSql->self, tstrerror(pSql->res.code)); + } SSqlRes *pRes = &pSql->res; if (pSql->fp == NULL || pSql->fetchFp == NULL){ diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 
85c2215a2e71746889403e60ed09279e64574750..bb3792850620b0a07ca599cef42906a6854a368a 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -485,7 +485,26 @@ static int32_t tscGetDBInfo(SCreateBuilder *builder, char *result) { if (row == NULL) { return TSDB_CODE_TSC_DB_NOT_SELECTED; } - const char *showColumns[] = {"REPLICA", "QUORUM", "DAYS", "KEEP", "BLOCKS", NULL}; + const char *showColumns[][2] = { + {"REPLICA", "REPLICA"}, + {"QUORUM", "QUORUM"}, + {"DAYS", "DAYS"}, +#ifdef _STORAGE + {"KEEP0,KEEP1,KEEP2", "KEEP"}, +#else + {"KEEP", "KEEP"}, +#endif + {"CACHE(MB)", "CACHE"}, + {"BLOCKS", "BLOCKS"}, + {"MINROWS", "MINROWS"}, + {"MAXROWS", "MAXROWS"}, + {"WALLEVEL", "WAL"}, + {"FSYNC", "FSYNC"}, + {"COMP", "COMP"}, + {"CACHELAST", "CACHELAST"}, + {"PRECISION", "PRECISION"}, + {"UPDATE", "UPDATE"}, + {NULL, NULL}}; SSqlObj *pSql = builder->pInterSql; TAOS_FIELD *fields = taos_fetch_fields(pSql); @@ -499,12 +518,16 @@ static int32_t tscGetDBInfo(SCreateBuilder *builder, char *result) { if (0 == ret && STR_NOCASE_EQUAL(buf, strlen(buf), builder->buf, strlen(builder->buf))) { snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "CREATE DATABASE %s", buf); for (int i = 1; i < num_fields; i++) { - for (int j = 0; showColumns[j] != NULL; j++) { - if (STR_NOCASE_EQUAL(fields[i].name, strlen(fields[i].name), showColumns[j], strlen(showColumns[j]))) { + for (int j = 0; showColumns[j][0] != NULL; j++) { + if (STR_NOCASE_EQUAL(fields[i].name, strlen(fields[i].name), showColumns[j][0], strlen(showColumns[j][0]))) { memset(buf, 0, sizeof(buf)); ret = tscGetNthFieldResult(row, fields, lengths, i, buf); if (ret == 0) { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), " %s %s", showColumns[j], buf); + if (STR_NOCASE_EQUAL(showColumns[j][0], strlen(showColumns[j][0]), "PRECISION", strlen("PRECISION"))) { + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), " %s '%s'", showColumns[j][1], buf); + } else { + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), " %s %s", showColumns[j][1], buf); + } } } } diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 19d537eb11a84ef7a5e64428b060a198f3497fb6..73337469b2b9a7ad696ba9a6dc24c734accd109a 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -1584,9 +1584,6 @@ int tsInsertInitialCheck(SSqlObj *pSql) { int32_t index = 0; SSqlCmd *pCmd = &pSql->cmd; - SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false); - assert(sToken.type == TK_INSERT || sToken.type == TK_IMPORT); - pCmd->count = 0; pCmd->command = TSDB_SQL_INSERT; SInsertStatementParam* pInsertParam = &pCmd->insertParam; @@ -1594,6 +1591,11 @@ int tsInsertInitialCheck(SSqlObj *pSql) { SQueryInfo *pQueryInfo = tscGetQueryInfoS(pCmd); TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT); + SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false); + if (sToken.type != TK_INSERT && sToken.type != TK_IMPORT) { + return tscSQLSyntaxErrMsg(pInsertParam->msg, NULL, sToken.z); + } + sToken = tStrGetToken(pSql->sqlstr, &index, false); if (sToken.type != TK_INTO) { return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword INTO is expected", sToken.z); diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index ea6ad8a48d7a9c4b5e992dffe8329735a0f47512..454f15829874a51a38428f3ffb420e4704e2151b 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1658,7 +1658,7 @@ int taos_stmt_prepare(TAOS_STMT* 
stmt, const char* sql, unsigned long length) { pRes->qId = 0; pRes->numOfRows = 0; - strcpy(pSql->sqlstr, sql); + strntolower(pSql->sqlstr, sql, (int32_t)sqlLen); tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr); if (tscIsInsertData(pSql->sqlstr)) { @@ -1849,6 +1849,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags tscResetSqlCmd(pCmd, false, pSql->self); pCmd->insertParam.pTableBlockHashList = hashList; } + code = tsParseSql(pStmt->pSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { @@ -2006,9 +2007,23 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, in STscStmt* pStmt = (STscStmt*)stmt; STMT_CHECK - if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX || colIdx < 0) { - tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self); - STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "invalid bind param")); + if (bind == NULL) { + tscError("0x%" PRIx64 " invalid parameter: bind is NULL", pStmt->pSql->self); + STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "invalid bind param: bind is NULL")); + } + + if (bind->num <= 0 || bind->num > INT16_MAX) { + char errMsg[128]; + sprintf(errMsg, "invalid parameter: bind->num:%d out of range [0, %d)", bind->num, INT16_MAX); + tscError("0x%" PRIx64 " %s", pStmt->pSql->self, errMsg); + STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), errMsg)); + } + + if (colIdx < 0) { + char errMsg[128]; + sprintf(errMsg, "invalid parameter: column index:%d less than 0", colIdx); + tscError("0x%" PRIx64 " %s", pStmt->pSql->self, errMsg); + STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), errMsg)); } if (!pStmt->isInsert) { diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c index c682138a354c312815060838120113e0f0f47004..bfc3b9f0be1cedfe1e7c3d66f1fa3ab16f5d37f3 100644 --- a/src/client/src/tscProfile.c +++ b/src/client/src/tscProfile.c @@ -280,7 +280,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { // } else { // pQdesc->stableQuery = 0; // } - + pthread_mutex_lock(&pSql->subState.mutex); if (pSql->pSubs != NULL && pSql->subState.states != NULL) { for (int32_t i = 0; i < pQdesc->numOfSub; ++i) { SSqlObj *psub = pSql->pSubs[i]; @@ -295,6 +295,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { p += len; } } + pthread_mutex_unlock(&pSql->subState.mutex); } pQdesc->numOfSub = htonl(pQdesc->numOfSub); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 45436754314dd4289ee335b41fcc3d3926335510..e439e947fa6e5a39e016eb9eb37e61d1634607f2 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2431,7 +2431,7 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t if (pTableMeta->tableType != TSDB_TEMP_TABLE) { tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMeta->id.uid); } - } else if (tokenId == TK_STRING || tokenId == TK_INTEGER || tokenId == TK_FLOAT) { // simple column projection query + } else if (tokenId == TK_STRING || tokenId == TK_INTEGER || tokenId == TK_FLOAT || tokenId == TK_BOOL) { // simple column projection query SColumnIndex index = COLUMN_INDEX_INITIALIZER; // user-specified constant value as a new result column @@ -2684,6 +2684,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col const char* msg25 = "count param should be greater than 0"; const char* msg26 = "start param cannot be 0 with 'log_bin'"; const char* msg27 = 
"factor param cannot be negative or equal to 0/1"; + const char* msg28 = "the second paramter of diff should be 0 or 1"; + switch (functionId) { case TSDB_FUNC_COUNT: { @@ -2782,9 +2784,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // no parameters or more than one parameter for function if (pItem->pNode->Expr.paramList == NULL || - (functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && functionId != TSDB_FUNC_ELAPSED && numOfParams != 1) || + (functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && functionId != TSDB_FUNC_ELAPSED && functionId != TSDB_FUNC_DIFF + && numOfParams != 1) || ((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3) || - (functionId == TSDB_FUNC_ELAPSED && numOfParams != 1 && numOfParams != 2)) { + (functionId == TSDB_FUNC_ELAPSED && numOfParams != 1 && numOfParams != 2) || + (functionId == TSDB_FUNC_DIFF && numOfParams != 1 && numOfParams != 2)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -2917,6 +2921,21 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, LONG_BYTES); } + } else if (functionId == TSDB_FUNC_DIFF) { + char val[8] = {0}; + if (numOfParams == 2) { + tVariant* variantDiff = &pParamElem[1].pNode->value; + if (variantDiff->nType != TSDB_DATA_TYPE_BIGINT) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + tVariantDump(variantDiff, val, TSDB_DATA_TYPE_BIGINT, true); + + int64_t ignoreNegative = GET_INT64_VAL(val); + if (ignoreNegative != 0 && ignoreNegative != 1) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg28); + } + } + tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t)); } SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex); @@ -3679,10 +3698,7 @@ static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SStrToken strncpy(tmpTokenBuf, pToken->z, pToken->n); pToken->z = tmpTokenBuf; - - if (pToken->type == TK_ID) { - pToken->n = stringProcess(pToken->z, pToken->n); - } + pToken->n = stringProcess(pToken->z, pToken->n); for (int16_t i = 0; i < numOfCols; ++i) { if (pToken->n != strlen(pSchema[i].name)) { @@ -3775,7 +3791,10 @@ int32_t getTableIndexImpl(SStrToken* pTableToken, SQueryInfo* pQueryInfo, SColum int32_t getTableIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) { SStrToken tableToken = {0}; - extractTableNameFromToken(pToken, &tableToken); + + if (pToken->z && (pToken->z[0] != TS_BACKQUOTE_CHAR || pToken->z[pToken->n - 1] != TS_BACKQUOTE_CHAR)) { + extractTableNameFromToken(pToken, &tableToken); + } if (getTableIndexImpl(&tableToken, pQueryInfo, pIndex) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; @@ -4892,7 +4911,7 @@ static int32_t validateSQLExprItemSQLFunc(SSqlCmd* pCmd, tSqlExpr* pExpr, return TSDB_CODE_SUCCESS; } -static int32_t validateSQLExprItemArithmeticExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, +static int32_t validateSQLExprItemOperatorExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type, uint64_t* uid, int32_t* height) { uint64_t uidLeft = 0; uint64_t uidRight = 0; @@ -4900,6 +4919,9 @@ static int32_t validateSQLExprItemArithmeticExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, int32_t rightType = SQLEXPR_TYPE_UNASSIGNED; const char* msg1 = "arithmetic expression composed with columns from 
different tables"; const char* msg2 = "arithmetic expression composed with functions/columns of different types"; + const char* msg3 = "comparison/logical expression involving string operands is not supported"; + const char* msg4 = "comparison/logical expression involving function result is not supported"; + int32_t leftHeight = 0; int32_t ret = validateSQLExprItem(pCmd, pExpr->pLeft, pQueryInfo, pList, &leftType, &uidLeft, &leftHeight); if (ret != TSDB_CODE_SUCCESS) { @@ -4918,7 +4940,9 @@ static int32_t validateSQLExprItemArithmeticExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, *height = (leftHeight > rightHeight) ? leftHeight + 1 : rightHeight+1; { - assert(leftType != SQLEXPR_TYPE_UNASSIGNED && rightType != SQLEXPR_TYPE_UNASSIGNED); + if (leftType == SQLEXPR_TYPE_UNASSIGNED || rightType == SQLEXPR_TYPE_UNASSIGNED) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "invalid operand expression"); + } // return invalid operation when one child aggregate and the other child scalar or column if ((leftType == SQLEXPR_TYPE_AGG && rightType == SQLEXPR_TYPE_SCALAR) || (rightType == SQLEXPR_TYPE_AGG && leftType == SQLEXPR_TYPE_SCALAR)) { @@ -4932,6 +4956,26 @@ static int32_t validateSQLExprItemArithmeticExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, } else if (leftType == SQLEXPR_TYPE_SCALAR || rightType == SQLEXPR_TYPE_SCALAR){ *type = SQLEXPR_TYPE_SCALAR; } + + // comparison/logical operations + if (pExpr->tokenId == TK_EQ || pExpr->tokenId == TK_NE || + pExpr->tokenId == TK_GT || pExpr->tokenId == TK_GE || + pExpr->tokenId == TK_LT || pExpr->tokenId == TK_LE || + pExpr->tokenId == TK_AND || pExpr->tokenId == TK_OR) { + if ((leftType == SQLEXPR_TYPE_VALUE && pExpr->pLeft->tokenId == TK_STRING) || + (rightType == SQLEXPR_TYPE_VALUE && pExpr->pRight->tokenId == TK_STRING)) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + if (leftType == SQLEXPR_TYPE_AGG || leftType == SQLEXPR_TYPE_SCALAR || + rightType == SQLEXPR_TYPE_AGG || rightType == SQLEXPR_TYPE_SCALAR) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + } else if (pExpr->tokenId == TK_ISNULL || pExpr->tokenId == TK_NOTNULL || + pExpr->tokenId == TK_IS || pExpr->tokenId == TK_LIKE || + pExpr->tokenId == TK_MATCH || pExpr->tokenId == TK_NMATCH || + pExpr->tokenId == TK_CONTAINS || pExpr->tokenId == TK_IN) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "unsupported filtering operations"); + } } return TSDB_CODE_SUCCESS; } @@ -4946,7 +4990,7 @@ static int32_t validateSQLExprItem(SSqlCmd* pCmd, tSqlExpr* pExpr, return TSDB_CODE_SUCCESS; } if (pExpr->type == SQL_NODE_EXPR) { - int32_t ret = validateSQLExprItemArithmeticExpr(pCmd, pExpr, pQueryInfo, pList, type, uid, height); + int32_t ret = validateSQLExprItemOperatorExpr(pCmd, pExpr, pQueryInfo, pList, type, uid, height); if (ret != TSDB_CODE_SUCCESS) { return ret; } @@ -5902,6 +5946,15 @@ static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) { if (!p->_node.pLeft || !p->_node.pRight) { break; } + + int32_t retVal = TSDB_CODE_SUCCESS; + if (p->_node.pLeft && (retVal = validateTagCondExpr(pCmd, p->_node.pLeft)) != TSDB_CODE_SUCCESS) { + return retVal; + } + + if (p->_node.pRight && (retVal = validateTagCondExpr(pCmd, p->_node.pRight)) != TSDB_CODE_SUCCESS) { + return retVal; + } if (IS_ARITHMETIC_OPTR(p->_node.optr)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); @@ -5937,8 +5990,6 @@ static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) { } else if (schemaType == TSDB_DATA_TYPE_FLOAT || schemaType == 
TSDB_DATA_TYPE_DOUBLE) { schemaType = TSDB_DATA_TYPE_DOUBLE; }
-
- int32_t retVal = TSDB_CODE_SUCCESS;
int32_t bufLen = 0; if (IS_NUMERIC_TYPE(vVariant->nType)) {
@@ -6904,7 +6955,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq } if (tscIsProjectionQuery(pQueryInfo)) {
- bool found = false;
+ bool found = false;
for (int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == pSchema[index.columnIndex].colId) {
@@ -6916,10 +6967,10 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (!found) { int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo); tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL, getNewResColId(pCmd));
-
+
SInternalField* pSupInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfCols); pSupInfo->visible = false;
-
+
pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; } }
@@ -6940,17 +6991,17 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg4 = "set tag value only available for table"; const char* msg5 = "only support add one tag"; const char* msg6 = "column can only be modified by super table";
-
+
const char* msg7 = "no tags can be dropped"; const char* msg8 = "only support one tag"; const char* msg9 = "tag name too long";
-
+
const char* msg10 = "invalid tag name"; const char* msg11 = "primary tag cannot be dropped"; const char* msg12 = "update normal column not supported"; const char* msg13 = "invalid tag value"; const char* msg14 = "tag value too long";
-
+
const char* msg15 = "no columns can be dropped"; const char* msg16 = "only support one column"; const char* msg17 = "invalid column name";
@@ -6958,7 +7009,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg19 = "invalid new tag name"; const char* msg20 = "table is not super table"; const char* msg21 = "only binary/nchar column length could be modified";
- const char* msg23 = "only column length coulbe be modified";
+ const char* msg23 = "only column length can be modified";
const char* msg24 = "invalid binary/nchar column length"; const char* msg25 = "json type error, should be string";
@@ -7021,7 +7072,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { if (ret != TSDB_CODE_SUCCESS) { return ret; }
-
+
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN) { if (tscGetNumOfTags(pTableMeta) == 1) {
@@ -7039,7 +7090,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- SStrToken name = {.z = pItem->pVar.pz, .n = pItem->pVar.nLen, .type = TK_STRING};
+ SStrToken name = {.z = pItem->pVar.pz, .n = pItem->pVar.nLen};
if (getColumnIndexByName(&name, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION;
@@ -7054,7 +7105,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { char name1[128] = {0}; strncpy(name1, pItem->pVar.pz, pItem->pVar.nLen);
-
+ stringProcess(name1, (int32_t)strlen(name1));
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
@@ -7077,12 +7128,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* 
pInfo) { SColumnIndex srcIndex = COLUMN_INDEX_INITIALIZER; SColumnIndex destIndex = COLUMN_INDEX_INITIALIZER; - SStrToken srcToken = {.z = pSrcItem->pVar.pz, .n = pSrcItem->pVar.nLen, .type = TK_STRING}; + SStrToken srcToken = {.z = pSrcItem->pVar.pz, .n = pSrcItem->pVar.nLen}; if (getColumnIndexByName(&srcToken, pQueryInfo, &srcIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(pMsg, msg17); } - SStrToken destToken = {.z = pDstItem->pVar.pz, .n = pDstItem->pVar.nLen, .type = TK_STRING}; + SStrToken destToken = {.z = pDstItem->pVar.pz, .n = pDstItem->pVar.nLen}; if (getColumnIndexByName(&destToken, pQueryInfo, &destIndex, tscGetErrorMsgPayload(pCmd)) == TSDB_CODE_SUCCESS) { return invalidOperationMsg(pMsg, msg19); } @@ -7091,6 +7142,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { char name[TSDB_COL_NAME_LEN] = {0}; strncpy(name, pItem->pVar.pz, pItem->pVar.nLen); + stringProcess(name, (int32_t)strlen(name)); TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name, tDataTypes[TSDB_DATA_TYPE_INT].bytes); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); @@ -7098,19 +7150,20 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { memset(name, 0, tListLen(name)); strncpy(name, pItem->pVar.pz, pItem->pVar.nLen); + stringProcess(name, (int32_t)strlen(name)); f = tscCreateField(TSDB_DATA_TYPE_INT, name, tDataTypes[TSDB_DATA_TYPE_INT].bytes); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) { // Note: update can only be applied to table not super table. // the following is used to handle tags value for table created according to super table pCmd->command = TSDB_SQL_UPDATE_TAGS_VAL; - + SArray* pVarList = pAlterSQL->varList; tVariantListItem* item = taosArrayGet(pVarList, 0); int16_t numOfTags = tscGetNumOfTags(pTableMeta); SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; - SStrToken name = {.type = TK_STRING, .z = item->pVar.pz, .n = item->pVar.nLen}; + SStrToken name = {.z = item->pVar.pz, .n = item->pVar.nLen}; if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -7243,14 +7296,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tVariantListItem* pItem = taosArrayGet(pAlterSQL->varList, 0); SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; - SStrToken name = {.type = TK_STRING, .z = pItem->pVar.pz, .n = pItem->pVar.nLen}; - - //handle Escape character backstick - bool inEscape = false; - if (name.z[0] == TS_BACKQUOTE_CHAR && name.z[name.n - 1] == TS_BACKQUOTE_CHAR) { - inEscape = true; - name.type = TK_ID; - } + SStrToken name = {.z = pItem->pVar.pz, .n = pItem->pVar.nLen}; if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(pMsg, msg17); @@ -7262,12 +7308,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { char name1[TSDB_COL_NAME_LEN] = {0}; tstrncpy(name1, pItem->pVar.pz, sizeof(name1)); - - int32_t nameLen = pItem->pVar.nLen; - if (inEscape) { - memmove(name1, name1 + 1, nameLen); - name1[nameLen - TS_BACKQUOTE_CHAR_SIZE] = '\0'; - } + stringProcess(name1, (int32_t)strlen(name1)); TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); @@ -7276,21 +7317,13 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return 
invalidOperationMsg(pMsg, msg16); } - TAOS_FIELD* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0); if (pItem->type != TSDB_DATA_TYPE_BINARY && pItem->type != TSDB_DATA_TYPE_NCHAR) { return invalidOperationMsg(pMsg, msg21); } SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; - SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)}; - - //handle Escape character backstick - bool inEscape = false; - if (name.z[0] == TS_BACKQUOTE_CHAR && name.z[name.n - 1] == TS_BACKQUOTE_CHAR) { - inEscape = true; - name.type = TK_ID; - } + SStrToken name = {.z = pItem->name, .n = (uint32_t)strlen(pItem->name)}; if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(pMsg, msg17); @@ -7326,12 +7359,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return invalidOperationMsg(pMsg, msg24); } - if (inEscape) { - memmove(name.z, name.z + 1, name.n); - name.z[name.n - TS_BACKQUOTE_CHAR_SIZE] = '\0'; - name.n -= TS_BACKQUOTE_CHAR_SIZE; - } - + stringProcess(name.z, name.n); TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); }else if (pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) { @@ -7345,13 +7373,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; - SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)}; - //handle Escape character backstick - if (name.z[0] == TS_BACKQUOTE_CHAR && name.z[name.n - 1] == TS_BACKQUOTE_CHAR) { - memmove(name.z, name.z + 1, name.n); - name.z[name.n - TS_BACKQUOTE_CHAR_SIZE] = '\0'; - name.n -= TS_BACKQUOTE_CHAR_SIZE; - } + SStrToken name = {.z = pItem->name, .n = (uint32_t)strlen(pItem->name)}; if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(pMsg, msg17); } @@ -7392,6 +7414,8 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return invalidOperationMsg(pMsg, msg24); } + stringProcess(name.z, name.n); + TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); } @@ -7745,9 +7769,8 @@ int32_t validateLimitNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlN */ if (pQueryInfo->limit.limit > 0) { pQueryInfo->vgroupLimit = pQueryInfo->limit.limit + pQueryInfo->limit.offset; - pQueryInfo->limit.limit = -1; + pQueryInfo->limit.limit += pQueryInfo->limit.offset; } - pQueryInfo->limit.offset = 0; } } else { diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 3849e90ce4526ea974792969217473eb8aef5925..06338b8fd6b55df198148c0fc4b9be38f5894576 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -332,7 +332,12 @@ int tscSendMsgToServer(SSqlObj *pSql) { .handle = NULL, .code = 0 }; - + + if ((rpcMsg.msgType == TSDB_MSG_TYPE_SUBMIT) && (tsShortcutFlag & TSDB_SHORTCUT_RB_RPC_SEND_SUBMIT)) { + rpcFreeCont(rpcMsg.pCont); + return TSDB_CODE_FAILED; + } + rpcSendRequest(pObj->pRpcObj->pDnodeConn, &pSql->epSet, &rpcMsg, &pSql->rpcRid); return TSDB_CODE_SUCCESS; } @@ -1933,9 +1938,6 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) { tstrncpy(pConnect->db, db, sizeof(pConnect->db)); pthread_mutex_unlock(&pObj->mutex); - tstrncpy(pConnect->clientVersion, version, sizeof(pConnect->clientVersion)); - tstrncpy(pConnect->msgVersion, "", 
sizeof(pConnect->msgVersion)); - pConnect->pid = htonl(taosGetPId()); taosGetCurrentAPPName(pConnect->appName, NULL); @@ -2035,7 +2037,6 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) { // TODO the expired hb and client can not be identified by server till now. SHeartBeatMsg *pHeartbeat = (SHeartBeatMsg *)pCmd->payload; - tstrncpy(pHeartbeat->clientVer, version, tListLen(pHeartbeat->clientVer)); pHeartbeat->numOfQueries = numOfQueries; pHeartbeat->numOfStreams = numOfStreams; diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 2a60448a3ea9da64db55062d2e1042db594d77f6..8eff6335774f60232f70b705efe978f1d7afa129 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -212,16 +212,8 @@ TAOS *taos_connect_internal(const char *ip, const char *user, const char *pass, tscDebug("%p DB connection is opening, rpcObj: %p, dnodeConn:%p", pObj, pObj->pRpcObj, pObj->pRpcObj->pDnodeConn); taos_free_result(pSql); - - // version compare only requires the first 3 segments of the version string - int code = taosCheckVersion(version, taos_get_server_info(pObj), 3); - if (code != 0) { - terrno = code; - taos_close(pObj); - return NULL; - } else { - return pObj; - } + + return pObj; } return NULL; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 3732e05df61f49b8025398ef0b959045cfd414f0..39289a55f482df04979ba79f500f9f19d04dac03 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -3167,6 +3167,8 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj) { const int32_t table_index = 0; + SSqlCmd * pCmd = &pSql->cmd; + SQueryInfo *pPQueryInfo = tscGetQueryInfo(pCmd); // Parent SQueryInfo SSqlObj *pNew = createSubqueryObj(pSql, table_index, tscRetrieveDataRes, trsupport, TSDB_SQL_SELECT, prevSqlObj); if (pNew != NULL) { // the sub query of two-stage super table query @@ -3176,8 +3178,14 @@ static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsuppo pQueryInfo->type |= TSDB_QUERY_TYPE_STABLE_SUBQUERY; // clear the limit/offset info, since it should not be sent to vnode to be executed. - pQueryInfo->limit.limit = -1; + if (pQueryInfo->limit.offset > 0 && pQueryInfo->limit.limit > 0) { + pQueryInfo->limit.limit += pQueryInfo->limit.offset; + } pQueryInfo->limit.offset = 0; + // if groupby must retrieve all subquery data + if(pPQueryInfo->groupbyColumn || pPQueryInfo->groupbyTag) { + pQueryInfo->limit.limit = -1; + } assert(trsupport->subqueryIndex < pSql->subState.numOfSub); @@ -3353,7 +3361,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) } } - tscError("0x%"PRIx64" Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj->self, + tscWarn("0x%"PRIx64" Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj->self, pParentObj->res.numOfRows, numOfFailed, numOfSub); tscDebug("0x%"PRIx64" cleanup %d tableMeta in hashTable before reparse sql", pParentObj->self, pParentObj->cmd.insertParam.numOfTables); diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 7041c922d9b8ff32d6c1340fe313769cf4cf9729..d5369e38f0eb0a64a375d4a30fc05173c6a6aafd 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -47,17 +47,7 @@ int32_t tscNumOfObj = 0; // number of sqlObj in current process. 
static void *tscCheckDiskUsageTmr;
void *tscRpcCache; // cache to keep rpc obj
int32_t tscNumOfThreads = 1; // num of rpc threads
-#ifdef _TD_POWER_
-char tscLogFileName[12] = "powerlog";
-#elif (_TD_TQ_ == true)
-char tscLogFileName[12] = "tqlog";
-#elif (_TD_PRO_ == true)
-char tscLogFileName[12] = "prolog";
-#elif (_TD_KH_ == true)
-char tscLogFileName[12] = "khclientlog";
-#else
char tscLogFileName[12] = "taoslog";
-#endif
int tscLogFileNum = 10;
static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently
@@ -397,6 +387,10 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) { cfg->cfgStatus = TAOS_CFG_CSTATUS_OPTION; } else { // set the user specified locale failed, use default LC_CTYPE as current locale locale = setlocale(LC_CTYPE, tsLocale);
+ if (locale == NULL) {
+ tscError("failed to set locale:%s, and failed to set default LC_CTYPE:%s", pStr, tsLocale);
+ return -1;
+ }
tscInfo("failed to set locale:%s, current locale:%s", pStr, tsLocale); }
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 3bdd8847b7d33263fafe1290d2b9e916bd8045be..6d6e41aa848524fbdf9e11fdb7f7106a2380c14f 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -4519,6 +4519,7 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) { pRes->final = finalBk; pRes->numOfTotal = num;
+ pthread_mutex_lock(&pSql->subState.mutex);
for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { taos_free_result(pSql->pSubs[i]); }
@@ -4526,6 +4527,7 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) { tfree(pSql->pSubs); tfree(pSql->subState.states); pSql->subState.numOfSub = 0;
+ pthread_mutex_unlock(&pSql->subState.mutex);
pthread_mutex_destroy(&pSql->subState.mutex); pSql->fp = fp;
@@ -5389,7 +5391,7 @@ int parseJsontoTagData(char* json, SKVRowBuilder* kvRowBuilder, char* errMsg, in varDataSetLen(nullTypeVal + CHAR_BYTES, INT_BYTES); *(uint32_t*)(varDataVal(nullTypeKey)) = jsonNULL; tdAddColToKVRow(kvRowBuilder, jsonIndex++, TSDB_DATA_TYPE_NCHAR, nullTypeKey, false); // add json null type
- if (!json || strtrim(json) == 0 || strcasecmp(json, "null") == 0){
+ if (!json || strtrim(json) == 0 || strncasecmp(json, "null", 4) == 0){
*(uint32_t*)(varDataVal(nullTypeVal + CHAR_BYTES)) = jsonNULL; tdAddColToKVRow(kvRowBuilder, jsonIndex++, TSDB_DATA_TYPE_NCHAR, nullTypeVal, true); // add json null value return TSDB_CODE_SUCCESS;
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index 890bed123bb1a03c93d676b1b12495c7a8b65ade..e1a4fe6eb8409a1d43ea5e8099f97163c6c4b8c0 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -64,6 +64,7 @@ extern int32_t tsCompressMsgSize; extern int32_t tsCompressColData; extern int32_t tsMaxNumOfDistinctResults; extern char tsTempDir[];
+extern int32_t tsShortcutFlag;
// query buffer management extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
diff --git a/src/common/src/tarithoperator.c b/src/common/src/tarithoperator.c
index 31c7e32773965c866f069d04910cbbc59c187762..d0183ba5b0998b8aa509e360c4dfc02e0721604c 100644
--- a/src/common/src/tarithoperator.c
+++ b/src/common/src/tarithoperator.c
@@ -21,7 +21,7 @@ #include "tcompare.h" #include "texpr.h"
-//GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i]));
+//GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i]));
void calc_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { int32_t *pLeft = 
(int32_t *)left; @@ -183,215 +183,219 @@ _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFn(int32_t srcType) { } void vectorAdd(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) { - int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; - int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; + int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; + int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; double *output=(double*)out; _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type); _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type); - - if ((len1) == (len2)) { - for (; i < (len2) && i >= 0; i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) + getVectorDoubleValueFnRight(right,i)); - } - } else if ((len1) == 1) { - for (; i >= 0 && i < (len2); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) + getVectorDoubleValueFnRight(right,i)); - } - } else if ((len2) == 1) { - for (; i >= 0 && i < (len1); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) + getVectorDoubleValueFnRight(right,0)); - } - } + + if ((len1) == (len2)) { + for (; i < (len2) && i >= 0; i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) + getVectorDoubleValueFnRight(right,i)); + } + } else if ((len1) == 1) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) + getVectorDoubleValueFnRight(right,i)); + } + } else if ((len2) == 1) { + for (; i >= 0 && i < (len1); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) + getVectorDoubleValueFnRight(right,0)); + } + } } + void vectorSub(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) { - int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; - int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; + int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; + int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 
1 : -1; double *output=(double*)out; _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type); _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type); - - if ((len1) == (len2)) { - for (; i < (len2) && i >= 0; i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - getVectorDoubleValueFnRight(right,i)); - } - } else if ((len1) == 1) { - for (; i >= 0 && i < (len2); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) - getVectorDoubleValueFnRight(right,i)); - } - } else if ((len2) == 1) { - for (; i >= 0 && i < (len1); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - getVectorDoubleValueFnRight(right,0)); - } - } + + if ((len1) == (len2)) { + for (; i < (len2) && i >= 0; i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - getVectorDoubleValueFnRight(right,i)); + } + } else if ((len1) == 1) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) - getVectorDoubleValueFnRight(right,i)); + } + } else if ((len2) == 1) { + for (; i >= 0 && i < (len1); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - getVectorDoubleValueFnRight(right,0)); + } + } } + void vectorMultiply(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) { - int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; - int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; + int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; + int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 
1 : -1; double *output=(double*)out; _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type); _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type); - - if ((len1) == (len2)) { - for (; i < (len2) && i >= 0; i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) * getVectorDoubleValueFnRight(right,i)); - } - } else if ((len1) == 1) { - for (; i >= 0 && i < (len2); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) * getVectorDoubleValueFnRight(right,i)); - } - } else if ((len2) == 1) { - for (; i >= 0 && i < (len1); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) * getVectorDoubleValueFnRight(right,0)); - } - } + + if ((len1) == (len2)) { + for (; i < (len2) && i >= 0; i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) * getVectorDoubleValueFnRight(right,i)); + } + } else if ((len1) == 1) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) * getVectorDoubleValueFnRight(right,i)); + } + } else if ((len2) == 1) { + for (; i >= 0 && i < (len1); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) * getVectorDoubleValueFnRight(right,0)); + } + } } -void vectorDivide(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) { - int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; - int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; + +void vectorDivide(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) { + int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; + int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 
1 : -1; double *output=(double*)out; _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type); _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type); - - if ((len1) == (len2)) { - for (; i < (len2) && i >= 0; i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - double v, u = 0.0; - GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) /getVectorDoubleValueFnRight(right,i)); - } - } else if ((len1) == 1) { - for (; i >= 0 && i < (len2); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - double v, u = 0.0; - GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) /getVectorDoubleValueFnRight(right,i)); - } - } else if ((len2) == 1) { - for (; i >= 0 && i < (len1); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - double v, u = 0.0; - GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,0)); - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) /getVectorDoubleValueFnRight(right,0)); - } - } -} + + if ((len1) == (len2)) { + for (; i < (len2) && i >= 0; i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) /getVectorDoubleValueFnRight(right,i)); + } + } else if ((len1) == 1) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) /getVectorDoubleValueFnRight(right,i)); + } + } else if ((len2) == 1) { + for (; i >= 0 && i < (len1); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, 
getVectorValueAddrFnRight(right,0)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) /getVectorDoubleValueFnRight(right,0)); + } + } +} + void vectorRemainder(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) { - int32_t i = (_ord == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; - int32_t step = (_ord == TSDB_ORDER_ASC) ? 1 : -1; + int32_t i = (_ord == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; + int32_t step = (_ord == TSDB_ORDER_ASC) ? 1 : -1; double *output=(double*)out; _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type); _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type); _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type); - - if (len1 == (len2)) { - for (; i >= 0 && i < (len2); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - double v, u = 0.0; - GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - ((int64_t)(getVectorDoubleValueFnLeft(left,i) / getVectorDoubleValueFnRight(right,i))) * getVectorDoubleValueFnRight(right,i)); - } - } else if (len1 == 1) { - for (; i >= 0 && i < (len2); i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - double v, u = 0.0; - GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) - ((int64_t)(getVectorDoubleValueFnLeft(left,0) / getVectorDoubleValueFnRight(right,i))) * getVectorDoubleValueFnRight(right,i)); - } - } else if ((len2) == 1) { - for (; i >= 0 && i < len1; i += step, output += 1) { - if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { - SET_DOUBLE_NULL(output); - continue; - } - double v, u = 0.0; - GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,0)); - if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { - SET_DOUBLE_NULL(output); - continue; - } - SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - ((int64_t)(getVectorDoubleValueFnLeft(left,i) / getVectorDoubleValueFnRight(right,0))) * getVectorDoubleValueFnRight(right,0)); - } - } + + if (len1 == (len2)) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - 
((int64_t)(getVectorDoubleValueFnLeft(left,i) / getVectorDoubleValueFnRight(right,i))) * getVectorDoubleValueFnRight(right,i)); + } + } else if (len1 == 1) { + for (; i >= 0 && i < (len2); i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) - ((int64_t)(getVectorDoubleValueFnLeft(left,0) / getVectorDoubleValueFnRight(right,i))) * getVectorDoubleValueFnRight(right,i)); + } + } else if ((len2) == 1) { + for (; i >= 0 && i < len1; i += step, output += 1) { + if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) { + SET_DOUBLE_NULL(output); + continue; + } + double v, u = 0.0; + GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,0)); + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) { + SET_DOUBLE_NULL(output); + continue; + } + SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - ((int64_t)(getVectorDoubleValueFnLeft(left,i) / getVectorDoubleValueFnRight(right,0))) * getVectorDoubleValueFnRight(right,0)); + } + } } _arithmetic_operator_fn_t getArithmeticOperatorFn(int32_t arithmeticOptr) { diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 2b84c486a38fbb2654cbac6fd64ccf3d6fce05da..6b5f3c97dd7fdb94525109028a0002396d8f95d8 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -65,6 +65,7 @@ char tsLocale[TSDB_LOCALE_LEN] = {0}; char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string int8_t tsEnableCoreFile = 0; int32_t tsMaxBinaryDisplayWidth = 30; +int32_t tsShortcutFlag = 0; // shortcut flag to facilitate debugging /* * denote if the server needs to compress response message at the application layer to client, including query rsp, @@ -1749,6 +1750,17 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_MB; taosInitConfigOption(cfg); + // shortcut flag to facilitate debugging + cfg.option = "shortcutFlag"; + cfg.ptr = &tsShortcutFlag; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 0; + cfg.maxValue = (1 << 24); + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + #ifdef TD_TSZ // lossy compress cfg.option = "lossyColumns"; diff --git a/src/connector/C#/README.md b/src/connector/C#/README.md new file mode 100644 index 0000000000000000000000000000000000000000..13a6d4cfd00b71dec570963d97c4f9eef6bbdfb7 --- /dev/null +++ b/src/connector/C#/README.md @@ -0,0 +1,434 @@ +## CSharp Connector + +* This C# connector supports: Linux 64/Windows x64/Windows x86. +* This C# connector can be downloaded and included as a normal package from [Nuget.org](https://www.nuget.org/packages/TDengine.Connector/). + +### Installation preparations + +* Install TDengine client. +* .NET interface file TDengineDriver.cs and reference samples both + are located under Windows client's installation path:install_directory/examples/C#. 
+* Install [.NET SDK](https://dotnet.microsoft.com/download)
+
+### Installation verification
+
+Run {client_installation_directory}/examples/C#/C#Checker/C#Checker.cs
+
+```cmd
+cd {client_install_directory}/examples/C\#/C#Checker
+:: run C#Checker.cs
+dotnet run -- -h
+```
+
+### Example Source Code
+
+You can find examples under the following directories:
+
+* {client_installation_directory}/examples/C#
+* [github C# example source code](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%23)
+
+**Tips:**
+"TDengineTest" is an example that includes some basic sample code like
+connect, query and so on.
+
+### Use C# connector
+
+#### **Prepare**
+
+**Tips:** The .NET SDK needs to be installed first.
+
+* Create a dotnet project (using a console project as an example).
+
+``` cmd
+mkdir test
+cd test
+dotnet new console
+```
+
+* Add "TDengine.Connector" as a package to the project through NuGet.
+
+``` cmd
+dotnet add package TDengine.Connector
+```
+
+#### **Connection**
+
+``` C#
+using TDengineDriver;
+using System.Runtime.InteropServices;
+// ... do something ...
+string host = "127.0.0.1";
+string configDir = "C:/TDengine/cfg"; // For Linux it should be /etc/taos.
+string user = "root";
+string password = "taosdata";
+string db = ""; // Can also be set to the name of the db you want to connect to.
+short port = 0;
+
+/* Set client options (optional step): charset, locale, timezone.
+ * Default: charset, locale and timezone are the same as the system's.
+ * Currently supported options: TSDB_OPTION_LOCALE, TSDB_OPTION_CHARSET, TSDB_OPTION_TIMEZONE, TSDB_OPTION_CONFIGDIR.
+*/
+TDengine.Options((int)TDengineInitOption.TSDB_OPTION_CONFIGDIR, configDir);
+
+// Get a TDengine connection
+IntPtr conn = TDengine.Connect(host, user, password, db, port);
+
+// Check whether the connection succeeded
+if (conn == IntPtr.Zero)
+{
+ Console.WriteLine("Connect to TDengine failed");
+}
+else
+{
+ Console.WriteLine("Connect to TDengine success");
+}
+
+// Close TDengine Connection
+if (conn != IntPtr.Zero)
+{
+ TDengine.Close(conn);
+}
+
+// It is suggested to clean up the environment before your application exits.
+TDengine.Cleanup();
+```
+
+#### **Execute SQL**
+
+```C#
+// Suppose conn is a valid TDengine connection from the previous Connection sample
+public static void ExecuteSQL(IntPtr conn, string sql)
+{
+ IntPtr res = TDengine.Query(conn, sql);
+ // Check whether the query succeeded
+ if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
+ {
+ Console.Write(sql + " failure, ");
+ // Get the error message when res is not a null pointer.
+ if (res != IntPtr.Zero)
+ {
+ Console.Write("reason:" + TDengine.Error(res));
+ }
+ }
+ else
+ {
+ Console.Write(sql + " success, {0} rows affected", TDengine.AffectRows(res));
+ //... do something with res ...
+
+ // Important: need to free the result to avoid a memory leak.
+ TDengine.FreeResult(res);
+ }
+}
+
+// Call the method to execute sql:
+ExecuteSQL(conn, $"create database if not exists {db};");
+ExecuteSQL(conn, $"use {db};");
+string createSql = "CREATE TABLE meters(ts TIMESTAMP, current FLOAT," +
+" voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT);";
+ExecuteSQL(conn, createSql);
+ExecuteSQL(conn, "INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES(now, 10.2, 219, 0.32);");
+ExecuteSQL(conn, $"drop database if exists {db};");
+```
+
+#### **Get Query Result**
+
+```C#
+// The following sample traverses the data retrieved from TDengine.
+// Requires: using System.Text; using System.Collections.Generic; using System.Runtime.InteropServices;
+public void ExecuteQuery(IntPtr conn, string sql)
+{
+ // "conn" is a valid TDengine connection which can
+ // be obtained from the previous "Connection" sample.
+ IntPtr res = TDengine.Query(conn, sql);
+ if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
+ {
+ Console.Write(sql.ToString() + " failure, ");
+ if (res != IntPtr.Zero)
+ {
+ Console.Write("reason: " + TDengine.Error(res));
+ }
+ // The query failed; handle the error, then stop here.
+ return;
+ }
+
+ // Data retrieved successfully; iterate through "res".
+
+ // Field count, i.e. the number of retrieved columns.
+ int fieldCount = TDengine.FieldCount(res);
+ Console.WriteLine("field count: " + fieldCount);
+
+ // Get the query result field information as a list.
+ List<TDengineMeta> metas = TDengine.FetchFields(res);
+ for (int j = 0; j < metas.Count; j++)
+ {
+ TDengineMeta meta = metas[j];
+ Console.WriteLine($"index:{j},type:{meta.type},typename:{meta.TypeName()},name:{meta.name},size:{meta.size}");
+ }
+
+ // Iterate over the rows of the retrieved results.
+ int queryRows = 0;
+ IntPtr rowdata;
+ StringBuilder builder = new StringBuilder();
+ while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero)
+ {
+ queryRows++;
+ IntPtr colLengthPtr = TDengine.FetchLengths(res);
+ int[] colLengthArr = new int[fieldCount];
+ Marshal.Copy(colLengthPtr, colLengthArr, 0, fieldCount);
+
+ for (int fields = 0; fields < fieldCount; ++fields)
+ {
+ TDengineMeta meta = metas[fields];
+ int offset = IntPtr.Size * fields;
+ IntPtr data = Marshal.ReadIntPtr(rowdata, offset);
+
+ builder.Append("---");
+
+ if (data == IntPtr.Zero)
+ {
+ builder.Append("NULL");
+ continue;
+ }
+ switch ((TDengineDataType)meta.type)
+ {
+ case TDengineDataType.TSDB_DATA_TYPE_BOOL:
+ bool v1 = Marshal.ReadByte(data) == 0 ? false : true;
+ builder.Append(v1.ToString());
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
+ sbyte v2 = (sbyte)Marshal.ReadByte(data);
+ builder.Append(v2.ToString());
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
+ short v3 = Marshal.ReadInt16(data);
+ builder.Append(v3.ToString());
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_INT:
+ int v4 = Marshal.ReadInt32(data);
+ builder.Append(v4.ToString());
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
+ long v5 = Marshal.ReadInt64(data);
+ builder.Append(v5.ToString());
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
+ float v6 = (float)Marshal.PtrToStructure(data, typeof(float));
+ builder.Append(v6.ToString());
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
+ double v7 = (double)Marshal.PtrToStructure(data, typeof(double));
+ builder.Append(v7.ToString());
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_BINARY:
+ string v8 = Marshal.PtrToStringUTF8(data, colLengthArr[fields]);
+ builder.Append(v8);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
+ long v9 = Marshal.ReadInt64(data);
+ builder.Append(v9.ToString());
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
+ string v10 = Marshal.PtrToStringUTF8(data, colLengthArr[fields]);
+ builder.Append(v10);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_UTINYINT:
+ byte v12 = Marshal.ReadByte(data);
+ builder.Append(v12.ToString());
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_USMALLINT:
+ ushort v13 = (ushort)Marshal.ReadInt16(data);
+ builder.Append(v13.ToString());
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_UINT:
+ uint v14 = (uint)Marshal.ReadInt32(data);
+ builder.Append(v14.ToString());
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_UBIGINT:
+ ulong v15 = (ulong)Marshal.ReadInt64(data);
+ builder.Append(v15.ToString());
+ break;
+ default:
+ builder.Append("unknown value");
+ break;
+ }
+ }
+ builder.Append("---");
+ }
+ // Do something with the result data, like print it.
+ Console.WriteLine(builder.ToString());
+
+ // Important: free "res".
+ TDengine.FreeResult(res);
+}
+```
+
+#### **Stmt Bind Sample**
+
+* Bind different types of data.
+
+```C#
+// Prepare tag values to be bound by stmt.
+// An instance of TAOS_BIND binds a single cell of a table.
+TAOS_BIND[] binds = new TAOS_BIND[1];
+binds[0] = TaosBind.BindNchar("-123acvnchar");
+// Use TaosBind.BindNil() to bind null values.
+
+long[] tsArr = new long[5] { 1637064040000, 1637064041000,
+1637064042000, 1637064043000, 1637064044000 };
+bool?[] boolArr = new bool?[5] { true, false, null, true, true };
+int?[] intArr = new int?[5] { -200, -100, null, 0, 300 };
+long?[] longArr = new long?[5] { long.MinValue + 1, -2000, null,
+1000, long.MaxValue };
+string[] binaryArr = new string[5] { "/TDengine/src/client/src/tscPrepare.c",
+ String.Empty, null, "doBindBatchParam",
+ "string.Jion:1234567890123456789012345" };
+
+// A TAOS_MULTI_BIND binds a whole column of data.
+TAOS_MULTI_BIND[] mBinds = new TAOS_MULTI_BIND[5];
+
+mBinds[0] = TaosMultiBind.MultiBindTimestamp(tsArr);
+mBinds[1] = TaosMultiBind.MultiBindBool(boolArr);
+mBinds[2] = TaosMultiBind.MultiBindInt(intArr);
+mBinds[3] = TaosMultiBind.MultiBindBigint(longArr);
+mBinds[4] = TaosMultiBind.MultiBindBinary(binaryArr);
+
+// After using the TAOS_BIND and TAOS_MULTI_BIND instances,
+// the allocated unmanaged memory needs to be freed.
+TaosMultiBind.FreeBind(binds);
+TaosMultiBind.FreeMBind(mBinds);
+```
+
+* Insert
+
+```C#
+ /* Prerequisite: create a stable or normal table.
+ * Target table for this sample: stmtdemo
+ * Structure: create stable stmtdemo (ts timestamp,b bool,v4 int,
+ * v8 bigint,bin binary(100)) tags(blob nchar(100));
+ */
+ // This conn should be a valid connection that is returned by TDengine.Connect().
+ IntPtr conn;
+ IntPtr stmt = IntPtr.Zero;
+ // Insert statement: one tag placeholder and five column placeholders,
+ // matching the stmtdemo structure above.
+ string sql = "insert into ? using stmtdemo tags(?) values(?,?,?,?,?)";
+ // Execute "use db" before StmtPrepare().
+
+ stmt = TDengine.StmtInit(conn);
+ TDengine.StmtPrepare(stmt, sql);
+
+ // StmtSetTbname() configures the table name, but requires
+ // that the table already exists.
+ // StmtSetTbname_tags() configures the table name and the tags'
+ // values (the sub table is created from the stable automatically).
+ TDengine.StmtSetTbname_tags(stmt, "t1", binds);
+
+ // Bind multiple rows of data.
+ TDengine.StmtBindParamBatch(stmt, mBinds);
+
+ // Add the current bind parameters into the batch.
+ TDengine.StmtAddBatch(stmt);
+
+ // Execute the batch that has been prepared by the bind methods.
+ TDengine.StmtExecute(stmt);
+
+ // Because we use unmanaged memory, remember to free it after execution.
+ TaosMultiBind.FreeBind(binds);
+ TaosMultiBind.FreeMBind(mBinds);
+
+ // Get error information if the current stmt operation failed.
+ // This method applies to all stmt methods.
+ TDengine.StmtError(stmt);
+```
+
+* Query
+
+``` C#
+stmt = TDengine.StmtInit(conn);
+
+string querySql = "SELECT * FROM T1 WHERE V4 > ? AND V8 < ?";
+TDengine.StmtPrepare(stmt, querySql);
+
+// Prepare query parameters.
+TAOS_BIND[] qparams = new TAOS_BIND[2];
+qparams[0] = TaosBind.BindInt(-2);
+qparams[1] = TaosBind.BindBigint(4);
+
+// Bind parameters.
+TDengine.StmtBindParam(stmt, qparams);
+
+// Execute
+TDengine.StmtExecute(stmt);
+
+// Get the query result (SELECT only).
+// The application should free it with FreeResult() at the end.
+IntPtr result = TDengine.StmtUseResult(stmt);
+
+// This "result" can be traversed like a normal sql query result.
+// ... Do something with "result" ...
+
+TDengine.FreeResult(result);
+
+// Because we use unmanaged memory, we need to free it after execution.
+TaosMultiBind.FreeBind(qparams);
+
+// Close stmt and release the resource.
+TDengine.StmtClose(stmt);
+```
+
+* Assert (samples showing how to check whether each stmt step succeeded or failed)
+
+```C#
+// Special case: StmtInit().
+IntPtr stmt = TDengine.StmtInit(conn);
+if (stmt == IntPtr.Zero)
+{
+ Console.WriteLine("Init stmt failed:{0}", TDengine.StmtErrorStr(stmt));
+ // ... do something ...
+}
+else
+{
+ Console.WriteLine("Init stmt success");
+ // Continue
+}
+
+// For all stmt methods that return an int, we can get the error message with StmtErrorStr().
+if (TDengine.StmtPrepare(stmt, sql) == 0)
+{
+ Console.WriteLine("stmt prepare success");
+ // Continue
+}
+else
+{
+ Console.WriteLine("stmt prepare failed:{0}", TDengine.StmtErrorStr(stmt));
+ // ... do something ...
+}
+
+// Determine whether StmtUseResult() succeeded or failed.
+// If it failed, get the error message with TDengine.Error(res)
+IntPtr res = TDengine.StmtUseResult(stmt);
+if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
+{
+ Console.Write("StmtUseResult failure, ");
+ if (res != IntPtr.Zero) {
+ Console.Write("reason: " + TDengine.Error(res));
+ }
+}
+else
+{
+ Console.WriteLine(sql.ToString() + " success");
+}
+```
+
+* More samples are available at [examples](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%23/).
+
+**Note:**
+
+* TDengine V2.0.3.0 supports both 32-bit and 64-bit Windows systems,
+ so when the .NET project generates a .exe file, please select the
+ corresponding "x86" or "x64" "Platform" under "Solution"/"Project".
+* This .NET interface has been verified in Visual Studio 2015/2017;
+ other VS versions have not been verified yet.
+* Since this .NET connector requires the taos.dll file, before
+ executing the application, copy the taos.dll file from the
+ Windows {client_install_directory}/driver directory to the folder where the
+ .NET project generates the .exe executable file. After running the exe
+ file, you can access the TDengine database and do operations such as insert
+ and query (this step can be skipped if the client has been installed on your machine). 
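+### Appendix: minimal program sketch
+
+The following console program is a sketch that stitches together the Connection and Execute SQL samples above. It only reuses APIs already shown in this document (Connect, Query, ErrorNo, Error, FreeResult, Close, Cleanup); the host, credentials and database name are the sample defaults and may need adjusting for your environment.
+
+```C#
+using System;
+using TDengineDriver;
+
+class MinimalDemo
+{
+    static void Main()
+    {
+        // Connect with the sample defaults (port 0 selects the configured default).
+        IntPtr conn = TDengine.Connect("127.0.0.1", "root", "taosdata", "", 0);
+        if (conn == IntPtr.Zero)
+        {
+            Console.WriteLine("Connect to TDengine failed");
+            return;
+        }
+
+        // Execute one statement and check the result, as in the ExecuteSQL sample.
+        IntPtr res = TDengine.Query(conn, "create database if not exists demo;");
+        if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
+        {
+            Console.Write("query failure, ");
+            if (res != IntPtr.Zero)
+            {
+                Console.Write("reason: " + TDengine.Error(res));
+            }
+        }
+        if (res != IntPtr.Zero)
+        {
+            TDengine.FreeResult(res); // Always free the result.
+        }
+
+        TDengine.Close(conn);
+        TDengine.Cleanup();
+    }
+}
+```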
diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt
index 1cdc3ed880dcde5e65d5d3a275dbc11f2e9534d3..d0e4e2351962b00c3843f437e93fe91c843df40b 100644
--- a/src/connector/jdbc/CMakeLists.txt
+++ b/src/connector/jdbc/CMakeLists.txt
@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
   SET(_output "${CMAKE_CURRENT_BINARY_DIR}/${JDBC_CMD_NAME}")
   file(GLOB_RECURSE _depends "${CMAKE_CURRENT_SOURCE_DIR}/src/*")
   ADD_CUSTOM_COMMAND(OUTPUT ${_output}
-    DEPENDS taos taos_static
+    POST_BUILD
     DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
     DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/deploy-pom.xml
     DEPENDS ${_depends}
diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml
index 4b5bcdee67e7d75f25f694e7e05c1b95c33acc65..eef10f5a6a23fd5e334be7d39abd7c5852f17436 100644
--- a/src/connector/jdbc/pom.xml
+++ b/src/connector/jdbc/pom.xml
@@ -52,6 +52,11 @@
             <artifactId>guava</artifactId>
            <version>30.1.1-jre</version>
         </dependency>
+        <dependency>
+            <groupId>org.java-websocket</groupId>
+            <artifactId>Java-WebSocket</artifactId>
+            <version>1.5.2</version>
+        </dependency>
         <dependency>
             <groupId>junit</groupId>
             <artifactId>junit</artifactId>
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
index 00eff99f45cb6aa8cc0fbc7bce40e0d82f401e05..6e8296950075f17020dcf163b33035f90f1fd879 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
@@ -92,6 +92,16 @@ public class TSDBDriver extends AbstractDriver {
      */
     public static final String PROPERTY_KEY_BATCH_ERROR_IGNORE = "batchErrorIgnore";

+    /**
+     * timeout in milliseconds to wait for a response message from the server
+     */
+    public static final String PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT = "messageWaitTimeout";
+
+    /**
+     * maximum number of requests that may be sent to the server concurrently
+     */
+    public static final String PROPERTY_KEY_MAX_CONCURRENT_REQUEST = "maxConcurrentRequest";
+
     private TSDBDatabaseMetaData dbMetaData = null;

     static {
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
index fdd034a641d7fd829059c73061305bdf38eae1bf..eb51da8aff9e789dead5930a3ee019907d68e3b7 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
@@ -4,12 +4,22 @@ import com.alibaba.fastjson.JSON;
 import com.alibaba.fastjson.JSONObject;
 import com.taosdata.jdbc.*;
 import com.taosdata.jdbc.utils.HttpClientPoolUtil;
+import com.taosdata.jdbc.ws.InFlightRequest;
+import com.taosdata.jdbc.ws.Transport;
+import com.taosdata.jdbc.ws.WSClient;
+import com.taosdata.jdbc.ws.WSConnection;

 import java.io.UnsupportedEncodingException;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.net.URLEncoder;
 import java.nio.charset.StandardCharsets;
 import java.sql.*;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Properties;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 import java.util.logging.Logger;

 public class RestfulDriver extends AbstractDriver {
@@ -39,20 +49,56 @@ public class RestfulDriver extends AbstractDriver {
         String port = props.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "6041");
         String database = props.containsKey(TSDBDriver.PROPERTY_KEY_DBNAME) ? props.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME) : null;

-        String loginUrl;
+        String user;
+        String password;
         try {
             if (!props.containsKey(TSDBDriver.PROPERTY_KEY_USER))
                 throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED);
             if (!props.containsKey(TSDBDriver.PROPERTY_KEY_PASSWORD))
                 throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED);

-            String user = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_USER), StandardCharsets.UTF_8.displayName());
-            String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), StandardCharsets.UTF_8.displayName());
-            loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password;
+            user = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_USER), StandardCharsets.UTF_8.displayName());
+            password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), StandardCharsets.UTF_8.displayName());
         } catch (UnsupportedEncodingException e) {
             throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "unsupported UTF-8 encoding, user: " + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + ", password: " + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
         }
-
+        String loginUrl;
+        String batchLoad = info.getProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD);
+        // The websocket path is deliberately switched off for now; restore the
+        // commented condition below to enable it.
+//        if (Boolean.parseBoolean(batchLoad)) {
+        if (false) {
+            loginUrl = "ws://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST)
+                    + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/ws";
+            WSClient client;
+            Transport transport;
+            try {
+                int timeout = props.containsKey(TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT)
+                        ? Integer.parseInt(props.getProperty(TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT))
+                        : Transport.DEFAULT_MESSAGE_WAIT_TIMEOUT;
+                int maxRequest = props.containsKey(TSDBDriver.PROPERTY_KEY_MAX_CONCURRENT_REQUEST)
+                        ? Integer.parseInt(props.getProperty(TSDBDriver.PROPERTY_KEY_MAX_CONCURRENT_REQUEST))
+                        : Transport.DEFAULT_MAX_REQUEST;
+                InFlightRequest inFlightRequest = new InFlightRequest(timeout, maxRequest);
+                CountDownLatch latch = new CountDownLatch(1);
+                Map<String, String> httpHeaders = new HashMap<>();
+                client = new WSClient(new URI(loginUrl), user, password, database, inFlightRequest, httpHeaders, latch, maxRequest);
+                transport = new Transport(client, inFlightRequest);
+                if (!client.connectBlocking()) {
+                    throw new SQLException("can't create a connection with the server");
+                }
+                if (!latch.await(timeout, TimeUnit.MILLISECONDS)) {
+                    throw new SQLException("auth timeout");
+                }
+                if (!client.isAuth()) {
+                    throw new SQLException("auth failure");
+                }
+            } catch (URISyntaxException e) {
+                throw new SQLException("websocket url parse error: " + loginUrl, e);
+            } catch (InterruptedException e) {
+                throw new SQLException("creating the websocket connection was interrupted", e);
+            }
+            return new WSConnection(url, props, transport, database, true);
+        }
+        loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password;

         int poolSize = Integer.parseInt(props.getProperty("httpPoolSize", HttpClientPoolUtil.DEFAULT_MAX_PER_ROUTE));
         boolean keepAlive = Boolean.parseBoolean(props.getProperty("httpKeepAlive", HttpClientPoolUtil.DEFAULT_HTTP_KEEP_ALIVE));
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/InFlightRequest.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/InFlightRequest.java
new file mode 100644
index 0000000000000000000000000000000000000000..773bb38a8ea60216a2d6046fc8c88453fd4ff27c
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/InFlightRequest.java
@@ -0,0 +1,58 @@
+package com.taosdata.jdbc.ws;
+
+import java.util.Map;
+import java.util.concurrent.*;
+
+/**
+ * Registry of requests that have been sent but not yet answered.
+ */
+public class InFlightRequest implements AutoCloseable {
+    private final int timeoutMillis;
+    private final Semaphore semaphore;
+    private final Map<String, ResponseFuture> futureMap = new ConcurrentHashMap<>();
+    private final ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor();
+    private final ScheduledFuture<?> scheduledFuture;
+
+    public InFlightRequest(int timeoutMillis, int concurrentNum) {
+        this.timeoutMillis = timeoutMillis;
+        this.semaphore = new Semaphore(concurrentNum);
+        // sweep timed-out requests once per timeout interval
+        this.scheduledFuture = scheduledExecutorService.scheduleAtFixedRate(this::removeTimeoutFuture, timeoutMillis, timeoutMillis, TimeUnit.MILLISECONDS);
+    }
+
+    public void put(ResponseFuture responseFuture) throws InterruptedException, TimeoutException {
+        if (semaphore.tryAcquire(timeoutMillis, TimeUnit.MILLISECONDS)) {
+            futureMap.put(responseFuture.getId(), responseFuture);
+        } else {
+            throw new TimeoutException();
+        }
+    }
+
+    public ResponseFuture remove(String id) {
+        ResponseFuture future = futureMap.remove(id);
+        if (null != future) {
+            semaphore.release();
+        }
+        return future;
+    }
+
+    private void removeTimeoutFuture() {
+        futureMap.entrySet().removeIf(entry -> {
+            if (System.nanoTime() - entry.getValue().getTimestamp() > timeoutMillis * 1_000_000L) {
+                try {
+                    entry.getValue().getFuture().completeExceptionally(new TimeoutException());
+                } finally {
+                    semaphore.release();
+                }
+                return true;
+            } else {
+                return false;
+            }
+        });
+    }
+
+    @Override
+    public void close() {
+        scheduledFuture.cancel(true);
+        scheduledExecutorService.shutdown();
+    }
+}
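`InFlightRequest` above and the `Transport`/`WSClient` pair below implement a simple request/response correlation scheme: every outgoing request is registered under its `"action_reqId"` key before it is written to the socket, and the reader thread completes the matching future when the reply arrives. A hedged sketch of a caller, assuming an already-authenticated `transport` (statement classes are not part of this patch, so nothing in the driver invokes this path yet):

```java
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import com.taosdata.jdbc.ws.Transport;
import com.taosdata.jdbc.ws.entity.Request;
import com.taosdata.jdbc.ws.entity.Response;

public class TransportSketch {
    // Send one query and block until taosadapter answers or the timeout fires.
    static Response query(Transport transport, String sql)
            throws InterruptedException, ExecutionException, TimeoutException {
        Request request = Request.generateQuery(sql); // allocates a fresh req_id
        return transport.send(request)                // registers the pending future
                .get(Transport.DEFAULT_MESSAGE_WAIT_TIMEOUT, TimeUnit.MILLISECONDS);
    }
}
```

The semaphore in `InFlightRequest` doubles as a back-pressure mechanism: once `maxConcurrentRequest` requests are pending, further `send()` calls block and eventually fail with a `TimeoutException`.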
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/ResponseFuture.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/ResponseFuture.java
new file mode 100644
index 0000000000000000000000000000000000000000..f2525c30bfe686739310454aa13a562065551190
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/ResponseFuture.java
@@ -0,0 +1,29 @@
+package com.taosdata.jdbc.ws;
+
+import com.taosdata.jdbc.ws.entity.Response;
+
+import java.util.concurrent.CompletableFuture;
+
+public class ResponseFuture {
+    private final String id;
+    private final CompletableFuture<Response> future;
+    private final long timestamp;
+
+    public ResponseFuture(String id, CompletableFuture<Response> future) {
+        this.id = id;
+        this.future = future;
+        timestamp = System.nanoTime();
+    }
+
+    public String getId() {
+        return id;
+    }
+
+    public CompletableFuture<Response> getFuture() {
+        return future;
+    }
+
+    long getTimestamp() {
+        return timestamp;
+    }
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/Transport.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/Transport.java
new file mode 100644
index 0000000000000000000000000000000000000000..9431e26585023d90db2bc79494d6f1603d4cecd3
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/Transport.java
@@ -0,0 +1,46 @@
+package com.taosdata.jdbc.ws;
+
+import com.taosdata.jdbc.ws.entity.Request;
+import com.taosdata.jdbc.ws.entity.Response;
+
+import java.sql.SQLException;
+import java.util.concurrent.CompletableFuture;
+
+/**
+ * Sends requests over the websocket and pairs each one with its response.
+ */
+public class Transport implements AutoCloseable {
+
+    public static final int DEFAULT_MAX_REQUEST = 100;
+    public static final int DEFAULT_MESSAGE_WAIT_TIMEOUT = 3_000;
+
+    private final WSClient client;
+    private final InFlightRequest inFlightRequest;
+
+    public Transport(WSClient client, InFlightRequest inFlightRequest) {
+        this.client = client;
+        this.inFlightRequest = inFlightRequest;
+    }
+
+    public CompletableFuture<Response> send(Request request) {
+        CompletableFuture<Response> completableFuture = new CompletableFuture<>();
+        try {
+            // register the pending request first so the reader thread can find it
+            inFlightRequest.put(new ResponseFuture(request.id(), completableFuture));
+            client.send(request.toString());
+        } catch (Throwable t) {
+            inFlightRequest.remove(request.id());
+            completableFuture.completeExceptionally(t);
+        }
+        return completableFuture;
+    }
+
+    public boolean isClosed() throws SQLException {
+        return client.isClosed();
+    }
+
+    @Override
+    public void close() throws SQLException {
+        client.close();
+    }
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSClient.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSClient.java
new file mode 100644
index 0000000000000000000000000000000000000000..d04ef1aba388c4588a7f85be5a19ac0c2776ccf1
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSClient.java
@@ -0,0 +1,114 @@
+package com.taosdata.jdbc.ws;
+
+import com.alibaba.fastjson.JSONObject;
+import com.taosdata.jdbc.ws.entity.*;
+import org.java_websocket.client.WebSocketClient;
+import org.java_websocket.handshake.ServerHandshake;
+
+import java.net.URI;
+import java.nio.ByteBuffer;
+import java.util.Map;
+import java.util.concurrent.*;
+
+public class WSClient extends WebSocketClient implements AutoCloseable {
+    private final String user;
+    private final String password;
+    private final String database;
+    private final CountDownLatch latch;
+
+    private final InFlightRequest inFlightRequest;
+    ThreadPoolExecutor executor;
+
+    private boolean auth;
+
+    public boolean isAuth() {
+        return auth;
+    }
+
+    /**
+     * create a websocket connection client
+     *
+     * @param serverUri       connection url
+     * @param user            database user
+     * @param password        database password
+     * @param database        connection database
+     * @param inFlightRequest pending-request registry shared with the Transport
+     * @param httpHeaders     extra headers for the websocket handshake
+     * @param latch           released once the "conn" reply has been processed
+     * @param maxRequest      bound for the message-parsing queue
+     */
+    public WSClient(URI serverUri, String user, String password, String database, InFlightRequest inFlightRequest, Map<String, String> httpHeaders, CountDownLatch latch, int maxRequest) {
+        super(serverUri, httpHeaders);
+        this.user = user;
+        this.password = password;
+        this.database = database;
+        this.inFlightRequest = inFlightRequest;
+        this.latch = latch;
+        // a single worker parses replies; CallerRunsPolicy applies back-pressure
+        // to the socket thread when the queue is full
+        executor = new ThreadPoolExecutor(1, 1,
+                0L, TimeUnit.MILLISECONDS,
+                new ArrayBlockingQueue<>(maxRequest),
+                r -> {
+                    Thread t = new Thread(r);
+                    t.setName("parse-message-" + t.getId());
+                    return t;
+                },
+                new ThreadPoolExecutor.CallerRunsPolicy());
+    }
+
+    @Override
+    public void onOpen(ServerHandshake serverHandshake) {
+        // authenticate as soon as the socket is open
+        Request request = Request.generateConnect(user, password, database);
+        this.send(request.toString());
+    }
+
+    @Override
+    public void onMessage(String message) {
+        if (!"".equals(message)) {
+            executor.submit(() -> {
+                JSONObject jsonObject = JSONObject.parseObject(message);
+                if (Action.CONN.getAction().equals(jsonObject.getString("action"))) {
+                    // record the auth result before releasing the latch so the
+                    // waiting driver thread cannot observe a stale value
+                    auth = Code.SUCCESS.getCode() == jsonObject.getInteger("code");
+                    if (!auth) {
+                        this.close();
+                    }
+                    latch.countDown();
+                } else {
+                    // route every other reply back to its caller by request id
+                    Response response = parseMessage(jsonObject);
+                    ResponseFuture remove = inFlightRequest.remove(response.id());
+                    if (null != remove) {
+                        remove.getFuture().complete(response);
+                    }
+                }
+            });
+        }
+    }
+
+    private Response parseMessage(JSONObject message) {
+        Action action = Action.of(message.getString("action"));
+        return message.toJavaObject(action.getResponseClazz());
+    }
+
+    @Override
+    public void onMessage(ByteBuffer bytes) {
+        super.onMessage(bytes);
+    }
+
+    @Override
+    public void onClose(int code, String reason, boolean remote) {
+        if (remote) {
+            throw new RuntimeException("The remote server closed the connection: " + reason);
+        } else {
+            throw new RuntimeException("close connection: " + reason);
+        }
+    }
+
+    @Override
+    public void onError(Exception e) {
+        this.close();
+    }
+
+    @Override
+    public void close() {
+        super.close();
+        executor.shutdown();
+        inFlightRequest.close();
+    }
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSConnection.java
new file mode 100644
index 0000000000000000000000000000000000000000..5e2195093df47e97643805012dfcebc271c7fe73
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSConnection.java
@@ -0,0 +1,64 @@
+package com.taosdata.jdbc.ws;
+
+import com.taosdata.jdbc.AbstractConnection;
+import com.taosdata.jdbc.TSDBDriver;
+import com.taosdata.jdbc.TSDBError;
+import com.taosdata.jdbc.TSDBErrorNumbers;
+import com.taosdata.jdbc.rs.RestfulDatabaseMetaData;
+
+import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Properties;
+
+public class WSConnection extends AbstractConnection {
+    private final Transport transport;
+    private final DatabaseMetaData metaData;
+    private final String database;
+    private boolean fetchType;
+
+    public WSConnection(String url, Properties properties, Transport transport, String database, boolean fetchType) {
+        super(properties);
+        this.transport = transport;
+        this.database = database;
+        this.fetchType = fetchType;
+        this.metaData = new RestfulDatabaseMetaData(url, properties.getProperty(TSDBDriver.PROPERTY_KEY_USER), this);
+    }
+
+    @Override
+    public Statement createStatement() throws SQLException {
+        if (isClosed())
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
+
+        // statement support over the websocket transport is not wired up yet
+//        return new WSStatement(transport, database, fetchType);
+        return null;
+    }
+
+    @Override
+    public PreparedStatement prepareStatement(String sql) throws SQLException {
+        if (isClosed())
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
+
+//        return new WSPreparedStatement();
+        return null;
+    }
+
+    @Override
+    public void close() throws SQLException {
+        transport.close();
+    }
+
+    @Override
+    public boolean isClosed() throws SQLException {
+        return transport.isClosed();
+    }
+
+    @Override
+    public DatabaseMetaData getMetaData() throws SQLException {
+        if (isClosed()) {
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
+        }
+        return this.metaData;
+    }
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Action.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Action.java
new file mode 100644
index 0000000000000000000000000000000000000000..8d5d8272d73596d0049c3be3aa8d475f501c802f
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Action.java
@@ -0,0 +1,48 @@
+package com.taosdata.jdbc.ws.entity;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * request type
+ */
+public enum Action {
+    CONN("conn", ConnectResp.class),
+    QUERY("query", QueryResp.class),
+    FETCH("fetch", FetchResp.class),
+    FETCH_JSON("fetch_json", FetchJsonResp.class),
+    // fetch_block replies are binary, so the mapped response class is unused
+    FETCH_BLOCK("fetch_block", Response.class),
+    ;
+    private final String action;
+    private final Class<? extends Response> clazz;
+
+    Action(String action, Class<? extends Response> clazz) {
+        this.action = action;
+        this.clazz = clazz;
+    }
+
+    public String getAction() {
+        return action;
+    }
+
+    public Class<? extends Response> getResponseClazz() {
+        return clazz;
+    }
+
+    private static final Map<String, Action> actions = new HashMap<>();
+
+    static {
+        for (Action value : Action.values()) {
+            actions.put(value.action, value);
+            IdUtil.init(value.action);
+        }
+    }
+
+    public static Action of(String action) {
+        if (null == action || action.equals("")) {
+            return null;
+        }
+        return actions.get(action);
+    }
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Code.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Code.java
new file mode 100644
index 0000000000000000000000000000000000000000..6b6d60858d447165a5c922f5e08a1db783f60e01
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Code.java
@@ -0,0 +1,35 @@
+package com.taosdata.jdbc.ws.entity;
+
+/**
+ * response status codes
+ */
+public enum Code {
+    SUCCESS(0, "success"),
+    ;
+
+    private final int code;
+    private final String message;
+
+    Code(int code, String message) {
+        this.code = code;
+        this.message = message;
+    }
+
+    public int getCode() {
+        return code;
+    }
+
+    public String getMessage() {
+        return message;
+    }
+
+    public static Code of(int code) {
+        for (Code value : Code.values()) {
+            if (value.code == code) {
+                return value;
+            }
+        }
+        return null;
+    }
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/ConnectResp.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/ConnectResp.java
new file mode 100644
index 0000000000000000000000000000000000000000..f1071ef74cc749812e23923f1dbf31a9086ae710
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/ConnectResp.java
@@ -0,0 +1,25 @@
+package com.taosdata.jdbc.ws.entity;
+
+/**
+ * connection result pojo
+ */
+public class ConnectResp extends Response {
+    private int code;
+    private String message;
+
+    public int getCode() {
+        return code;
+    }
+
+    public void setCode(int code) {
+        this.code = code;
+    }
+
+    public String getMessage() {
+        return message;
+    }
+
+    public void setMessage(String message) {
+        this.message = message;
+    }
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchBlockResp.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchBlockResp.java
new file mode 100644
index 0000000000000000000000000000000000000000..40052f68e9209525501dba2478bec97ff96b3c04
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchBlockResp.java
@@ -0,0 +1,4 @@
+package com.taosdata.jdbc.ws.entity;
+
+// placeholder: fetch_block responses arrive as binary frames, not JSON
+public class FetchBlockResp {
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchJsonResp.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchJsonResp.java
new file mode 100644
index 0000000000000000000000000000000000000000..bdf6d51232b8492fc5d4aaa5fb9e68ffa133a8f5
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchJsonResp.java
@@ -0,0 +1,22 @@
+package com.taosdata.jdbc.ws.entity;
+
+public class FetchJsonResp extends Response {
+    private long id;
+    private Object[][] data;
+
+    public Object[][] getData() {
+        return data;
+    }
+
+    public void setData(Object[][] data) {
+        this.data = data;
+    }
+
+    public long getId() {
+        return id;
+    }
+
+    public void setId(long id) {
+        this.id = id;
+    }
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchResp.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchResp.java
new file mode 100644
index 0000000000000000000000000000000000000000..45f5452007e4fc1122f6eb4f03e196bdbb8303ed
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchResp.java
@@ -0,0 +1,61 @@
+package com.taosdata.jdbc.ws.entity;
+
+/**
+ * fetch result pojo
+ */
+public class FetchResp extends Response {
+    private int code;
+    private String message;
+    private long id;
+    private boolean completed;
+    private int[] lengths;
+    private int rows;
+
+    public int getCode() {
+        return code;
+    }
+
+    public void setCode(int code) {
+        this.code = code;
+    }
+
+    public String getMessage() {
+        return message;
+    }
+
+    public void setMessage(String message) {
+        this.message = message;
+    }
+
+    public long getId() {
+        return id;
+    }
+
+    public void setId(long id) {
+        this.id = id;
+    }
+
+    public boolean isCompleted() {
+        return completed;
+    }
+
+    public void setCompleted(boolean completed) {
+        this.completed = completed;
+    }
+
+    public int[] getLengths() {
+        return lengths;
+    }
+
+    public void setLengths(int[] lengths) {
+        this.lengths = lengths;
+    }
+
+    public int getRows() {
+        return rows;
+    }
+
+    public void setRows(int rows) {
+        this.rows = rows;
+    }
+}
\ No newline at end of file
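The entity classes above give each taosadapter action a concrete response type, and `WSClient.parseMessage()` dispatches on the JSON `action` field via `Action.of()` and `getResponseClazz()`. A small illustration of that dispatch, using a hypothetical reply body; the exact field set on the wire is whatever taosadapter sends, and only `action` plus `req_id` are required for routing:

```java
import com.alibaba.fastjson.JSONObject;

import com.taosdata.jdbc.ws.entity.Action;
import com.taosdata.jdbc.ws.entity.FetchJsonResp;
import com.taosdata.jdbc.ws.entity.Response;

public class DispatchSketch {
    public static void main(String[] args) {
        // Hypothetical reply; real replies arrive over the websocket.
        String message = "{\"action\":\"fetch_json\",\"req_id\":1,\"id\":7,\"data\":[[1,\"a\"]]}";

        JSONObject jsonObject = JSONObject.parseObject(message);
        Action action = Action.of(jsonObject.getString("action"));       // Action.FETCH_JSON
        Response response = jsonObject.toJavaObject(action.getResponseClazz());

        System.out.println(response.id());                  // "fetch_json_1", the routing key
        System.out.println(response instanceof FetchJsonResp); // true
    }
}
```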
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/IdUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/IdUtil.java
new file mode 100644
index 0000000000000000000000000000000000000000..fb2aab51c61f91790b8c79a7e0898de5ab6fca8b
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/IdUtil.java
@@ -0,0 +1,20 @@
+package com.taosdata.jdbc.ws.entity;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * generates request ids, one counter per action
+ */
+public class IdUtil {
+    private static final Map<String, AtomicLong> ids = new HashMap<>();
+
+    public static long getId(String action) {
+        return ids.get(action).incrementAndGet();
+    }
+
+    public static void init(String action) {
+        ids.put(action, new AtomicLong(0));
+    }
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/QueryResp.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/QueryResp.java
new file mode 100644
index 0000000000000000000000000000000000000000..22e1418b685794fb8ad37417642daaff308c18cb
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/QueryResp.java
@@ -0,0 +1,114 @@
+package com.taosdata.jdbc.ws.entity;
+
+import com.alibaba.fastjson.annotation.JSONField;
+
+/**
+ * query result pojo
+ */
+public class QueryResp extends Response {
+    private int code;
+
+    public int getCode() {
+        return code;
+    }
+
+    public void setCode(int code) {
+        this.code = code;
+    }
+
+    private String message;
+
+    private long id;
+
+    @JSONField(name = "is_update")
+    private boolean isUpdate;
+
+    @JSONField(name = "affected_rows")
+    private int affectedRows;
+
+    @JSONField(name = "fields_count")
+    private int fieldsCount;
+
+    @JSONField(name = "fields_names")
+    private String[] fieldsNames;
+
+    @JSONField(name = "fields_types")
+    private int[] fieldsTypes;
+
+    @JSONField(name = "fields_lengths")
+    private int[] fieldsLengths;
+
+    private int precision;
+
+    public String getMessage() {
+        return message;
+    }
+
+    public void setMessage(String message) {
+        this.message = message;
+    }
+
+    public long getId() {
+        return id;
+    }
+
+    public void setId(long id) {
+        this.id = id;
+    }
+
+    public boolean isUpdate() {
+        return isUpdate;
+    }
+
+    public void setUpdate(boolean update) {
+        isUpdate = update;
+    }
+
+    public int getAffectedRows() {
+        return affectedRows;
+    }
+
+    public void setAffectedRows(int affectedRows) {
+        this.affectedRows = affectedRows;
+    }
+
+    public int getFieldsCount() {
+        return fieldsCount;
+    }
+
+    public void setFieldsCount(int fieldsCount) {
+        this.fieldsCount = fieldsCount;
+    }
+
+    public String[] getFieldsNames() {
+        return fieldsNames;
+    }
+
+    public void setFieldsNames(String[] fieldsNames) {
+        this.fieldsNames = fieldsNames;
+    }
+
+    public int[] getFieldsTypes() {
+        return fieldsTypes;
+    }
+
+    public void setFieldsTypes(int[] fieldsTypes) {
+        this.fieldsTypes = fieldsTypes;
+    }
+
+    public int[] getFieldsLengths() {
+        return fieldsLengths;
+    }
+
+    public void setFieldsLengths(int[] fieldsLengths) {
+        this.fieldsLengths = fieldsLengths;
+    }
+
+    public int getPrecision() {
+        return precision;
+    }
+
+    public void setPrecision(int precision) {
+        this.precision = precision;
+    }
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Request.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Request.java
new file mode 100644
index 0000000000000000000000000000000000000000..ca0fdf427d55901bea85537a083f70a3159a01f5
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Request.java
@@ -0,0 +1,156 @@
+package com.taosdata.jdbc.ws.entity;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.annotation.JSONField;
+
+/**
+ * request sent to taosadapter
+ */
+public class Request {
+    private String action;
+    private Payload args;
+
+    public Request(String action, Payload args) {
+        this.action = action;
+        this.args = args;
+    }
+
+    public String id() {
+        return action + "_" + args.getReqId();
+    }
+
+    public String getAction() {
+        return action;
+    }
+
+    public void setAction(String action) {
+        this.action = action;
+    }
+
+    public Payload getArgs() {
+        return args;
+    }
+
+    public void setArgs(Payload args) {
+        this.args = args;
+    }
+
+    @Override
+    public String toString() {
+        return JSON.toJSONString(this);
+    }
+
+    public static Request generateConnect(String user, String password, String db) {
+        long reqId = IdUtil.getId(Action.CONN.getAction());
+        ConnectReq connectReq = new ConnectReq(reqId, user, password, db);
+        return new Request(Action.CONN.getAction(), connectReq);
+    }
+
+    public static Request generateQuery(String sql) {
+        long reqId = IdUtil.getId(Action.QUERY.getAction());
+        QueryReq queryReq = new QueryReq(reqId, sql);
+        return new Request(Action.QUERY.getAction(), queryReq);
+    }
+
+    public static Request generateFetch(long id) {
+        long reqId = IdUtil.getId(Action.FETCH.getAction());
+        FetchReq fetchReq = new FetchReq(reqId, id);
+        return new Request(Action.FETCH.getAction(), fetchReq);
+    }
+
+    public static Request generateFetchJson(long id) {
+        long reqId = IdUtil.getId(Action.FETCH_JSON.getAction());
+        FetchReq fetchReq = new FetchReq(reqId, id);
+        return new Request(Action.FETCH_JSON.getAction(), fetchReq);
+    }
+
+    public static Request generateFetchBlock(long id) {
+        long reqId = IdUtil.getId(Action.FETCH_BLOCK.getAction());
+        FetchReq fetchReq = new FetchReq(reqId, id);
+        return new Request(Action.FETCH_BLOCK.getAction(), fetchReq);
+    }
+}
+
+class Payload {
+    @JSONField(name = "req_id")
+    private final long reqId;
+
+    public Payload(long reqId) {
+        this.reqId = reqId;
+    }
+
+    public long getReqId() {
+        return reqId;
+    }
+}
+
+class ConnectReq extends Payload {
+    private String user;
+    private String password;
+    private String db;
+
+    public ConnectReq(long reqId, String user, String password, String db) {
+        super(reqId);
+        this.user = user;
+        this.password = password;
+        this.db = db;
+    }
+
+    public String getUser() {
+        return user;
+    }
+
+    public void setUser(String user) {
+        this.user = user;
+    }
+
+    public String getPassword() {
+        return password;
+    }
+
+    public void setPassword(String password) {
+        this.password = password;
+    }
+
+    public String getDb() {
+        return db;
+    }
+
+    public void setDb(String db) {
+        this.db = db;
+    }
+}
+
+class QueryReq extends Payload {
+    private String sql;
+
+    public QueryReq(long reqId, String sql) {
+        super(reqId);
+        this.sql = sql;
+    }
+
+    public String getSql() {
+        return sql;
+    }
+
+    public void setSql(String sql) {
+        this.sql = sql;
+    }
+}
+
+class FetchReq extends Payload {
+    private long id;
+
+    public FetchReq(long reqId, long id) {
+        super(reqId);
+        this.id = id;
+    }
+
+    public long getId() {
+        return id;
+    }
+
+    public void setId(long id) {
+        this.id = id;
+    }
+}
\ No newline at end of file
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Response.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Response.java
new file mode 100644
index 0000000000000000000000000000000000000000..780e30067fdb14eeca465cc1d50842219a58774e
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Response.java
@@ -0,0 +1,33 @@
+package com.taosdata.jdbc.ws.entity;
+
+import com.alibaba.fastjson.annotation.JSONField;
+
+/**
+ * reply returned by taosadapter
+ */
+public class Response {
+    private String action;
+
+    @JSONField(name = "req_id")
+    private long reqId;
+
+    public String id() {
+        return action + "_" + reqId;
+    }
+
+    public String getAction() {
+        return action;
+    }
+
+    public void setAction(String action) {
+        this.action = action;
+    }
+
+    public long getReqId() {
+        return reqId;
+    }
+
+    public void setReqId(long reqId) {
+        this.reqId = reqId;
+    }
+}
\ No newline at end of file
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSConnectionTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..0719a5094ce6d9dbd96d0abb6f313a126f542621
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSConnectionTest.java
@@ -0,0 +1,63 @@
+package com.taosdata.jdbc.ws;
+
+import com.taosdata.jdbc.TSDBDriver;
+import com.taosdata.jdbc.annotation.CatalogRunner;
+import com.taosdata.jdbc.annotation.Description;
+import com.taosdata.jdbc.annotation.TestTarget;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import java.sql.*;
+import java.util.Properties;
+
+/**
+ * taosadapter must be running before these tests are executed
+ */
+@Ignore
+@RunWith(CatalogRunner.class)
+@TestTarget(alias = "test connection with server", author = "huolibo", version = "2.0.37")
+public class WSConnectionTest {
+    private static final String host = "192.168.1.98";
+    private static final int port = 6041;
+    private Connection connection;
+
+    @Test
+    @Description("normal test with websocket server")
+    public void normalConnection() throws SQLException {
+        String url = "jdbc:TAOS-RS://" + host + ":" + port + "/test?user=root&password=taosdata";
+        Properties properties = new Properties();
+        properties.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
+        connection = DriverManager.getConnection(url, properties);
+    }
+
+    @Test
+    @Description("url has no db")
+    public void withoutDBConnection() throws SQLException {
+        String url = "jdbc:TAOS-RS://" + host + ":" + port + "/?user=root&password=taosdata";
+        Properties properties = new Properties();
+        properties.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
+        connection = DriverManager.getConnection(url, properties);
+    }
+
+    @Test
+    @Description("user and password in property")
+    public void propertyUserPassConnection() throws SQLException {
+        String url = "jdbc:TAOS-RS://" + host + ":" + port + "/";
+        Properties properties = new Properties();
+        properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
+        properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
+        properties.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
+        connection = DriverManager.getConnection(url, properties);
+    }
+
+    @Test
+//    @Test(expected = SQLException.class)
+    @Description("wrong password or user")
+    public void wrongUserOrPasswordConnection() throws SQLException {
+        String url = "jdbc:TAOS-RS://" + host + ":" + port + "/test?user=abc&password=taosdata";
+        Properties properties = new Properties();
+        properties.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
+        connection = DriverManager.getConnection(url, properties);
+    }
+}
diff --git a/src/connector/node-red-contrib-tdengine/package-lock.json b/src/connector/node-red-contrib-tdengine/package-lock.json
index a2dcf04c0e2bbd549a6828df89ca4ae6c0d3bb93..37d4784caaa8e225290991cf8f598df2d7d122e8 100644
--- a/src/connector/node-red-contrib-tdengine/package-lock.json
+++ b/src/connector/node-red-contrib-tdengine/package-lock.json
@@ -1,35 +1,9 @@
 {
   "name": "node-red-contrib-tdengine",
-  "version": "0.0.2",
+  "version": "0.0.3",
   "lockfileVersion": 1,
   "requires": true,
"dependencies": { - "@babel/code-frame": { - "version": "7.16.0", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.16.0.tgz", - "integrity": "sha512-IF4EOMEV+bfYwOmNxGzSnjR2EmQod7f1UXOpZM3l4i4o4QNwzjtJAu/HxdjHq0aYBvdqMuQEY1eg0nqW9ZPORA==", - "dev": true, - "requires": { - "@babel/highlight": "^7.16.0" - } - }, - "@babel/helper-validator-identifier": { - "version": "7.15.7", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.15.7.tgz", - "integrity": "sha512-K4JvCtQqad9OY2+yTU8w+E82ywk/fe+ELNlt1G8z3bVGlZfn/hOcQQsUhGhW/N+tb3fxK800wLtKOE/aM0m72w==", - "dev": true - }, - "@babel/highlight": { - "version": "7.16.0", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.16.0.tgz", - "integrity": "sha512-t8MH41kUQylBtu2+4IQA3atqevA2lRgqA2wyVB/YiWmsDSuylZZuXOUy9ric30hfzauEFfdsuk/eXTRrGrfd0g==", - "dev": true, - "requires": { - "@babel/helper-validator-identifier": "^7.15.7", - "chalk": "^2.0.0", - "js-tokens": "^4.0.0" - } - }, "@babel/runtime": { "version": "7.16.5", "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.16.5.tgz", @@ -254,41 +228,6 @@ "integrity": "sha512-VkE3KLBmJwcCaVARtQpfuKcKv8gcBmUubrfHGF84dXuuW6jgsRYxPtzcIhPyK9WAPpRt2/xY6zkD9MnRaJzSyw==", "dev": true }, - "@sinonjs/commons": { - "version": "1.8.3", - "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-1.8.3.tgz", - "integrity": "sha512-xkNcLAn/wZaX14RPlwizcKicDk9G3F8m2nU3L7Ukm5zBgTwiT0wsoFAHx9Jq56fJA1z/7uKGtCRu16sOUCLIHQ==", - "dev": true, - "requires": { - "type-detect": "4.0.8" - } - }, - "@sinonjs/fake-timers": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-6.0.1.tgz", - "integrity": "sha512-MZPUxrmFubI36XS1DI3qmI0YdN1gks62JtFZvxR67ljjSNCeK6U08Zx4msEWOXuofgqUt6zPHSi1H9fbjR/NRA==", - "dev": true, - "requires": { - "@sinonjs/commons": "^1.7.0" - } - }, - "@sinonjs/samsam": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/@sinonjs/samsam/-/samsam-5.3.1.tgz", - "integrity": "sha512-1Hc0b1TtyfBu8ixF/tpfSHTVWKwCBLY4QJbkgnE7HcwyvT2xArDxb4K7dMgqRm3szI+LJbzmW/s4xxEhv6hwDg==", - "dev": true, - "requires": { - "@sinonjs/commons": "^1.6.0", - "lodash.get": "^4.4.2", - "type-detect": "^4.0.8" - } - }, - "@sinonjs/text-encoding": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/@sinonjs/text-encoding/-/text-encoding-0.7.1.tgz", - "integrity": "sha512-+iTbntw2IZPb/anVDbypzfQa+ay64MW0Zo8aJ8gZPWMMK6/OubMVb6lUPMagqjOPnmtauXnFCACVl3O7ogjeqQ==", - "dev": true - }, "@szmarczak/http-timer": { "version": "4.0.6", "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-4.0.6.tgz", @@ -331,12 +270,6 @@ "integrity": "sha512-6xwbrW4JJiJLgF+zNypN5wr2ykM9/jHcL7rQ8fZe2vuftggjzZeRSM4OwRc6Xk8qWjwJ99qVHo/JgOGmomWRog==", "dev": true }, - "@types/normalize-package-data": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.1.tgz", - "integrity": "sha512-Gj7cI7z+98M282Tqmp2K5EIsoouUEzbBJhQQzDE3jSIRk6r9gsz0oUokqIUR4u1R3dMHo0pDHM7sNOHyhulypw==", - "dev": true - }, "@types/responselike": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/@types/responselike/-/responselike-1.0.0.tgz", @@ -346,11 +279,6 @@ "@types/node": "*" } }, - "@ungap/promise-all-settled": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@ungap/promise-all-settled/-/promise-all-settled-1.1.2.tgz", - "integrity": 
"sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==" - }, "abbrev": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", @@ -403,30 +331,15 @@ "ansi-colors": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", - "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==" + "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", + "dev": true }, "ansi-regex": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==" - }, - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "dev": true, - "requires": { - "color-convert": "^1.9.0" - } - }, - "anymatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", - "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", - "requires": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - } + "optional": true }, "append-field": { "version": "1.0.0", @@ -499,7 +412,8 @@ "balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true }, "base64-js": { "version": "1.5.1", @@ -541,11 +455,6 @@ "integrity": "sha1-mrVie5PmBiH/fNrF2pczAn3x0Ms=", "dev": true }, - "binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==" - }, "bl": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", @@ -602,24 +511,12 @@ "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, - "braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "requires": { - "fill-range": "^7.0.1" - } - }, - "browser-stdout": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", - "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==" - }, "buffer": { "version": "5.7.1", "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", @@ -693,22 +590,6 @@ "responselike": "^2.0.0" } }, - "camelcase": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.1.tgz", - 
"integrity": "sha512-tVI4q5jjFV5CavAU8DXfza/TJcZutVKo/5Foskmsqcm0MsL91moHvwiGNnqaa2o6PF/7yT5ikDRcVcl8Rj6LCA==" - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dev": true, - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, "cheerio": { "version": "1.0.0-rc.10", "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.10.tgz", @@ -737,21 +618,6 @@ "domutils": "^2.7.0" } }, - "chokidar": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.2.tgz", - "integrity": "sha512-ekGhOnNVPgT77r4K/U3GDhu+FQ2S8TnK/s2KbIGXi0SZWuwkZ2QNyfWdZW+TVfn84DpEP7rLeCt2UI6bJ8GwbQ==", - "requires": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "fsevents": "~2.3.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" - } - }, "chownr": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", @@ -767,16 +633,6 @@ "colors": "1.0.3" } }, - "cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", - "requires": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" - } - }, "clone": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/clone/-/clone-2.1.2.tgz", @@ -792,21 +648,6 @@ "mimic-response": "^1.0.0" } }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", - "dev": true - }, "color-support": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/color-support/-/color-support-1.1.3.tgz", @@ -839,16 +680,11 @@ "minimist": "^1.1.0" } }, - "component-emitter": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz", - "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==", - "dev": true - }, "concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true }, "concat-stream": { "version": "1.6.2", @@ -960,12 +796,6 @@ "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=", "dev": true }, - "cookiejar": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/cookiejar/-/cookiejar-2.1.3.tgz", - "integrity": "sha512-JxbCBUdrfr6AQjOXrxoTvAMJO4HBTUIlBzslcJPAz+/KT8yk53fXun51u+RenNYvad/+Vc2DIz5o9UxlCDymFQ==", - "dev": true - }, "core-util-is": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", @@ -1016,11 +846,6 @@ "ms": "2.1.2" } }, - "decamelize": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", - "integrity": 
"sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==" - }, "decompress-response": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", @@ -1112,12 +937,6 @@ } } }, - "diff": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", - "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", - "dev": true - }, "dom-serializer": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.3.2.tgz", @@ -1176,7 +995,9 @@ "emoji-regex": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "optional": true }, "encodeurl": { "version": "1.0.2", @@ -1208,32 +1029,12 @@ "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", "dev": true }, - "error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", - "dev": true, - "requires": { - "is-arrayish": "^0.2.1" - } - }, - "escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==" - }, "escape-html": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=", "dev": true }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "dev": true - }, "esprima": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", @@ -1352,26 +1153,12 @@ } } }, - "extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", - "dev": true - }, "fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", "dev": true }, - "fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "requires": { - "to-regex-range": "^5.0.1" - } - }, "finalhandler": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz", @@ -1404,21 +1191,6 @@ } } }, - "find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "dev": true, - "requires": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - } - }, - "flat": { - "version": "5.0.2", - 
"resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", - "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==" - }, "follow-redirects": { "version": "1.14.7", "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.7.tgz", @@ -1435,12 +1207,6 @@ "mime-types": "^2.1.12" } }, - "formidable": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/formidable/-/formidable-1.2.6.tgz", - "integrity": "sha512-KcpbcpuLNOwrEjnbpMC0gS+X8ciDoZE1kkqzat4a8vrprf+s9pKNQ/QIwWfbfs4ltgmFl3MD177SNTkve3BwGQ==", - "dev": true - }, "forwarded": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", @@ -1486,18 +1252,7 @@ "fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" - }, - "fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", - "optional": true - }, - "function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", "dev": true }, "gauge": { @@ -1518,11 +1273,6 @@ "wide-align": "^1.1.2" } }, - "get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==" - }, "get-stream": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", @@ -1546,14 +1296,6 @@ "path-is-absolute": "^1.0.0" } }, - "glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "requires": { - "is-glob": "^4.0.1" - } - }, "got": { "version": "11.8.3", "resolved": "https://registry.npmjs.org/got/-/got-11.8.3.tgz", @@ -1579,26 +1321,6 @@ "integrity": "sha512-qkIilPUYcNhJpd33n0GBXTB1MMPp14TxEsEs0pTrsSVucApsYzW5V+Q8Qxhik6KU3evy+qkAAowTByymK0avdg==", "dev": true }, - "growl": { - "version": "1.10.5", - "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz", - "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==" - }, - "has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dev": true, - "requires": { - "function-bind": "^1.1.1" - } - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "dev": true - }, "has-unicode": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", @@ -1612,11 +1334,6 @@ "integrity": "sha512-WdZTbAByD+pHfl/g9QSsBIIwy8IT+EsPiKDs0KNX+zSHhdDLFKdZu0BQHljvO+0QI/BasbMSUa8wYNCZTvhslg==", "dev": true }, - "he": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", - "integrity": 
"sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==" - }, "help-me": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/help-me/-/help-me-3.0.0.tgz", @@ -1627,12 +1344,6 @@ "readable-stream": "^3.6.0" } }, - "hosted-git-info": { - "version": "2.8.9", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", - "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", - "dev": true - }, "hpagent": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/hpagent/-/hpagent-0.1.2.tgz", @@ -1726,6 +1437,7 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, "requires": { "once": "^1.3.0", "wrappy": "1" @@ -1734,7 +1446,8 @@ "inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true }, "ipaddr.js": { "version": "1.9.1", @@ -1742,61 +1455,12 @@ "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", "dev": true }, - "is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", - "dev": true - }, - "is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "requires": { - "binary-extensions": "^2.0.0" - } - }, - "is-core-module": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.8.0.tgz", - "integrity": "sha512-vd15qHsaqrRL7dtH6QNuy0ndJmRDrS9HAM1CAiSifNUFv4x1a0CCVsj18hJ1mShxIG6T2i1sO78MkP56r0nYRw==", - "dev": true, - "requires": { - "has": "^1.0.3" - } - }, - "is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=" - }, "is-fullwidth-code-point": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" - }, - "is-glob": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - "requires": { - "is-extglob": "^2.1.1" - } - }, - "is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==" - }, - "is-plain-obj": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", - "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==" - }, - "is-unicode-supported": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", - 
"integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==" + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "optional": true }, "is-utf8": { "version": "0.2.1", @@ -1810,17 +1474,6 @@ "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=", "dev": true }, - "isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" - }, - "js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "dev": true - }, "js-yaml": { "version": "3.14.1", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", @@ -1837,12 +1490,6 @@ "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", "dev": true }, - "json-parse-even-better-errors": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", - "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", - "dev": true - }, "json-schema-traverse": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", @@ -1871,12 +1518,6 @@ "universalify": "^2.0.0" } }, - "just-extend": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/just-extend/-/just-extend-4.2.1.tgz", - "integrity": "sha512-g3UB796vUFIY90VIv/WX3L2c8CS2MdWUww3CNrYmqza1Fg0DURc2K/O4YrnklBdQarSJ/y8JnJYDGc+1iumQjg==", - "dev": true - }, "keyv": { "version": "4.0.4", "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.0.4.tgz", @@ -1892,87 +1533,12 @@ "integrity": "sha1-wuep93IJTe6dNCAq6KzORoeHVYA=", "dev": true }, - "lines-and-columns": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", - "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", - "dev": true - }, - "locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "dev": true, - "requires": { - "p-locate": "^4.1.0" - } - }, "lodash.clonedeep": { "version": "4.5.0", "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", "integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8=", "dev": true }, - "lodash.get": { - "version": "4.4.2", - "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz", - "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk=", - "dev": true - }, - "log-symbols": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", - "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", - "requires": { - "chalk": "^4.1.0", - "is-unicode-supported": "^0.1.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } 
- }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, "lowercase-keys": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", @@ -2084,6 +1650,7 @@ "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dev": true, "requires": { "brace-expansion": "^1.1.7" } @@ -2119,141 +1686,6 @@ "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", "dev": true }, - "mocha": { - "version": "9.1.3", - "resolved": "https://registry.npmjs.org/mocha/-/mocha-9.1.3.tgz", - "integrity": "sha512-Xcpl9FqXOAYqI3j79pEtHBBnQgVXIhpULjGQa7DVb0Po+VzmSIK9kanAiWLHoRR/dbZ2qpdPshuXr8l1VaHCzw==", - "requires": { - "@ungap/promise-all-settled": "1.1.2", - "ansi-colors": "4.1.1", - "browser-stdout": "1.3.1", - "chokidar": "3.5.2", - "debug": "4.3.2", - "diff": "5.0.0", - "escape-string-regexp": "4.0.0", - "find-up": "5.0.0", - "glob": "7.1.7", - "growl": "1.10.5", - "he": "1.2.0", - "js-yaml": "4.1.0", - "log-symbols": "4.1.0", - "minimatch": "3.0.4", - "ms": "2.1.3", - "nanoid": "3.1.25", - "serialize-javascript": "6.0.0", - "strip-json-comments": "3.1.1", - "supports-color": "8.1.1", - "which": "2.0.2", - "workerpool": "6.1.5", - "yargs": "16.2.0", - "yargs-parser": "20.2.4", - "yargs-unparser": "2.0.0" - }, - "dependencies": { - "argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, - "debug": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz", - "integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==", - "requires": { - "ms": "2.1.2" - }, - "dependencies": { - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - } - } - }, - "diff": { - "version": "5.0.0", - "resolved": 
"https://registry.npmjs.org/diff/-/diff-5.0.0.tgz", - "integrity": "sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==" - }, - "escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==" - }, - "find-up": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", - "requires": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - } - }, - "glob": { - "version": "7.1.7", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", - "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "requires": { - "argparse": "^2.0.1" - } - }, - "locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", - "requires": { - "p-locate": "^5.0.0" - } - }, - "ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - }, - "p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "requires": { - "yocto-queue": "^0.1.0" - } - }, - "p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", - "requires": { - "p-limit": "^3.0.2" - } - }, - "supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, "moment": { "version": "2.29.1", "resolved": "https://registry.npmjs.org/moment/-/moment-2.29.1.tgz", @@ -2361,41 +1793,12 @@ "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==", "dev": true }, - "nanoid": { - "version": "3.1.25", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.25.tgz", - "integrity": "sha512-rdwtIXaXCLFAQbnfqDRnI6jaRHp9fTcYBjtFKE8eezcZ7LuLjhUaQGNeMXf1HmRoCH32CLz6XwX0TtxEOS/A3Q==" - }, "negotiator": { "version": "0.6.2", "resolved": 
"https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz", "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==", "dev": true }, - "nise": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/nise/-/nise-4.1.0.tgz", - "integrity": "sha512-eQMEmGN/8arp0xsvGoQ+B1qvSkR73B1nWSCh7nOt5neMCtwcQVYQGdzQMhcNscktTsWB54xnlSQFzOAPJD8nXA==", - "dev": true, - "requires": { - "@sinonjs/commons": "^1.7.0", - "@sinonjs/fake-timers": "^6.0.0", - "@sinonjs/text-encoding": "^0.7.1", - "just-extend": "^4.0.2", - "path-to-regexp": "^1.7.0" - }, - "dependencies": { - "path-to-regexp": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", - "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", - "dev": true, - "requires": { - "isarray": "0.0.1" - } - } - } - }, "node-addon-api": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-3.2.1.tgz", @@ -2404,9 +1807,9 @@ "optional": true }, "node-fetch": { - "version": "2.6.6", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.6.tgz", - "integrity": "sha512-Z8/6vRlTUChSdIgMa51jxQ4lrw/Jy5SOW10ObaA47/RElsAN2c5Pn8bTgFGWn/ibwzXTE8qwr1Yzx28vsecXEA==", + "version": "2.6.7", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", + "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", "dev": true, "optional": true, "requires": { @@ -2461,34 +1864,6 @@ } } }, - "node-red-node-test-helper": { - "version": "0.2.7", - "resolved": "https://registry.npmjs.org/node-red-node-test-helper/-/node-red-node-test-helper-0.2.7.tgz", - "integrity": "sha512-OanSQ1hrsigHVtMjL/cuhtjxhTdRBXxd3IALJC9eg0WOHRF75ZI7RYhFWqqOsvQ++BwmNj8ki1S49D8cZyZTWA==", - "dev": true, - "requires": { - "body-parser": "1.19.0", - "express": "4.17.1", - "read-pkg-up": "7.0.1", - "semver": "7.3.4", - "should": "^13.2.3", - "should-sinon": "0.0.6", - "sinon": "9.2.4", - "stoppable": "1.1.0", - "supertest": "4.0.2" - }, - "dependencies": { - "semver": { - "version": "7.3.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.4.tgz", - "integrity": "sha512-tCfb2WLjqFAtXn4KEdxIhalnRtoKFN7nAwj0B3ZXCbQloV2tq5eDbcTmT68JJD3nRJq24/XgxtQKFIpQdtvmVw==", - "dev": true, - "requires": { - "lru-cache": "^6.0.0" - } - } - } - }, "nopt": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz", @@ -2498,31 +1873,6 @@ "abbrev": "1" } }, - "normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "dev": true, - "requires": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - }, - "dependencies": { - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "dev": true - } - } - }, - "normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==" - }, 
"normalize-url": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz", @@ -2604,6 +1954,7 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dev": true, "requires": { "wrappy": "1" } @@ -2614,42 +1965,6 @@ "integrity": "sha512-BZOr3nRQHOntUjTrH8+Lh54smKHoHyur8We1V8DSMVrl5A2malOOwuJRnKRDjSnkoeBh4at6BwEnb5I7Jl31wg==", "dev": true }, - "p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dev": true, - "requires": { - "p-try": "^2.0.0" - } - }, - "p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "dev": true, - "requires": { - "p-limit": "^2.2.0" - } - }, - "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "dev": true - }, - "parse-json": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", - "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.0.0", - "error-ex": "^1.3.1", - "json-parse-even-better-errors": "^2.3.0", - "lines-and-columns": "^1.1.6" - } - }, "parse5": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", @@ -2705,20 +2020,10 @@ "integrity": "sha1-tVOaqPwiWj0a0XlHbd8ja0QPUuQ=", "dev": true }, - "path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==" - }, "path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" - }, - "path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", "dev": true }, "path-to-regexp": { @@ -2733,11 +2038,6 @@ "integrity": "sha1-HUCLP9t2kjuVQ9lvtMnf1TXZy10=", "dev": true }, - "picomatch": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz", - "integrity": "sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==" - }, "process-nextick-args": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", @@ -2800,14 +2100,6 @@ "integrity": "sha1-T2ih3Arli9P7lYSMMDJNt11kNgs=", "dev": true }, - "randombytes": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", - "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", - "requires": { - "safe-buffer": "^5.1.0" - } - }, "range-parser": { "version": "1.2.1", "resolved": 
"https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", @@ -2835,37 +2127,6 @@ "mute-stream": "~0.0.4" } }, - "read-pkg": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", - "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", - "dev": true, - "requires": { - "@types/normalize-package-data": "^2.4.0", - "normalize-package-data": "^2.5.0", - "parse-json": "^5.0.0", - "type-fest": "^0.6.0" - }, - "dependencies": { - "type-fest": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", - "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", - "dev": true - } - } - }, - "read-pkg-up": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz", - "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==", - "dev": true, - "requires": { - "find-up": "^4.1.0", - "read-pkg": "^5.2.0", - "type-fest": "^0.8.1" - } - }, "readable-stream": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", @@ -2877,14 +2138,6 @@ "util-deprecate": "^1.0.1" } }, - "readdirp": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", - "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", - "requires": { - "picomatch": "^2.2.1" - } - }, "regenerator-runtime": { "version": "0.13.9", "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz", @@ -2897,27 +2150,12 @@ "integrity": "sha1-M2Hs+jymwYKDOA3Qu5VG85D17Oc=", "dev": true }, - "require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=" - }, "require-from-string": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", "dev": true }, - "resolve": { - "version": "1.20.0", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz", - "integrity": "sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==", - "dev": true, - "requires": { - "is-core-module": "^2.2.0", - "path-parse": "^1.0.6" - } - }, "resolve-alpn": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", @@ -2952,7 +2190,8 @@ "safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true }, "safer-buffer": { "version": "2.1.2", @@ -3027,14 +2266,6 @@ } } }, - "serialize-javascript": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", - "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", - "requires": { - "randombytes": "^2.1.0" - } - }, "serve-static": { "version": "1.14.1", "resolved": 
"https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz", @@ -3060,66 +2291,6 @@ "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==", "dev": true }, - "should": { - "version": "13.2.3", - "resolved": "https://registry.npmjs.org/should/-/should-13.2.3.tgz", - "integrity": "sha512-ggLesLtu2xp+ZxI+ysJTmNjh2U0TsC+rQ/pfED9bUZZ4DKefP27D+7YJVVTvKsmjLpIi9jAa7itwDGkDDmt1GQ==", - "dev": true, - "requires": { - "should-equal": "^2.0.0", - "should-format": "^3.0.3", - "should-type": "^1.4.0", - "should-type-adaptors": "^1.0.1", - "should-util": "^1.0.0" - } - }, - "should-equal": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/should-equal/-/should-equal-2.0.0.tgz", - "integrity": "sha512-ZP36TMrK9euEuWQYBig9W55WPC7uo37qzAEmbjHz4gfyuXrEUgF8cUvQVO+w+d3OMfPvSRQJ22lSm8MQJ43LTA==", - "dev": true, - "requires": { - "should-type": "^1.4.0" - } - }, - "should-format": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/should-format/-/should-format-3.0.3.tgz", - "integrity": "sha1-m/yPdPo5IFxT04w01xcwPidxJPE=", - "dev": true, - "requires": { - "should-type": "^1.3.0", - "should-type-adaptors": "^1.0.1" - } - }, - "should-sinon": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/should-sinon/-/should-sinon-0.0.6.tgz", - "integrity": "sha512-ScBOH5uW5QVFaONmUnIXANSR6z5B8IKzEmBP3HE5sPOCDuZ88oTMdUdnKoCVQdLcCIrRrhRLPS5YT+7H40a04g==", - "dev": true - }, - "should-type": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/should-type/-/should-type-1.4.0.tgz", - "integrity": "sha1-B1bYzoRt/QmEOmlHcZ36DUz/XPM=", - "dev": true - }, - "should-type-adaptors": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/should-type-adaptors/-/should-type-adaptors-1.1.0.tgz", - "integrity": "sha512-JA4hdoLnN+kebEp2Vs8eBe9g7uy0zbRo+RMcU0EsNy+R+k049Ki+N5tT5Jagst2g7EAja+euFuoXFCa8vIklfA==", - "dev": true, - "requires": { - "should-type": "^1.3.0", - "should-util": "^1.0.0" - } - }, - "should-util": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/should-util/-/should-util-1.0.1.tgz", - "integrity": "sha512-oXF8tfxx5cDk8r2kYqlkUJzZpDBqVY/II2WhvU0n9Y3XYvAYRmeaf1PvvIvTgPnv4KJ+ES5M0PyDq5Jp+Ygy2g==", - "dev": true - }, "signal-exit": { "version": "3.0.6", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.6.tgz", @@ -3127,69 +2298,6 @@ "dev": true, "optional": true }, - "sinon": { - "version": "9.2.4", - "resolved": "https://registry.npmjs.org/sinon/-/sinon-9.2.4.tgz", - "integrity": "sha512-zljcULZQsJxVra28qIAL6ow1Z9tpattkCTEJR4RBP3TGc00FcttsP5pK284Nas5WjMZU5Yzy3kAIp3B3KRf5Yg==", - "dev": true, - "requires": { - "@sinonjs/commons": "^1.8.1", - "@sinonjs/fake-timers": "^6.0.1", - "@sinonjs/samsam": "^5.3.1", - "diff": "^4.0.2", - "nise": "^4.0.4", - "supports-color": "^7.1.0" - }, - "dependencies": { - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "spdx-correct": { - "version": "3.1.1", - "resolved": 
"https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz", - "integrity": "sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==", - "dev": true, - "requires": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-exceptions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", - "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==", - "dev": true - }, - "spdx-expression-parse": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", - "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", - "dev": true, - "requires": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-license-ids": { - "version": "3.0.11", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.11.tgz", - "integrity": "sha512-Ctl2BrFiM0X3MANYgj3CkygxhRmr9mi6xhejbdO960nF6EDJApTYpn0BQnDKlnNBULKiCN1n3w9EBkHK8ZWg+g==", - "dev": true - }, "split2": { "version": "3.2.2", "resolved": "https://registry.npmjs.org/split2/-/split2-3.2.2.tgz", @@ -3211,12 +2319,6 @@ "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=", "dev": true }, - "stoppable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/stoppable/-/stoppable-1.1.0.tgz", - "integrity": "sha512-KXDYZ9dszj6bzvnEMRYvxgeTHU74QBFL54XKtP3nyMuJ81CFYtABZ3bAzL2EdFUaEwJOBOgENyFj3R7oTzDyyw==", - "dev": true - }, "stream-shift": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", @@ -3233,6 +2335,8 @@ "version": "4.2.3", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "optional": true, "requires": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", @@ -3252,114 +2356,10 @@ "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "requires": { - "ansi-regex": "^5.0.1" - } - }, - "strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==" - }, - "superagent": { - "version": "3.8.3", - "resolved": "https://registry.npmjs.org/superagent/-/superagent-3.8.3.tgz", - "integrity": "sha512-GLQtLMCoEIK4eDv6OGtkOoSMt3D+oq0y3dsxMuYuDvaNUvuT8eFBuLmfR0iYYzHC1e8hpzC6ZsxbuP6DIalMFA==", - "dev": true, - "requires": { - "component-emitter": "^1.2.0", - "cookiejar": "^2.1.0", - "debug": "^3.1.0", - "extend": "^3.0.0", - "form-data": "^2.3.1", - "formidable": "^1.2.0", - "methods": "^1.1.1", - "mime": "^1.4.1", - "qs": "^6.5.1", - "readable-stream": "^2.3.5" - }, - "dependencies": { - "debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dev": true, - "requires": { - "ms": "^2.1.1" - } - }, - "form-data": { - "version": "2.5.1", - "resolved": 
"https://registry.npmjs.org/form-data/-/form-data-2.5.1.tgz", - "integrity": "sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA==", - "dev": true, - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - } - }, - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", - "dev": true - }, - "mime": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", - "dev": true - }, - "readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "dev": true, - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true - }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "dev": true, - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "supertest": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/supertest/-/supertest-4.0.2.tgz", - "integrity": "sha512-1BAbvrOZsGA3YTCWqbmh14L0YEq0EGICX/nBnfkfVJn7SrxQV1I3pMYjSzG9y/7ZU2V9dWqyqk2POwxlb09duQ==", - "dev": true, - "requires": { - "methods": "^1.1.2", - "superagent": "^3.8.3" - } - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", "dev": true, + "optional": true, "requires": { - "has-flag": "^3.0.0" + "ansi-regex": "^5.0.1" } }, "tar": { @@ -3376,14 +2376,6 @@ "yallist": "^4.0.0" } }, - "to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "requires": { - "is-number": "^7.0.0" - } - }, "toidentifier": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz", @@ -3422,18 +2414,6 @@ "integrity": "sha512-77EbyPPpMz+FRFRuAFlWMtmgUWGe9UOG2Z25NqCwiIjRhOf5iKGuzSe5P2w1laq+FkRy4p+PCuVkJSGkzTEKVw==", "dev": true }, - "type-detect": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", - "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", - "dev": true - }, - "type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "dev": true - }, 
"type-is": { "version": "1.6.18", "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", @@ -3510,16 +2490,6 @@ "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", "dev": true }, - "validate-npm-package-license": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", - "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", - "dev": true, - "requires": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" - } - }, "vary": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", @@ -3544,14 +2514,6 @@ "webidl-conversions": "^3.0.0" } }, - "which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "requires": { - "isexe": "^2.0.0" - } - }, "wide-align": { "version": "1.1.5", "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.5.tgz", @@ -3562,48 +2524,11 @@ "string-width": "^1.0.2 || 2 || 3 || 4" } }, - "workerpool": { - "version": "6.1.5", - "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.1.5.tgz", - "integrity": "sha512-XdKkCK0Zqc6w3iTxLckiuJ81tiD/o5rBE/m+nXpRCB+/Sq4DqkfXZ/x0jW02DG1tGsfUGXbTJyZDP+eu67haSw==" - }, - "wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "requires": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - } - } - }, "wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true }, "ws": { "version": "7.5.1", @@ -3633,51 +2558,11 @@ "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", "dev": true }, - "y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==" - }, "yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", "dev": true - }, - "yargs": { - "version": "16.2.0", - "resolved": 
"https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", - "requires": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - } - }, - "yargs-parser": { - "version": "20.2.4", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.4.tgz", - "integrity": "sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==" - }, - "yargs-unparser": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", - "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", - "requires": { - "camelcase": "^6.0.0", - "decamelize": "^4.0.0", - "flat": "^5.0.2", - "is-plain-obj": "^2.1.0" - } - }, - "yocto-queue": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==" } } } diff --git a/src/connector/node-red-contrib-tdengine/package.json b/src/connector/node-red-contrib-tdengine/package.json index fb467f23a9a0530e8d09cfba17f2b843263da5c3..4db92895bba3ac9158ff37ba32698de1320ce7e8 100644 --- a/src/connector/node-red-contrib-tdengine/package.json +++ b/src/connector/node-red-contrib-tdengine/package.json @@ -1,6 +1,6 @@ { "name": "node-red-contrib-tdengine", - "version": "0.0.2", + "version": "0.0.3", "description": "", "main": "tdengine.js", "repository": { @@ -10,8 +10,7 @@ "author": "kevinpan45@163.com", "license": "ISC", "dependencies": { - "axios": "^0.24.0", - "mocha": "^9.1.3" + "axios": "^0.24.0" }, "node-red": { "nodes": { @@ -23,7 +22,6 @@ "tdengine" ], "devDependencies": { - "node-red": "^2.1.4", - "node-red-node-test-helper": "^0.2.7" + "node-red": "^2.1.4" } } diff --git a/src/connector/node-rest/package-lock.json b/src/connector/node-rest/package-lock.json index 035b317fe72d030293fd2c56d3ee9999b7c59264..c60bffc65d4f1446e060695462b0bde54a28c22f 100644 --- a/src/connector/node-rest/package-lock.json +++ b/src/connector/node-rest/package-lock.json @@ -276,6 +276,11 @@ "which": "^2.0.1" } }, + "data-uri-to-buffer": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.0.tgz", + "integrity": "sha512-Vr3mLBA8qWmcuschSLAOogKgQ/Jwxulv3RNE4FXnYWRGujzrRWQI4m12fQqRkwX06C0KanhLr4hK+GydchZsaA==" + }, "debug": { "version": "4.3.2", "resolved": "https://registry.nlark.com/debug/download/debug-4.3.2.tgz?cache=0&other_urls=https%3A%2F%2Fregistry.nlark.com%2Fdebug%2Fdownload%2Fdebug-4.3.2.tgz", @@ -549,6 +554,15 @@ "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", "dev": true }, + "fetch-blob": { + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.1.4.tgz", + "integrity": "sha512-Eq5Xv5+VlSrYWEqKrusxY1C3Hm/hjeAsCGVG3ft7pZahlUAChpGZT/Ms1WmSLnEAisEXszjzu/s+ce6HZB2VHA==", + "requires": { + "node-domexception": "^1.0.0", + "web-streams-polyfill": "^3.0.3" + } + }, "file-entry-cache": { "version": "6.0.1", "resolved": "https://registry.npm.taobao.org/file-entry-cache/download/file-entry-cache-6.0.1.tgz?cache=0&sync_timestamp=1613794546707&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Ffile-entry-cache%2Fdownload%2Ffile-entry-cache-6.0.1.tgz", @@ -580,6 +594,14 @@ 
"integrity": "sha1-C+4AUBiusmDQo6865ljdATbsG5k=", "dev": true }, + "formdata-polyfill": { + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", + "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", + "requires": { + "fetch-blob": "^3.1.2" + } + }, "fs.realpath": { "version": "1.0.0", "resolved": "https://registry.nlark.com/fs.realpath/download/fs.realpath-1.0.0.tgz", @@ -968,10 +990,20 @@ "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=", "dev": true }, + "node-domexception": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", + "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==" + }, "node-fetch": { - "version": "2.6.2", - "resolved": "https://registry.nlark.com/node-fetch/download/node-fetch-2.6.2.tgz?cache=0&sync_timestamp=1630935314150&other_urls=https%3A%2F%2Fregistry.nlark.com%2Fnode-fetch%2Fdownload%2Fnode-fetch-2.6.2.tgz", - "integrity": "sha1-mGmWgYtzeF5HsZZcw06wk6HUZNA=" + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.1.1.tgz", + "integrity": "sha512-SMk+vKgU77PYotRdWzqZGTZeuFKlsJ0hu4KPviQKkfY+N3vn2MIzr0rvpnYpR8MtB3IEuhlEcuOLbGvLRlA+yg==", + "requires": { + "data-uri-to-buffer": "^4.0.0", + "fetch-blob": "^3.1.3", + "formdata-polyfill": "^4.0.10" + } }, "object-inspect": { "version": "1.11.0", @@ -1331,6 +1363,11 @@ "integrity": "sha1-LeGWGMZtwkfc+2+ZM4A12CRaLO4=", "dev": true }, + "web-streams-polyfill": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.2.0.tgz", + "integrity": "sha512-EqPmREeOzttaLRm5HS7io98goBgZ7IVz79aDvqjD0kYXLtFZTc0T/U6wHTPKyIjb+MdN7DFIIX6hgdBEpWmfPA==" + }, "which": { "version": "2.0.2", "resolved": "https://registry.npm.taobao.org/which/download/which-2.0.2.tgz", diff --git a/src/connector/node-rest/package.json b/src/connector/node-rest/package.json index 3eab6fc289bf4e8a189fd117f2dfe7bc67321466..f314abd6463bd4fe5046f2ae68f338cd06acd250 100644 --- a/src/connector/node-rest/package.json +++ b/src/connector/node-rest/package.json @@ -18,6 +18,6 @@ "assert": "^2.0.0" }, "dependencies": { - "node-fetch": "^2.x" + "node-fetch": "^3.x" } } diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index c5d65b831a4803c4da76dc848027a963800bcae2..e6613027c707b7d46aa70b3b0feb3ebea137e5f0 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -83,25 +83,10 @@ extern const int32_t TYPE_BYTES[16]; #define TSDB_DATA_NULL_STR "NULL" #define TSDB_DATA_NULL_STR_L "null" - #define TSDB_DEFAULT_USER "root" - -#ifdef _TD_POWER_ -#define TSDB_DEFAULT_PASS "powerdb" -#elif (_TD_TQ_ == true) -#define TSDB_DEFAULT_PASS "tqueue" -#elif (_TD_PRO_ == true) -#define TSDB_DEFAULT_PASS "prodb" -#elif (_TD_KH_ == true) -#define TSDB_DEFAULT_PASS "khroot" -#elif (_TD_JH_ == true) -#define TSDB_DEFAULT_PASS "jhdata" -#else #define TSDB_DEFAULT_PASS "taosdata" -#endif #define SHELL_MAX_PASSWORD_LEN 20 - #define TSDB_TRUE 1 #define TSDB_FALSE 0 #define TSDB_OK 0 @@ -417,6 +402,11 @@ do { \ #define TSDB_DEFAULT_STABLES_HASH_SIZE 100 #define TSDB_DEFAULT_CTABLES_HASH_SIZE 20000 +#define TSDB_SHORTCUT_RB_RPC_SEND_SUBMIT 0x01u // RB: return before(global shortcut) +#define TSDB_SHORTCUT_RA_RPC_RECV_SUBMIT 0x02u // RA: return after(global shortcut) +#define TSDB_SHORTCUT_NR_VNODE_WAL_WRITE 0x04u // NR: no return and go on following actions(local shortcut) 
+#define TSDB_SHORTCUT_RB_TSDB_COMMIT 0x08u + #define TSDB_PORT_DNODESHELL 0 #define TSDB_PORT_DNODEDNODE 5 #define TSDB_PORT_SYNC 10 diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 26ce551e397fccfe6eb378aa0de2de771dfae10f..2c4d21037c9697e832bccf082595408c712d0670 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -335,8 +335,8 @@ typedef struct { typedef struct { int8_t extend; - char clientVersion[TSDB_VERSION_LEN]; - char msgVersion[TSDB_VERSION_LEN]; + char clientVersion[TSDB_VERSION_LEN]; // useless + char msgVersion[TSDB_VERSION_LEN]; // useless char db[TSDB_TABLE_FNAME_LEN]; char appName[TSDB_APPNAME_LEN]; int32_t pid; @@ -920,7 +920,7 @@ typedef struct { typedef struct { int8_t extend; - char clientVer[TSDB_VERSION_LEN]; + char clientVer[TSDB_VERSION_LEN]; // useless uint32_t connId; int32_t pid; int32_t numOfQueries; diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index eeff90bd5399c1ff2e08b1254fc63c9e53d3cbc3..4406a3ae5486b612cf9a3255b6d35f4e24823ad6 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -432,6 +432,16 @@ void* getJsonTagValueElment(void* data, char* key, int32_t keyLen, char* out, in void getJsonTagValueAll(void* data, void* dst, int16_t bytes); char* parseTagDatatoJson(void *p); +// +// scan callback +// + +// type define +#define READ_TABLE 1 +#define READ_QUERY 2 +typedef bool (*readover_callback)(void* param, int8_t type, int32_t tid); +void tsdbAddScanCallback(TsdbQueryHandleT* queryHandle, readover_callback callback, void* param); + #ifdef __cplusplus } #endif diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt index dac31fc1f1b88581ed8976634fd767a3eddd2cea..b311361c438d033ad3f7582d30df7d1c33357c1d 100644 --- a/src/kit/shell/CMakeLists.txt +++ b/src/kit/shell/CMakeLists.txt @@ -37,20 +37,8 @@ ELSEIF (TD_WINDOWS) ENDIF () TARGET_LINK_LIBRARIES(shell taos_static cJson) + SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos) - IF (TD_POWER) - SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME power) - ELSEIF (TD_TQ) - SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME tq) - ELSEIF (TD_PRO) - SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME prodbc) - ELSEIF (TD_KH) - SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME khclient) - ELSEIF (TD_JH) - SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME jh_taos) - ELSE () - SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos) - ENDIF () ELSEIF (TD_DARWIN) LIST(APPEND SRC ./src/shellEngine.c) LIST(APPEND SRC ./src/shellMain.c) diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index 9c5794278c5bd9545fb6260e4f8442d8c9e8cad9..7b22f89351a1247abebcd1b33cb8e2d394967dba 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -27,21 +27,7 @@ #define MAX_IP_SIZE 20 #define MAX_HISTORY_SIZE 1000 #define MAX_COMMAND_SIZE 1048586 - -#ifdef _TD_POWER_ - #define HISTORY_FILE ".power_history" -#elif (_TD_TQ_ == true) - #define HISTORY_FILE ".tq_history" -#elif (_TD_PRO_ == true) - #define HISTORY_FILE ".prodb_history" -#elif (_TD_KH_ == true) - #define HISTORY_FILE ".kh_history" -#elif (_TD_JH_ == true) - #define HISTORY_FILE ".jh_taos_history" -#else - #define HISTORY_FILE ".taos_history" -#endif - +#define HISTORY_FILE ".taos_history" #define DEFAULT_RES_SHOW_NUM 100 typedef struct SShellHistory { diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 7944cfc72845195b4f626cf8353a7ffed320cafb..21bc9ad06e26ae307fe34884c502d3ca6662e941 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -30,43 
+30,11 @@ #include /**************** Global variables ****************/ -#ifdef _TD_POWER_ -char CLIENT_VERSION[] = "Welcome to the PowerDB shell from %s, Client Version:%s\n" - "Copyright (c) 2020 by PowerDB, Inc. All rights reserved.\n\n"; -char PROMPT_HEADER[] = "power> "; -char CONTINUE_PROMPT[] = " -> "; -int prompt_size = 7; -#elif (_TD_TQ_ == true) -char CLIENT_VERSION[] = "Welcome to the TQ shell from %s, Client Version:%s\n" - "Copyright (c) 2020 by TQ, Inc. All rights reserved.\n\n"; -char PROMPT_HEADER[] = "tq> "; -char CONTINUE_PROMPT[] = " -> "; -int prompt_size = 4; -#elif (_TD_PRO_ == true) -char CLIENT_VERSION[] = "Welcome to the ProDB shell from %s, Client Version:%s\n" - "Copyright (c) 2020 by Hanatech, Inc. All rights reserved.\n\n"; -char PROMPT_HEADER[] = "ProDB> "; -char CONTINUE_PROMPT[] = " -> "; -int prompt_size = 7; -#elif (_TD_KH_ == true) -char CLIENT_VERSION[] = "Welcome to the KingHistorian shell from %s, Client Version:%s\n" - "Copyright (c) 2021 by Hanatech, Inc. All rights reserved.\n\n"; -char PROMPT_HEADER[] = "kh> "; -char CONTINUE_PROMPT[] = " -> "; -int prompt_size = 4; -#elif (_TD_JH_ == true) -char CLIENT_VERSION[] = "Welcome to the jh_iot shell from %s, Client Version:%s\n" - "Copyright (c) 2021 by jinheng, Inc. All rights reserved.\n\n"; -char PROMPT_HEADER[] = "jh_taos> "; -char CONTINUE_PROMPT[] = " -> "; -int prompt_size = 9; -#else char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n"; char PROMPT_HEADER[] = "taos> "; char CONTINUE_PROMPT[] = " -> "; int prompt_size = 6; -#endif int64_t result = 0; SShellHistory history; diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index b1c85d951bf1f8cf801286f51b84d47d9c893b5c..131bce04a797f7c1ad7173b6655f1e41dac8d4ec 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -333,19 +333,7 @@ void *shellLoopQuery(void *arg) { } void get_history_path(char *history) { -#ifdef _TD_POWER_ - sprintf(history, "C:/PowerDB/%s", HISTORY_FILE); -#elif (_TD_TQ_ == true) - sprintf(history, "C:/TQueue/%s", HISTORY_FILE); -#elif (_TD_PRO_ == true) - sprintf(history, "C:/ProDB/%s", HISTORY_FILE); -#elif (_TD_KH_ == true) - sprintf(history, "C:/KingHistorian/%s", HISTORY_FILE); -#elif (_TD_JH_ == true) - sprintf(history, "C:/jh_iot/%s", HISTORY_FILE); -#else - sprintf(history, "C:/TDengine/%s", HISTORY_FILE); -#endif + sprintf(history, "C:/TDengine/%s", HISTORY_FILE); } void exitShell() { exit(EXIT_SUCCESS); } diff --git a/src/kit/taos-tools b/src/kit/taos-tools index dbc67af5f5a764e4c7950923e4e08741a7874168..d6baa48620fcbff857642c4ec10e3c48226ca97c 160000 --- a/src/kit/taos-tools +++ b/src/kit/taos-tools @@ -1 +1 @@ -Subproject commit dbc67af5f5a764e4c7950923e4e08741a7874168 +Subproject commit d6baa48620fcbff857642c4ec10e3c48226ca97c diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 960987e7ad3b570548d9f8eb93a64ed963a5956b..bebb73eca2c48b8920bcbdc6a22e70aaf509046f 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -243,10 +243,6 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) { } SHeartBeatMsg *pHBMsg = pMsg->rpcMsg.pCont; - if (taosCheckVersion(pHBMsg->clientVer, version, 3) != TSDB_CODE_SUCCESS) { - rpcFreeCont(pRsp); - return TSDB_CODE_TSC_INVALID_VERSION; // todo change the error code - } SRpcConnInfo connInfo = {0}; rpcGetConnInfo(pMsg->rpcMsg.handle, &connInfo); @@ -307,11 +303,6 @@ static int32_t 
mnodeProcessConnectMsg(SMnodeMsg *pMsg) { goto connect_over; } - code = taosCheckVersion(pConnectMsg->clientVersion, version, 3); - if (code != TSDB_CODE_SUCCESS) { - goto connect_over; - } - SUserObj *pUser = pMsg->pUser; SAcctObj *pAcct = pUser->pAcct; diff --git a/src/os/src/linux/linuxEnv.c b/src/os/src/linux/linuxEnv.c index 49db2329e68d09b16e92c696289e56d1d540b398..1dc7abb13ab2ad0e488cf532e8846d28673fd9a0 100644 --- a/src/os/src/linux/linuxEnv.c +++ b/src/os/src/linux/linuxEnv.c @@ -18,49 +18,12 @@ #include "tglobal.h" void osInit() { -#ifdef _TD_POWER_ - if (configDir[0] == 0) { - strcpy(configDir, "/etc/power"); - } - strcpy(tsDataDir, "/var/lib/power"); - strcpy(tsLogDir, "/var/log/power"); - strcpy(tsScriptDir, "/etc/power"); -#elif (_TD_TQ_ == true) - if (configDir[0] == 0) { - strcpy(configDir, "/etc/tq"); - } - strcpy(tsDataDir, "/var/lib/tq"); - strcpy(tsLogDir, "/var/log/tq"); - strcpy(tsScriptDir, "/etc/tq"); -#elif (_TD_PRO_ == true) - if (configDir[0] == 0) { - strcpy(configDir, "/etc/ProDB"); - } - strcpy(tsDataDir, "/var/lib/ProDB"); - strcpy(tsLogDir, "/var/log/ProDB"); - strcpy(tsScriptDir, "/etc/ProDB"); -#elif (_TD_KH_ == true) - if (configDir[0] == 0) { - strcpy(configDir, "/etc/kinghistorian"); - } - strcpy(tsDataDir, "/var/lib/kinghistorian"); - strcpy(tsLogDir, "/var/log/kinghistorian"); - strcpy(tsScriptDir, "/etc/kinghistorian"); -#elif (_TD_JH_ == true) - if (configDir[0] == 0) { - strcpy(configDir, "/etc/jh_taos"); - } - strcpy(tsDataDir, "/var/lib/jh_taos"); - strcpy(tsLogDir, "/var/log/jh_taos"); - strcpy(tsScriptDir, "/etc/jh_taos"); -#else if (configDir[0] == 0) { strcpy(configDir, "/etc/taos"); } strcpy(tsDataDir, "/var/lib/taos"); strcpy(tsLogDir, "/var/log/taos"); strcpy(tsScriptDir, "/etc/taos"); -#endif strcpy(tsVnodeDir, ""); strcpy(tsDnodeDir, ""); diff --git a/src/os/src/windows/wEnv.c b/src/os/src/windows/wEnv.c index 6e087c9b29d7468b7c5a4e82c0f69b38f2c01223..7a9f571078aff954148137696a41758746745058 100644 --- a/src/os/src/windows/wEnv.c +++ b/src/os/src/windows/wEnv.c @@ -22,58 +22,13 @@ extern void taosWinSocketInit(); void osInit() { taosSetCoreDump(); -#ifdef _TD_POWER_ - if (configDir[0] == 0) { - strcpy(configDir, "C:/PowerDB/cfg"); - } - - strcpy(tsVnodeDir, "C:/PowerDB/data"); - strcpy(tsDataDir, "C:/PowerDB/data"); - strcpy(tsLogDir, "C:/PowerDB/log"); - strcpy(tsScriptDir, "C:/PowerDB/script"); -#elif (_TD_TQ_ == true) - if (configDir[0] == 0) { - strcpy(configDir, "C:/TQueue/cfg"); - } - strcpy(tsVnodeDir, "C:/TQueue/data"); - strcpy(tsDataDir, "C:/TQueue/data"); - strcpy(tsLogDir, "C:/TQueue/log"); - strcpy(tsScriptDir, "C:/TQueue/script"); -#elif (_TD_PRO_ == true) - if (configDir[0] == 0) { - strcpy(configDir, "C:/ProDB/cfg"); - } - strcpy(tsVnodeDir, "C:/ProDB/data"); - strcpy(tsDataDir, "C:/ProDB/data"); - strcpy(tsLogDir, "C:/ProDB/log"); - strcpy(tsScriptDir, "C:/ProDB/script"); -#elif (_TD_KH_ == true) - if (configDir[0] == 0) { - strcpy(configDir, "C:/KingHistorian/cfg"); - } - strcpy(tsVnodeDir, "C:/KingHistorian/data"); - strcpy(tsDataDir, "C:/KingHistorian/data"); - strcpy(tsLogDir, "C:/KingHistorian/log"); - strcpy(tsScriptDir, "C:/KingHistorian/script"); -#elif (_TD_JH_ == true) - if (configDir[0] == 0) { - strcpy(configDir, "C:/jh_iot/cfg"); - } - strcpy(tsVnodeDir, "C:/jh_iot/data"); - strcpy(tsDataDir, "C:/jh_iot/data"); - strcpy(tsLogDir, "C:/jh_iot/log"); - strcpy(tsScriptDir, "C:/jh_iot/script"); -#else if (configDir[0] == 0) { strcpy(configDir, "C:/TDengine/cfg"); } - strcpy(tsVnodeDir, "C:/TDengine/data"); 
strcpy(tsDataDir, "C:/TDengine/data"); strcpy(tsLogDir, "C:/TDengine/log"); strcpy(tsScriptDir, "C:/TDengine/script"); -#endif - strcpy(tsDnodeDir, ""); strcpy(tsMnodeDir, ""); strcpy(tsOsName, "Windows"); diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt index ef955a5663d39f0afcf399a6c15557b8c044d6c7..6ac3878df901923ed5a5fcc77fe6d63969be3a59 100644 --- a/src/plugins/CMakeLists.txt +++ b/src/plugins/CMakeLists.txt @@ -12,7 +12,7 @@ ELSEIF(TD_BUILD_TAOSA_INTERNAL) MESSAGE("${Yellow} use taosa internal as httpd ${ColourReset}") ELSE () MESSAGE("") - MESSAGE("${Green} use taosadapter as httpd ${ColourReset}") + MESSAGE("${Green} use taosadapter as httpd, platform is ${PLATFORM_ARCH_STR} ${ColourReset}") EXECUTE_PROCESS( COMMAND git rev-parse --abbrev-ref HEAD @@ -26,7 +26,7 @@ ELSE () STRING(SUBSTRING "${taos_version}" 12 -1 taos_version) STRING(STRIP "${taos_version}" taos_version) ELSE () - STRING(CONCAT taos_version "branch_" "${taos_version}") + STRING(CONCAT taos_version "_branch_" "${taos_version}") STRING(STRIP "${taos_version}" taos_version) ENDIF () EXECUTE_PROCESS( @@ -61,7 +61,7 @@ ELSE () COMMAND git clean -f -d BUILD_COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" INSTALL_COMMAND - COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-amd64_linux.tar.xz -o upx.tar.xz && tar -xvJf upx.tar.xz -C ${CMAKE_BINARY_DIR} --strip-components 1 > /dev/null && ${CMAKE_BINARY_DIR}/upx taosadapter || : + COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz -o upx.tar.xz && tar -xvJf upx.tar.xz -C ${CMAKE_BINARY_DIR} --strip-components 1 > /dev/null && ${CMAKE_BINARY_DIR}/upx taosadapter || : COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c index 68bd98dd5e0ed343e9a9966a8e75ffe4493a4cfb..33b95642b9d2757942d14d7608e7ae5169784f42 100644 --- a/src/plugins/monitor/src/monMain.c +++ b/src/plugins/monitor/src/monMain.c @@ -1157,7 +1157,9 @@ static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) { monError("failed to save vgroup_%d info, reason: invalid row %s len, sql:%s", vgId, (char *)row[i], tsMonitor.sql); goto DONE; } - pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); + char tmpBuf[10] = {0}; + memcpy(tmpBuf, row[i], charLen); + pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, tmpBuf); } else if (strcmp(fields[i].name, "onlines") == 0) { pos += snprintf(sql + pos, SQL_LENGTH, ", %d", *(int32_t *)row[i]); } else if (v_dnode_str && strcmp(v_dnode_str, "_dnode") == 0) { diff --git a/src/plugins/taosadapter b/src/plugins/taosadapter index 8f9501a30b1893c6616d644a924c995aa21ad957..6d401bb95e1125ce4aad012dc23191ed85af8b3b 160000 --- a/src/plugins/taosadapter +++ b/src/plugins/taosadapter @@ -1 +1 @@ -Subproject commit 8f9501a30b1893c6616d644a924c995aa21ad957 +Subproject commit 6d401bb95e1125ce4aad012dc23191ed85af8b3b diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 
0b938078e39e8a61d3c2d871192717fdc4dc82e7..bf3555f0920bce9b31e8ea6c5614da4379aac83a 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -325,6 +325,8 @@ typedef struct SQueryRuntimeEnv { SHashObj *pTableRetrieveTsMap; SUdfInfo *pUdfInfo; bool udfIsCopy; + SHashObj *pTablesRead; // record child tables already read rows by tid hash + int32_t cntTableReadOver; // read table over count } SQueryRuntimeEnv; enum { @@ -721,4 +723,10 @@ void doInvokeUdf(SUdfInfo* pUdfInfo, SQLFunctionCtx *pCtx, int32_t idx, int32_t int32_t getColumnDataFromId(void *param, int32_t id, void **data); void qInfoLogSSDataBlock(SSDataBlock* block, char* location); + +// add table read rows count. pHashTables must not be NULL +void addTableReadRows(SQueryRuntimeEnv* pEnv, int32_t tid, int32_t rows); +// tsdb scan table callback table or query is over. param is SQueryRuntimeEnv* +bool qReadOverCB(void* param, int8_t type, int32_t tid); + #endif // TDENGINE_QEXECUTOR_H diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index 19998b78ecfe670b544b87477eeec684a8b0b7a2..e8aea259ce3718b13d697e84db54ebd7fe8a6bd6 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -859,7 +859,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). { X.n += Z.n; toTSDBType(Y.type); - SArray* A = tVariantListAppendToken(NULL, &Y, -1, true); + SArray* A = tVariantListAppendToken(NULL, &Y, -1, false); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -869,10 +869,10 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). { X.n += F.n; toTSDBType(Y.type); - SArray* A = tVariantListAppendToken(NULL, &Y, -1, true); + SArray* A = tVariantListAppendToken(NULL, &Y, -1, false); toTSDBType(Z.type); - A = tVariantListAppendToken(A, &Z, -1, true); + A = tVariantListAppendToken(A, &Z, -1, false); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -882,7 +882,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). { X.n += F.n; toTSDBType(Y.type); - SArray* A = tVariantListAppendToken(NULL, &Y, -1, true); + SArray* A = tVariantListAppendToken(NULL, &Y, -1, false); A = tVariantListAppend(A, &Z, -1); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1); @@ -906,7 +906,7 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) DROP COLUMN ids(A). { X.n += F.n; toTSDBType(A.type); - SArray* K = tVariantListAppendToken(NULL, &A, -1, true); + SArray* K = tVariantListAppendToken(NULL, &A, -1, false); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -928,7 +928,7 @@ cmd ::= ALTER STABLE ids(X) cpxName(Z) DROP TAG ids(Y). { X.n += Z.n; toTSDBType(Y.type); - SArray* A = tVariantListAppendToken(NULL, &Y, -1, true); + SArray* A = tVariantListAppendToken(NULL, &Y, -1, false); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -938,10 +938,10 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). 
{ X.n += F.n; toTSDBType(Y.type); - SArray* A = tVariantListAppendToken(NULL, &Y, -1, true); + SArray* A = tVariantListAppendToken(NULL, &Y, -1, false); toTSDBType(Z.type); - A = tVariantListAppendToken(A, &Z, -1, true); + A = tVariantListAppendToken(A, &Z, -1, false); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -951,7 +951,7 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). { X.n += F.n; toTSDBType(Y.type); - SArray* A = tVariantListAppendToken(NULL, &Y, -1, true); + SArray* A = tVariantListAppendToken(NULL, &Y, -1, false); A = tVariantListAppend(A, &Z, -1); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, TSDB_SUPER_TABLE); diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 030ec9275ada5a24b6eae88fb8239c1633c92e94..50f5babcdd0a2534e1e9cdd6722acf0e87c78b8f 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -204,6 +204,7 @@ typedef struct SElapsedInfo { typedef struct { bool valueAssigned; + bool ignoreNegative; union { int64_t i64Prev; double d64Prev; @@ -3085,6 +3086,7 @@ static bool diff_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResIn SDiffFuncInfo* pDiffInfo = GET_ROWCELL_INTERBUF(pResInfo); pDiffInfo->valueAssigned = false; pDiffInfo->i64Prev = 0; + pDiffInfo->ignoreNegative = (pCtx->param[0].i64 == 1) ? true : false; return true; } @@ -3317,6 +3319,9 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { continue; } + if ((pDiffInfo->ignoreNegative) && (pData[i] < 0)) { + continue; + } if (pDiffInfo->valueAssigned) { *pOutput = (int32_t)(pData[i] - pDiffInfo->i64Prev); // direct previous may be null @@ -3339,6 +3344,9 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { continue; } + if ((pDiffInfo->ignoreNegative) && (pData[i] < 0)) { + continue; + } if (pDiffInfo->valueAssigned) { *pOutput = pData[i] - pDiffInfo->i64Prev; // direct previous may be null @@ -3361,6 +3369,9 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { continue; } + if ((pDiffInfo->ignoreNegative) && (pData[i] < 0)) { + continue; + } if (pDiffInfo->valueAssigned) { // initial value is not set yet SET_DOUBLE_VAL(pOutput, pData[i] - pDiffInfo->d64Prev); // direct previous may be null @@ -3383,6 +3394,9 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { continue; } + if ((pDiffInfo->ignoreNegative) && (pData[i] < 0)) { + continue; + } if (pDiffInfo->valueAssigned) { // initial value is not set yet *pOutput = (float)(pData[i] - pDiffInfo->d64Prev); // direct previous may be null @@ -3405,6 +3419,9 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { continue; } + if ((pDiffInfo->ignoreNegative) && (pData[i] < 0)) { + continue; + } if (pDiffInfo->valueAssigned) { // initial value is not set yet *pOutput = (int16_t)(pData[i] - pDiffInfo->i64Prev); // direct previous may be null @@ -3428,6 +3445,9 @@ static void diff_function(SQLFunctionCtx *pCtx) { if (pCtx->hasNull && isNull((char *)&pData[i], pCtx->inputType)) { continue; } + if ((pDiffInfo->ignoreNegative) && (pData[i] < 0)) { + continue; + } if 
(pDiffInfo->valueAssigned) { // initial value is not set yet *pOutput = (int8_t)(pData[i] - pDiffInfo->i64Prev); // direct previous may be null diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index c1bd818a58426da2c64cf16dca754b64ef2bd1e5..5fa5186479d43021994c4c4c4f481e181045fb96 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2057,6 +2057,22 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQueryAttr->numOfCols + pQueryAttr->srcRowSize); pRuntimeEnv->tagVal = malloc(pQueryAttr->tagLen); + // malloc pTablesRead value if super table && project query and && has order by && limit is true + if( pRuntimeEnv->pQueryHandle && // client merge no tsdb query, so pQueryHandle is NULL, except client merge case in here + pQueryAttr->limit.limit > 0 && + pQueryAttr->limit.offset == 0 && // if have offset, ignore limit optimization + pQueryAttr->stableQuery && + isProjQuery(pQueryAttr) && + pQueryAttr->order.orderColId != -1 ) { + // can be optimizate limit + pRuntimeEnv->pTablesRead = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + if (pRuntimeEnv->pTablesRead) // must malloc ok, set callback to tsdb + tsdbAddScanCallback(pRuntimeEnv->pQueryHandle, qReadOverCB, pRuntimeEnv); + } else { + pRuntimeEnv->pTablesRead = NULL; + } + pRuntimeEnv->cntTableReadOver= 0; + // NOTE: pTableCheckInfo need to update the query time range and the lastKey info pRuntimeEnv->pTableRetrieveTsMap = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); @@ -5990,6 +6006,9 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) { STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current; SSDataBlock* pBlock = pProjectInfo->existDataBlock; + // record table read rows + addTableReadRows(pRuntimeEnv, pBlock->info.tid, pBlock->info.rows); + pProjectInfo->existDataBlock = NULL; *newgroup = true; @@ -6035,6 +6054,9 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) { doSetOperatorCompleted(pOperator); break; } + + // record table read rows + addTableReadRows(pRuntimeEnv, pBlock->info.tid, pBlock->info.rows); // Return result of the previous group in the firstly. if (*newgroup) { @@ -8471,10 +8493,6 @@ static int32_t deserializeColFilterInfo(SColumnFilterInfo* pColFilters, int16_t int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { int32_t code = TSDB_CODE_SUCCESS; - if (taosCheckVersion(pQueryMsg->version, version, 3) != 0) { - return TSDB_CODE_QRY_INVALID_MSG; - } - pQueryMsg->numOfTables = htonl(pQueryMsg->numOfTables); pQueryMsg->window.skey = htobe64(pQueryMsg->window.skey); pQueryMsg->window.ekey = htobe64(pQueryMsg->window.ekey); @@ -10023,3 +10041,69 @@ void freeQueryAttr(SQueryAttr* pQueryAttr) { filterFreeInfo(pQueryAttr->pFilters); } } + +// add table read rows count. 
+
+// accumulate the rows read for one table; pTablesRead may be NULL when the limit optimization is off
+void addTableReadRows(SQueryRuntimeEnv* pEnv, int32_t tid, int32_t rows) {
+  SHashObj* pHashObj = pEnv->pTablesRead;
+  int32_t   limit    = (int32_t)pEnv->pQueryAttr->limit.limit;
+  if (pHashObj == NULL) {
+    return;
+  }
+
+  // read the old value
+  int32_t  v  = 0;
+  int32_t* pv = (int32_t* )taosHashGet(pHashObj, &tid, sizeof(int32_t));
+  if (pv && *pv > 0) {
+    v = *pv;
+  }
+
+  bool over = v >= limit;
+  // accumulate and save the new total
+  v += rows;
+  taosHashPut(pHashObj, &tid, sizeof(int32_t), &v, sizeof(int32_t));
+
+  // update the count of tables whose reads are over
+  if (!over && v >= limit) {
+    pEnv->cntTableReadOver += 1;
+  }
+}
+
+// tsdb scan callback: returns true when the given table (or the whole query) has read enough rows. param is SQueryRuntimeEnv*
+bool qReadOverCB(void* param, int8_t type, int32_t tid) {
+  SQueryRuntimeEnv* pEnv = (SQueryRuntimeEnv* )param;
+  if (pEnv->pTablesRead == NULL) {
+    return false;
+  }
+
+  // check whether the whole query is over
+  if (pEnv->cntTableReadOver >= pEnv->pQueryAttr->tableGroupInfo.numOfTables) {
+    return true;
+  }
+
+  // a READ_QUERY check only concerns the whole query, which was answered above
+  if (type == READ_QUERY) {
+    return false;
+  }
+
+  // read the row count of this tid
+  int32_t* pv = (int32_t* )taosHashGet(pEnv->pTablesRead, &tid, sizeof(int32_t));
+  if (pv == NULL) {
+    return false;
+  }
+
+  // compare against the limit
+  if (pEnv->pQueryAttr->limit.limit > 0 && *pv >= pEnv->pQueryAttr->limit.limit ) {
+    return true;  // this table has read all the rows it needs
+  }
+
+  return false;
+}
+
+// check whether the whole query has read enough rows; returns true when it is over. param is SQueryRuntimeEnv*
+bool queryReadOverCB(void* param) {
+  SQueryRuntimeEnv* pEnv = (SQueryRuntimeEnv* )param;
+  if (pEnv->cntTableReadOver >= pEnv->pQueryAttr->tableGroupInfo.numOfTables) {
+    return true;
+  }
+  return false;
+}
\ No newline at end of file
diff --git a/src/query/src/qFilter.c b/src/query/src/qFilter.c
index 9afd9609ee7b8817a390b0e12d705e5d678593aa..8d63ab4f91ca2de8e246293dafef1b31e93c3e22 100644
--- a/src/query/src/qFilter.c
+++ b/src/query/src/qFilter.c
@@ -927,7 +927,9 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
     SFilterField *val = FILTER_UNIT_RIGHT_FIELD(info, u);
     assert(FILTER_GET_FLAG(val->flag, FLD_TYPE_VALUE));
   } else {
-    assert(optr == TSDB_RELATION_ISNULL || optr == TSDB_RELATION_NOTNULL || optr == FILTER_DUMMY_EMPTY_OPTR);
+    if(optr != TSDB_RELATION_ISNULL && optr != TSDB_RELATION_NOTNULL && optr != FILTER_DUMMY_EMPTY_OPTR){
+      return -1;
+    }
   }

   SFilterField *col = FILTER_UNIT_LEFT_FIELD(info, u);
@@ -1257,7 +1259,8 @@ int32_t filterAddGroupUnitFromNode(SFilterInfo *info, tExprNode* tree, SArray *g
   } else {
     filterAddFieldFromNode(info, tree->_node.pRight, &right);

-    filterAddUnit(info, tree->_node.optr, &left, &right, &uidx);
+    ret = filterAddUnit(info, tree->_node.optr, &left, &right, &uidx);
+    CHK_LRET(ret != TSDB_CODE_SUCCESS, TSDB_CODE_QRY_APP_ERROR, "invalid where condition");

     SFilterGroup fgroup = {0};
     filterAddUnitToGroup(&fgroup, uidx);
@@ -1282,7 +1285,7 @@ int32_t filterAddUnitFromUnit(SFilterInfo *dst, SFilterInfo *src, SFilterUnit* u
   void *data = FILTER_UNIT_VAL_DATA(src, u);
   if (IS_VAR_DATA_TYPE(type)) {
     if (FILTER_UNIT_OPTR(u) == TSDB_RELATION_IN) {
-      filterAddField(dst, NULL, &data, FLD_TYPE_VALUE, &right, 0, false);
+      filterAddField(dst, NULL, &data, FLD_TYPE_VALUE, &right, sizeof(SHashObj), false);

       t = FILTER_GET_FIELD(dst, right);

@@ -1574,7 +1577,9 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
       SFilterField *left = FILTER_UNIT_LEFT_FIELD(info, unit);
       SSchema *sch = left->desc;

-      len = sprintf(str, "UNIT[%d] => [%d][%s] %s [", i, sch->colId,
sch->name, gOptrStr[unit->compare.optr].str);
+      if (unit->compare.optr >= TSDB_RELATION_INVALID && unit->compare.optr <= TSDB_RELATION_CONTAINS){
+        len = sprintf(str, "UNIT[%d] => [%d][%s] %s [", i, sch->colId, sch->name, gOptrStr[unit->compare.optr].str);
+      }

       if (unit->right.type == FLD_TYPE_VALUE && FILTER_UNIT_OPTR(unit) != TSDB_RELATION_IN) {
         SFilterField *right = FILTER_UNIT_RIGHT_FIELD(info, unit);
@@ -1591,7 +1596,9 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)

       if (unit->compare.optr2) {
         strcat(str, " && ");
-        sprintf(str + strlen(str), "[%d][%s] %s [", sch->colId, sch->name, gOptrStr[unit->compare.optr2].str);
+        if (unit->compare.optr2 >= TSDB_RELATION_INVALID && unit->compare.optr2 <= TSDB_RELATION_CONTAINS){
+          sprintf(str + strlen(str), "[%d][%s] %s [", sch->colId, sch->name, gOptrStr[unit->compare.optr2].str);
+        }

         if (unit->right2.type == FLD_TYPE_VALUE && FILTER_UNIT_OPTR(unit) != TSDB_RELATION_IN) {
           SFilterField *right = FILTER_UNIT_RIGHT2_FIELD(info, unit);
@@ -3588,7 +3595,7 @@ int32_t filterConverNcharColumns(SFilterInfo* info, int32_t rows, bool *gotNchar
   if (FILTER_EMPTY_RES(info) || FILTER_ALL_RES(info)) {
     return TSDB_CODE_SUCCESS;
   }
-  
+
   for (uint32_t i = 0; i < info->fields[FLD_TYPE_COLUMN].num; ++i) {
     SFilterField* fi = &info->fields[FLD_TYPE_COLUMN].fields[i];
     int32_t type = FILTER_GET_COL_FIELD_TYPE(fi);
@@ -3602,6 +3609,15 @@ int32_t filterConverNcharColumns(SFilterInfo* info, int32_t rows, bool *gotNchar
         char *src = FILTER_GET_COL_FIELD_DATA(fi, j);
         char *dst = FILTER_GET_COL_FIELD_DATA(&nfi, j);
         int32_t len = 0;
+        char *varSrc = varDataVal(src);
+        size_t k = 0, varSrcLen = varDataLen(src);
+        while (k < varSrcLen && varSrc[k] == -1) { ++k; }  // stop at the first non-0xFF byte, so k == varSrcLen only for an all-0xFF value
+        if (k == varSrcLen) {
+          /* all bytes are 0xFF: a NULL nchar value, copy through unchanged */
+          varDataLen(dst) = (VarDataLenT) varSrcLen;
+          varDataCopy(dst, src);
+          continue;
+        }
         bool ret = taosMbsToUcs4(varDataVal(src), varDataLen(src), varDataVal(dst), bufSize, &len);
         if(!ret) {
           qError("filterConverNcharColumns taosMbsToUcs4 error");
diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c
index f927287015bf56f09c99d992b18fd2d226cb15f5..707be41c5842efe1810eb8a478e9e25ad402a8ba 100644
--- a/src/query/src/qSqlParser.c
+++ b/src/query/src/qSqlParser.c
@@ -18,6 +18,7 @@
 #include "taosdef.h"
 #include "taosmsg.h"
 #include "tcmdtype.h"
+#include "tcompare.h"
 #include "tstrbuild.h"
 #include "ttoken.h"
 #include "ttokendef.h"
@@ -318,12 +319,17 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) {
   }

   if ((pLeft != NULL && pRight != NULL) &&
-      (optrType == TK_PLUS || optrType == TK_MINUS || optrType == TK_STAR || optrType == TK_DIVIDE || optrType == TK_REM)) {
+      (optrType == TK_PLUS || optrType == TK_MINUS || optrType == TK_STAR || optrType == TK_DIVIDE || optrType == TK_REM ||
+       optrType == TK_EQ || optrType == TK_NE || optrType == TK_LT || optrType == TK_GT || optrType == TK_LE || optrType == TK_GE ||
+       optrType == TK_AND || optrType == TK_OR)) {
     /*
      * if a exprToken is noted as the TK_TIMESTAMP, the time precision is microsecond
      * Otherwise, the time precision is adaptive, determined by the time precision from databases.
*/ if ((pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_INTEGER) || + (pLeft->tokenId == TK_BOOL && pRight->tokenId == TK_BOOL) || + (pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_BOOL) || + (pLeft->tokenId == TK_BOOL && pRight->tokenId == TK_INTEGER) || (pLeft->tokenId == TK_TIMESTAMP && pRight->tokenId == TK_TIMESTAMP)) { pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; pExpr->tokenId = pLeft->tokenId; @@ -360,12 +366,46 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) { pExpr->value.i64 = pLeft->value.i64 % pRight->value.i64; break; } + case TK_EQ: { + pExpr->value.i64 = (pLeft->value.i64 == pRight->value.i64) ? 1 : 0; + break; + } + case TK_NE: { + pExpr->value.i64 = (pLeft->value.i64 != pRight->value.i64) ? 1 : 0; + break; + } + case TK_LT: { + pExpr->value.i64 = (pLeft->value.i64 < pRight->value.i64) ? 1 : 0; + break; + } + case TK_GT: { + pExpr->value.i64 = (pLeft->value.i64 > pRight->value.i64) ? 1 : 0; + break; + } + case TK_LE: { + pExpr->value.i64 = (pLeft->value.i64 <= pRight->value.i64) ? 1 : 0; + break; + } + case TK_GE: { + pExpr->value.i64 = (pLeft->value.i64 >= pRight->value.i64) ? 1 : 0; + break; + } + case TK_AND: { + pExpr->value.i64 = (pLeft->value.i64 && pRight->value.i64) ? 1 : 0; + break; + } + case TK_OR: { + pExpr->value.i64 = (pLeft->value.i64 || pRight->value.i64) ? 1 : 0; + break; + } } tSqlExprDestroy(pLeft); tSqlExprDestroy(pRight); } else if ((pLeft->tokenId == TK_FLOAT && pRight->tokenId == TK_INTEGER) || (pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_FLOAT) || + (pLeft->tokenId == TK_BOOL && pRight->tokenId == TK_FLOAT) || + (pLeft->tokenId == TK_FLOAT && pRight->tokenId == TK_BOOL) || (pLeft->tokenId == TK_FLOAT && pRight->tokenId == TK_FLOAT)) { pExpr->value.nType = TSDB_DATA_TYPE_DOUBLE; pExpr->tokenId = TK_FLOAT; @@ -395,6 +435,80 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) { pExpr->value.dKey = left - ((int64_t)(left / right)) * right; break; } + case TK_EQ: { + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + if (compareDoubleVal(&left, &right) == 0) { + pExpr->value.i64 = 1; + } else { + pExpr->value.i64 = 0; + } + break; + } + case TK_NE: { + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + if (compareDoubleVal(&left, &right) != 0) { + pExpr->value.i64 = 1; + } else { + pExpr->value.i64 = 0; + } + break; + } + case TK_LT: { + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + if (compareDoubleVal(&left, &right) == -1) { + pExpr->value.i64 = 1; + } else { + pExpr->value.i64 = 0; + } + break; + } + case TK_GT: { + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + if (compareDoubleVal(&left, &right) == 1) { + pExpr->value.i64 = 1; + } else { + pExpr->value.i64 = 0; + } + break; + } + case TK_LE: { + int32_t res = compareDoubleVal(&left, &right); + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + if (res == 0 || res == -1) { + pExpr->value.i64 = 1; + } else { + pExpr->value.i64 = 0; + } + break; + } + case TK_GE: { + int32_t res = compareDoubleVal(&left, &right); + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + if (res == 0 || res == 1) { + pExpr->value.i64 = 1; + } else { + pExpr->value.i64 = 0; + } + break; + } + case TK_AND: { + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + pExpr->value.i64 = (left && right) ? 
1 : 0; + break; + } + case TK_OR: { + pExpr->tokenId = TK_INTEGER; + pExpr->value.nType = TSDB_DATA_TYPE_BIGINT; + pExpr->value.i64 = (left || right) ? 1 : 0; + break; + } } tSqlExprDestroy(pLeft); @@ -505,7 +619,7 @@ tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) { tSqlExpr *pExpr = calloc(1, sizeof(tSqlExpr)); memcpy(pExpr, pSrc, sizeof(*pSrc)); - + if (pSrc->pLeft) { pExpr->pLeft = tSqlExprClone(pSrc->pLeft); } @@ -518,7 +632,7 @@ tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) { tVariantAssign(&pExpr->value, &pSrc->value); //we don't clone paramList now because clone is only used for between/and - assert(pSrc->Expr.paramList == NULL); + pExpr->Expr.paramList = NULL; return pExpr; } diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 4504b84f61161d31e2785c14fdd8ffbbfe11a606..38115561fbc7627f80f1bf8cd26dd4ad372ccae5 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -3840,7 +3840,7 @@ static YYACTIONTYPE yy_reduce( yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; toTSDBType(yymsp[0].minor.yy0.type); - SArray* A = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1, true); + SArray* A = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1, false); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -3851,10 +3851,10 @@ static YYACTIONTYPE yy_reduce( yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; toTSDBType(yymsp[-1].minor.yy0.type); - SArray* A = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1, true); + SArray* A = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1, false); toTSDBType(yymsp[0].minor.yy0.type); - A = tVariantListAppendToken(A, &yymsp[0].minor.yy0, -1, true); + A = tVariantListAppendToken(A, &yymsp[0].minor.yy0, -1, false); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -3865,7 +3865,7 @@ static YYACTIONTYPE yy_reduce( yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; toTSDBType(yymsp[-2].minor.yy0.type); - SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1, true); + SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1, false); A = tVariantListAppend(A, &yymsp[0].minor.yy162, -1); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1); @@ -3891,7 +3891,7 @@ static YYACTIONTYPE yy_reduce( yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; toTSDBType(yymsp[0].minor.yy0.type); - SArray* K = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1, true); + SArray* K = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1, false); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -3916,7 +3916,7 @@ static YYACTIONTYPE yy_reduce( yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; toTSDBType(yymsp[0].minor.yy0.type); - SArray* A = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1, true); + SArray* A = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1, false); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -3927,10 +3927,10 @@ static YYACTIONTYPE yy_reduce( yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; 
toTSDBType(yymsp[-1].minor.yy0.type); - SArray* A = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1, true); + SArray* A = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1, false); toTSDBType(yymsp[0].minor.yy0.type); - A = tVariantListAppendToken(A, &yymsp[0].minor.yy0, -1, true); + A = tVariantListAppendToken(A, &yymsp[0].minor.yy0, -1, false); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -3941,7 +3941,7 @@ static YYACTIONTYPE yy_reduce( yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; toTSDBType(yymsp[-2].minor.yy0.type); - SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1, true); + SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1, false); A = tVariantListAppend(A, &yymsp[0].minor.yy162, -1); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, TSDB_SUPER_TABLE); diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index e8d24b392e612e94e3cbfa6f4fa7b3ea61a8810a..34e9ddf8193f999d598bd367ba75556a36cd0c78 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -1165,6 +1165,19 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead, SRpcReqConte rpcMsg.handle = pConn; rpcAddRef(pRpc); // add the refCount for requests + switch (rpcMsg.msgType) { + case TSDB_MSG_TYPE_SUBMIT: + if (tsShortcutFlag & TSDB_SHORTCUT_RA_RPC_RECV_SUBMIT) { + SRpcMsg rMsg = {.handle = rpcMsg.handle, .pCont = NULL, .contLen = 0}; + rpcSendResponse(&rMsg); + rpcFreeCont(rpcMsg.pCont); + return; + } + break; + default: + break; + } + // notify the server app (*(pRpc->cfp))(&rpcMsg, NULL); } else { diff --git a/src/rpc/src/rpcUdp.c b/src/rpc/src/rpcUdp.c index 086a390cb8ea2a95f576cb1bff81dfc79769863a..46313543d861ab1a2b56a236b0416cb373295bb7 100644 --- a/src/rpc/src/rpcUdp.c +++ b/src/rpc/src/rpcUdp.c @@ -186,9 +186,11 @@ static void *taosRecvUdpData(void *param) { SUdpConn *pConn = param; struct sockaddr_in sourceAdd; ssize_t dataLen; + int32_t msgLen; unsigned int addLen; uint16_t port; SRecvInfo recvInfo; + SRpcHead *pHead; memset(&sourceAdd, 0, sizeof(sourceAdd)); addLen = sizeof(sourceAdd); @@ -218,6 +220,13 @@ static void *taosRecvUdpData(void *param) { continue; } + pHead = (SRpcHead *)msg; + msgLen = (int32_t)htonl((uint32_t)pHead->msgLen); + if (dataLen < msgLen) { + tError("%s recvfrom failed(%s): dataLen: %ld, msgLen: %d", pConn->label, strerror(errno), (long)dataLen, msgLen); + continue; + } + int32_t size = dataLen + tsRpcOverhead; char *tmsg = malloc(size); if (NULL == tmsg) { diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index db675d0427901f55e676a17592f0c131820e8718..8327f259f71e62c188183d2413d75a18451e2a72 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -98,6 +98,11 @@ void *tsdbCommitData(STsdbRepo *pRepo) { } tsdbStartCommit(pRepo); + if (tsShortcutFlag & TSDB_SHORTCUT_RB_TSDB_COMMIT) { + tsdbEndCommit(pRepo, terrno); + return NULL; + } + // Commit to update meta file if (tsdbCommitMeta(pRepo) < 0) { tsdbError("vgId:%d error occurs while committing META data since %s", REPO_ID(pRepo), tstrerror(terrno)); diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 4f0ba6eca1bedf20adc9230591d2ce3b01d4e060..4d14f7307b7be0c68e1251cb5a0db64baa9bde30 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -39,6 +39,9 @@ .tid = (_checkInfo)->tableId.tid, 
\ .uid = (_checkInfo)->tableId.uid})

+#define IS_END_BLOCK(cur, numOfBlocks, ascTrav) \
+  ((cur->slot == numOfBlocks - 1 && ascTrav) || (cur->slot == 0 && !ascTrav))
+
 // limit offset start optimization for rows read over this value
 #define OFFSET_SKIP_THRESHOLD 5000

@@ -153,6 +156,10 @@ typedef struct STsdbQueryHandle {
   SArray        *prev;     // previous row which is before than time window
   SArray        *next;     // next row which is after the query time window
   SIOCostSummary cost;
+
+  // callback
+  readover_callback readover_cb;
+  void*             param;
 } STsdbQueryHandle;

 typedef struct STableGroupSupporter {
@@ -182,6 +189,7 @@ static void* doFreeColumnInfoData(SArray* pColumnInfoData);
 static void* destroyTableCheckInfo(SArray* pTableCheckInfo);
 static bool  tsdbGetExternalRow(TsdbQueryHandleT pHandle);
 static int32_t tsdbQueryTableList(STable* pTable, SArray* pRes, void* filterInfo);
+static STableBlockInfo* moveToNextDataBlockInCurrentFile(STsdbQueryHandle* pQueryHandle);

 static void tsdbInitDataBlockLoadInfo(SDataBlockLoadInfo* pBlockLoadInfo) {
   pBlockLoadInfo->slot = -1;
@@ -2560,26 +2568,25 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
 static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exists);

 static int32_t getDataBlockRv(STsdbQueryHandle* pQueryHandle, STableBlockInfo* pNext, bool *exists) {
-  int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1 : -1;
   SQueryFilePos* cur = &pQueryHandle->cur;

-  while(1) {
+  while(pNext) {
     int32_t code = loadFileDataBlock(pQueryHandle, pNext->compBlock, pNext->pTableCheckInfo, exists);
+    // return on a load error or when data is found
     if (code != TSDB_CODE_SUCCESS || *exists) {
       return code;
     }

-    if ((cur->slot == pQueryHandle->numOfBlocks - 1 && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
-        (cur->slot == 0 && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
+    // no data: keep moving to the next block until one with data is found
+    if (IS_END_BLOCK(cur, pQueryHandle->numOfBlocks, ASCENDING_TRAVERSE(pQueryHandle->order))) {
       // all data blocks in current file has been checked already, try next file if exists
       return getFirstFileDataBlock(pQueryHandle, exists);
     } else {
       // next block of the same file
-      cur->slot += step;
-      cur->mixBlock = false;
-      cur->blockCompleted = false;
-      pNext = &pQueryHandle->pDataBlockInfo[cur->slot];
+      pNext = moveToNextDataBlockInCurrentFile(pQueryHandle);
     }
   }
+
+  return TSDB_CODE_SUCCESS;  // pNext == NULL: no more blocks to move to
 }

 static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exists) {
@@ -2594,6 +2601,15 @@ static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exist
   STsdbCfg* pCfg = &pQueryHandle->pTsdb->config;
   STimeWindow win = TSWINDOW_INITIALIZER;

+  // for a limit query, check whether the scan has already read enough data
+  if (pQueryHandle->readover_cb && pQueryHandle->readover_cb(pQueryHandle->param, READ_QUERY, -1)) {
+    // the query has read enough data, no need to read more
+    cur->fid = INT32_MIN;
+    *exists = false;
+    tsdbInfo("%p LIMIT_READ query is over and stop read. tables=%d qId=0x%"PRIx64, pQueryHandle, numOfTables, pQueryHandle->qId);
+    return TSDB_CODE_SUCCESS;
+  }
+
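For reference, the callback contract this early exit relies on, sketched with assumed enum values (READ_QUERY and READ_TABLE are used throughout this patch, but their numeric encoding here is an assumption):

    #include <stdbool.h>
    #include <stdint.h>

    enum { READ_QUERY = 0, READ_TABLE = 1 };  // assumed encoding of the `type` argument

    // Matches the readover_callback usage in this patch: READ_QUERY ignores the tid
    // (-1 is passed) and asks about the whole query; READ_TABLE asks whether one
    // table has already produced enough rows.
    typedef bool (*readover_callback)(void *param, int8_t type, int32_t tid);

    static bool scanShouldStop(readover_callback cb, void *param) {
      return cb != NULL && cb(param, READ_QUERY, -1);  // the guard used by getFirstFileDataBlock
    }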

   while (true) {
     tsdbRLockFS(REPO_FS(pQueryHandle->pTsdb));

@@ -2670,20 +2686,52 @@ static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exist
   return getDataBlockRv(pQueryHandle, pBlockInfo, exists);
 }

-static bool isEndFileDataBlock(SQueryFilePos* cur, int32_t numOfBlocks, bool ascTrav) {
-  assert(cur != NULL && numOfBlocks > 0);
-  return (cur->slot == numOfBlocks - 1 && ascTrav) || (cur->slot == 0 && !ascTrav);
-}
-
-static void moveToNextDataBlockInCurrentFile(STsdbQueryHandle* pQueryHandle) {
+static STableBlockInfo* moveToNextDataBlockInCurrentFile(STsdbQueryHandle* pQueryHandle) {
   int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1 : -1;

   SQueryFilePos* cur = &pQueryHandle->cur;
+  if (IS_END_BLOCK(cur, pQueryHandle->numOfBlocks, ASCENDING_TRAVERSE(pQueryHandle->order))) {
+    return NULL;
+  }
   assert(cur->slot < pQueryHandle->numOfBlocks && cur->slot >= 0);

   cur->slot += step;
   cur->mixBlock = false;
   cur->blockCompleted = false;
+
+  // no callback registered: return the next block directly
+  STableBlockInfo* pBlockInfo = &pQueryHandle->pDataBlockInfo[cur->slot];
+  if(pQueryHandle->readover_cb == NULL) {
+    return pBlockInfo;
+  }
+
+  // a callback is registered: skip the blocks of tables that have already read enough rows
+  int32_t tid  = -1;
+  bool    over = false;
+  do {
+    // tid changed, refresh the over status for the new tid
+    if(tid != pBlockInfo->pTableCheckInfo->tableId.tid) {
+      tid  = pBlockInfo->pTableCheckInfo->tableId.tid;
+      over = pQueryHandle->readover_cb(pQueryHandle->param, READ_TABLE, pBlockInfo->pTableCheckInfo->tableId.tid);
+      if (!over)  // this tid is not over yet
+        return pBlockInfo;
+    }
+
+    //
+    // this tid is over: skip all of its remaining blocks
+    //
+
+    // stop at the end of the file
+    if (IS_END_BLOCK(cur, pQueryHandle->numOfBlocks, ASCENDING_TRAVERSE(pQueryHandle->order)))
+      return NULL;
+    // move to the next block
+    cur->slot += step;
+    cur->mixBlock = false;
+    cur->blockCompleted = false;
+    pBlockInfo = &pQueryHandle->pDataBlockInfo[cur->slot];
+  } while(1);
+
+  return NULL;
 }

 int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist* pTableBlockInfo) {
@@ -2816,12 +2864,15 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists

       // current block is empty, try next block in file
       // all data blocks in current file has been checked already, try next file if exists
-      if (isEndFileDataBlock(cur, pQueryHandle->numOfBlocks, ASCENDING_TRAVERSE(pQueryHandle->order))) {
+      if (IS_END_BLOCK(cur, pQueryHandle->numOfBlocks, ASCENDING_TRAVERSE(pQueryHandle->order))) {
         return getFirstFileDataBlock(pQueryHandle, exists);
       } else {
-        moveToNextDataBlockInCurrentFile(pQueryHandle);
-        STableBlockInfo* pNext = &pQueryHandle->pDataBlockInfo[cur->slot];
-        return getDataBlockRv(pQueryHandle, pNext, exists);
+        // get the next block in the current file; NULL means this file is exhausted
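A compact model of the skip loop above: walk the file's block list in traversal order and, whenever a table is reported as over, drop all of its remaining blocks (illustrative types; the real code advances cur->slot through pDataBlockInfo):

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct { int tid; } Block;
    typedef bool (*TableOverFn)(void *param, int tid);

    // Returns the index of the next block whose table still needs rows, or -1 when the
    // file is exhausted -- the NULL return of moveToNextDataBlockInCurrentFile.
    static int nextUsefulBlock(const Block *blocks, size_t n, size_t from,
                               TableOverFn over, void *param) {
      int  lastTid = -1;
      bool tidOver = false;
      for (size_t i = from; i < n; ++i) {
        if (blocks[i].tid != lastTid) {  // tid changed: refresh the over status
          lastTid = blocks[i].tid;
          tidOver = over(param, lastTid);
        }
        if (!tidOver) return (int)i;     // this table is not done, read its block
      }
      return -1;  // every remaining block belongs to finished tables
    }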
+        STableBlockInfo* pNext = moveToNextDataBlockInCurrentFile(pQueryHandle);
+        if (pNext == NULL)  // end of this file
+          return getFirstFileDataBlock(pQueryHandle, exists);
+        else
+          return getDataBlockRv(pQueryHandle, pNext, exists);
       }
     }
   }
@@ -4599,4 +4650,12 @@ int64_t tsdbSkipOffset(TsdbQueryHandleT queryHandle) {
     return pQueryHandle->srows;
   }
   return 0;
+}
+
+// register the scan read-over callback on the query handle
+void tsdbAddScanCallback(TsdbQueryHandleT* queryHandle, readover_callback callback, void* param) {
+  STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*)queryHandle;
+  pQueryHandle->readover_cb = callback;
+  pQueryHandle->param = param;
+  return;
 }
\ No newline at end of file
diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h
index fd9a340a25a752b18ab07a8fbb2691038af3b71b..752930ed7e762eac31c77b8c1c1a91aa626ef16a 100644
--- a/src/util/inc/tconfig.h
+++ b/src/util/inc/tconfig.h
@@ -20,7 +20,7 @@
 extern "C" {
 #endif

-#define TSDB_CFG_MAX_NUM 131
+#define TSDB_CFG_MAX_NUM 132
 #define TSDB_CFG_PRINT_LEN 23
 #define TSDB_CFG_OPTION_LEN 24
 #define TSDB_CFG_VALUE_LEN 41
diff --git a/src/util/inc/tutil.h b/src/util/inc/tutil.h
index 8756ed49dee5d721096877dbe90ad04d448e1c21..a97b731f93c214536c009e94d05edc5c3d6e7ee9 100644
--- a/src/util/inc/tutil.h
+++ b/src/util/inc/tutil.h
@@ -41,9 +41,6 @@ char * paGetToken(char *src, char **token, int32_t *tokenLen);
 int32_t taosByteArrayToHexStr(char bytes[], int32_t len, char hexstr[]);
 int32_t taosHexStrToByteArray(char hexstr[], char bytes[]);

-bool taosGetVersionNumber(char *versionStr, int *versionNubmer);
-int  taosCheckVersion(char *input_client_version, char *input_server_version, int compared_segments);
-
 char *   taosIpStr(uint32_t ipInt);
 uint32_t ip2uint(const char *const ip_addr);
 void     jsonKeyMd5(void *pMsg, int msgLen, void *pKey);
diff --git a/src/util/src/hash.c b/src/util/src/hash.c
index 9b14084d29e2e5c3ca2ed4c54c069e5ca017005e..fa33d436d4a7cea62e235c683f76ce1b9dc4d5b8 100644
--- a/src/util/src/hash.c
+++ b/src/util/src/hash.c
@@ -599,6 +599,7 @@ void taosHashClear(SHashObj *pHashObj) {
   __wr_unlock(&pHashObj->lock, pHashObj->type);
 }

+// the input parameter should be SHashObj **, so that the caller's pointer could be cleared via tfree(*pHashObj)
 void taosHashCleanup(SHashObj *pHashObj) {
   if (pHashObj == NULL) {
     return;
diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c
index 2a49862bac14633f77db921b7d2e17b160019425..11fa0691a41efafdded6d76ee31631f4b631171f 100644
--- a/src/util/src/tconfig.c
+++ b/src/util/src/tconfig.c
@@ -374,25 +374,8 @@ void taosReadGlobalLogCfg() {
     }
     strcpy(configDir, full_path.we_wordv[0]);
   } else {
-    #ifdef _TD_POWER_
-    printf("configDir:%s not there, use default value: /etc/power", configDir);
-    strcpy(configDir, "/etc/power");
-    #elif (_TD_TQ_ == true)
-    printf("configDir:%s not there, use default value: /etc/tq", configDir);
-    strcpy(configDir, "/etc/tq");
-    #elif (_TD_PRO_ == true)
-    printf("configDir:%s not there, use default value: /etc/ProDB", configDir);
-    strcpy(configDir, "/etc/ProDB");
-    #elif (_TD_KH_ == true)
-    printf("configDir:%s not there, use default value: /etc/kinghistorian", configDir);
-    strcpy(configDir, "/etc/kinghistorian");
-    #elif (_TD_JH_ == true)
-    printf("configDir:%s not there, use default value: /etc/jh_taos", configDir);
-    strcpy(configDir, "/etc/jh_taos");
-    #else
     printf("configDir:%s not there, use default value: /etc/taos", configDir);
     strcpy(configDir, "/etc/taos");
-    #endif
   }

   wordfree(&full_path);
@@ -591,7 +574,7 @@ static void taosDumpCfg(SGlobalCfg *cfg) {
 }

 void
taosDumpGlobalCfg() { - printf("taos global config:\n"); + printf(" global config:\n"); printf("==================================\n"); for (int i = 0; i < tsGlobalConfigNum; ++i) { SGlobalCfg *cfg = tsGlobalConfig + i; @@ -602,7 +585,7 @@ void taosDumpGlobalCfg() { taosDumpCfg(cfg); } - printf("\ntaos local config:\n"); + printf("\n local config:\n"); printf("==================================\n"); for (int i = 0; i < tsGlobalConfigNum; ++i) { diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 657d152c18a576f3c25e41e0ca461b57002f85aa..394099fc8dc32aa2876ba5bbfe261208a3524c07 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -81,20 +81,7 @@ int64_t dbgSmallWN = 0; int64_t dbgBigWN = 0; int64_t dbgWSize = 0; -#ifdef _TD_POWER_ -char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/power"; -#elif (_TD_TQ_ == true) -char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/tq"; -#elif (_TD_PRO_ == true) -char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/ProDB"; -#elif (_TD_KH_ == true) -char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/kinghistorian"; -#elif (_TD_JH_ == true) -char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/jh_taos"; -#else char tsLogDir[PATH_MAX] = "/var/log/taos"; -#endif - static SLogObj tsLogObj = { .fileNum = 1 }; static void * taosAsyncOutputLog(void *param); static int32_t taosPushLogBuffer(SLogBuff *tLogBuff, char *msg, int32_t msgLen); diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c index c7f1385a566427a67a5695eea3943b063b3462b2..8bcef9f32b152082c7ab715ededc996b63558562 100644 --- a/src/util/src/tutil.c +++ b/src/util/src/tutil.c @@ -444,66 +444,6 @@ int32_t taosHexStrToByteArray(char hexstr[], char bytes[]) { return 0; } -// TODO move to comm module -bool taosGetVersionNumber(char *versionStr, int *versionNubmer) { - if (versionStr == NULL || versionNubmer == NULL) { - return false; - } - - int versionNumberPos[5] = {0}; - int len = (int)strlen(versionStr); - int dot = 0; - for (int pos = 0; pos < len && dot < 4; ++pos) { - if (versionStr[pos] == '.') { - versionStr[pos] = 0; - versionNumberPos[++dot] = pos + 1; - } - } - - if (dot != 3) { - return false; - } - - for (int pos = 0; pos < 4; ++pos) { - versionNubmer[pos] = atoi(versionStr + versionNumberPos[pos]); - } - versionStr[versionNumberPos[1] - 1] = '.'; - versionStr[versionNumberPos[2] - 1] = '.'; - versionStr[versionNumberPos[3] - 1] = '.'; - - return true; -} - -int taosCheckVersion(char *input_client_version, char *input_server_version, int comparedSegments) { - char client_version[TSDB_VERSION_LEN] = {0}; - char server_version[TSDB_VERSION_LEN] = {0}; - int clientVersionNumber[4] = {0}; - int serverVersionNumber[4] = {0}; - - tstrncpy(client_version, input_client_version, sizeof(client_version)); - tstrncpy(server_version, input_server_version, sizeof(server_version)); - - if (!taosGetVersionNumber(client_version, clientVersionNumber)) { - uError("invalid client version:%s", client_version); - return TSDB_CODE_TSC_INVALID_VERSION; - } - - if (!taosGetVersionNumber(server_version, serverVersionNumber)) { - uError("invalid server version:%s", server_version); - return TSDB_CODE_TSC_INVALID_VERSION; - } - - for(int32_t i = 0; i < comparedSegments; ++i) { - if (clientVersionNumber[i] != serverVersionNumber[i]) { - uError("the %d-th number of server version:%s not matched with client version:%s", i, server_version, - client_version); - return TSDB_CODE_TSC_INVALID_VERSION; - } - } - - return 0; -} - char *taosIpStr(uint32_t ipInt) { static char ipStrArray[3][30]; static int ipStrIndex = 0; diff --git 
a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index 40148fcc6d34196bed1997cb2499a4202a460fe2..8d2487168fa36a744af7e4aed01cb09447822036 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -103,7 +103,9 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara } // write into WAL - code = walWrite(pVnode->wal, pHead); + if (!(tsShortcutFlag & TSDB_SHORTCUT_NR_VNODE_WAL_WRITE)) { + code = walWrite(pVnode->wal, pHead); + } if (code < 0) { if (syncCode > 0) atomic_sub_fetch_32(&pWrite->processedCount, 1); vError("vgId:%d, hver:%" PRIu64 " vver:%" PRIu64 " code:0x%x", pVnode->vgId, pHead->version, pVnode->version, code); diff --git a/tests/develop-test/1-insert/uppercase_in_stmt.py b/tests/develop-test/1-insert/uppercase_in_stmt.py new file mode 100644 index 0000000000000000000000000000000000000000..b9372b72cb1f5c30e32dda847bdd2dbe6611fd6b --- /dev/null +++ b/tests/develop-test/1-insert/uppercase_in_stmt.py @@ -0,0 +1,66 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +from taos import * + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12977] fix invalid upper case table name of stmt api + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + self._dbname = "TD12977" + + def run(self): + tdSql.prepare() + + self._conn.execute("drop database if exists %s" % self._dbname) + self._conn.execute("create database if not exists %s" % self._dbname) + self._conn.select_db(self._dbname) + + self._conn.execute("create stable STB(ts timestamp, n int) tags(b int)") + + stmt = self._conn.statement("insert into ? using STB tags(?) values(?, ?)") + params = new_bind_params(1) + params[0].int(4); + stmt.set_tbname_tags("ct", params); + + multi_params = new_multi_binds(2); + multi_params[0].timestamp([1626861392589, 1626861392590]) + multi_params[1].int([123,456]) + stmt.bind_param_batch(multi_params) + + stmt.execute() + + tdSql.query("select * from stb") + tdSql.checkRows(2) + stmt.close() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/2-query/TD-13246.py b/tests/develop-test/2-query/TD-13246.py new file mode 100644 index 0000000000000000000000000000000000000000..e9675f8a0c99e8b528b13e77a05754b6d23e3d61 --- /dev/null +++ b/tests/develop-test/2-query/TD-13246.py @@ -0,0 +1,56 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-13246] Coredump when parentheses appear before the insert_sql + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + + def run(self): + print("running {}".format(__file__)) + tdSql.prepare() + + tdSql.execute('create stable st(ts timestamp , value int) tags (ind int)') + tdSql.execute('create table ctb using st tags(1)') + tdSql.execute('create table tb (ts timestamp, value int)') + tdSql.query('insert into ctb values(now, 1)'); + tdSql.query('insert into tb values(now, 1)'); + + tdSql.error('(insert into ctb values(now, 1)'); + tdSql.error('(insert into tb values(now, 1)'); + tdSql.error('(insert into ctb values'); + tdSql.error('(insert into ctb'); + tdSql.error('(insert into'); + tdSql.error('(insert'); + tdSql.error('('); + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/2-query/constant_compare.py b/tests/develop-test/2-query/constant_compare.py new file mode 100644 index 0000000000000000000000000000000000000000..760018bd4d3a930e4c86e3cb1b250094402a6b3f --- /dev/null +++ b/tests/develop-test/2-query/constant_compare.py @@ -0,0 +1,1190 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12945] : taos shell crash when constant comparison cause crash + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + tdSql.execute('use db') + + #Prepare data + tdSql.execute("create table tb (ts timestamp, value int);") + tdSql.execute("insert into tb values (now, 123);") + + ##operator: = + tdSql.query('select 1 = 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 = 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 = 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 = 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 = 1.0001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 = 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 = 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 = 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 = 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 = 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 = true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 = false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 = false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 = true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 = true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 = false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 = false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 = true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true = 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true = 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.error('select "abc" = "def" from tb;') + 
tdSql.error('select "abc" = 1 from tb;') + tdSql.error('select 1 = "abc" from tb;') + tdSql.error('select "abc" = 1.0 from tb;') + tdSql.error('select 1.0 = "abc" from tb;') + tdSql.error('select "abc" = true from tb;') + tdSql.error('select false = "abc" from tb;') + tdSql.error('select \'abc\' = \'def\' from tb;') + tdSql.error('select \'abc\' = 1 from tb;') + tdSql.error('select 1 = \'abc\' from tb;') + tdSql.error('select \'abc\' = 1.0 from tb;') + tdSql.error('select 1.0 = \'abc\' from tb;') + tdSql.error('select \'abc\' = true from tb;') + tdSql.error('select false = \'abc\' from tb;') + + + ##operator: != + tdSql.query('select 1 != 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 != 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 != 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 1.0001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 != 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 != 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 != 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 != 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 != true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 != false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 != false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 != true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 != true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 != false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 != false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 != true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true != 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true != 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.error('select "abc" != "def" from tb;') + tdSql.error('select "abc" != 1 from tb;') + tdSql.error('select 1 != "abc" from tb;') + tdSql.error('select "abc" != 1.0 from tb;') + tdSql.error('select 1.0 != "abc" from tb;') + tdSql.error('select "abc" != true from tb;') + tdSql.error('select false != "abc" from tb;') + tdSql.error('select \'abc\' != \'def\' from tb;') + tdSql.error('select \'abc\' != 1 from tb;') + tdSql.error('select 1 != \'abc\' from tb;') + tdSql.error('select \'abc\' != 1.0 from tb;') + tdSql.error('select 1.0 != \'abc\' from tb;') + tdSql.error('select \'abc\' != true from tb;') + tdSql.error('select false != \'abc\' from tb;') + + ##operator: <> + tdSql.query('select 1 <> 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 <> 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + 
tdSql.query('select 1 <> 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 <> 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <> 1.0001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <> 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 <> 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 <> 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 <> 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 <> 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 <> true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 <> false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 <> false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 <> true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 <> true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 <> false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 <> false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 <> true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true <> 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true <> 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.error('select "abc" <> "def" from tb;') + tdSql.error('select "abc" <> 1 from tb;') + tdSql.error('select 1 <> "abc" from tb;') + tdSql.error('select "abc" <> 1.0 from tb;') + tdSql.error('select 1.0 <> "abc" from tb;') + tdSql.error('select "abc" <> true from tb;') + tdSql.error('select false <> "abc" from tb;') + tdSql.error('select \'abc\' <> \'def\' from tb;') + tdSql.error('select \'abc\' <> 1 from tb;') + tdSql.error('select 1 <> \'abc\' from tb;') + tdSql.error('select \'abc\' <> 1.0 from tb;') + tdSql.error('select 1.0 <> \'abc\' from tb;') + tdSql.error('select \'abc\' <> true from tb;') + tdSql.error('select false <> \'abc\' from tb;') + + ##operator: < + tdSql.query('select 1 < 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 < 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 < 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 < 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 < 1.0001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 < 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 < 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 < 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 < 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 < 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + 
tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 < true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 < false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select false < true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true < false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 < true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 < false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select false < 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 < true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true < 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true < 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.error('select "abc" < "def" from tb;') + tdSql.error('select "abc" < 1 from tb;') + tdSql.error('select 1 < "abc" from tb;') + tdSql.error('select "abc" < 1.0 from tb;') + tdSql.error('select 1.0 < "abc" from tb;') + tdSql.error('select "abc" < true from tb;') + tdSql.error('select false < "abc" from tb;') + tdSql.error('select \'abc\' < \'def\' from tb;') + tdSql.error('select \'abc\' < 1 from tb;') + tdSql.error('select 1 < \'abc\' from tb;') + tdSql.error('select \'abc\' < 1.0 from tb;') + tdSql.error('select 1.0 < \'abc\' from tb;') + tdSql.error('select \'abc\' < true from tb;') + tdSql.error('select false < \'abc\' from tb;') + + ##operator: > + tdSql.query('select 1 > 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 > 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 > 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 > 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0001 > 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 > 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 > 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 > 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.001 > 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0000000001 > 1.0 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 > false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 > true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select false > true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true > false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 > true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 > false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 > false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true > 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.001 > true 
from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0000000001 > true from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.error('select "abc" > "def" from tb;') + tdSql.error('select "abc" > 1 from tb;') + tdSql.error('select 1 > "abc" from tb;') + tdSql.error('select "abc" > 1.0 from tb;') + tdSql.error('select 1.0 > "abc" from tb;') + tdSql.error('select "abc" > true from tb;') + tdSql.error('select false > "abc" from tb;') + tdSql.error('select \'abc\' > \'def\' from tb;') + tdSql.error('select \'abc\' > 1 from tb;') + tdSql.error('select 1 > \'abc\' from tb;') + tdSql.error('select \'abc\' > 1.0 from tb;') + tdSql.error('select 1.0 > \'abc\' from tb;') + tdSql.error('select \'abc\' > true from tb;') + tdSql.error('select false > \'abc\' from tb;') + + ##operator: <= + tdSql.query('select 1 <= 2 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <= 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <= 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 <= 2.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <= 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <= 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 <= 1.0001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <= 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 <= 2.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 <= 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 <= 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 <= 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 <= 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 <= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 <= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select false <= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true <= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true <= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select false <= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 <= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 <= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select false <= 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 <= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true <= 1.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true <= 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.error('select "abc" <= "def" from tb;') + tdSql.error('select "abc" <= 1 from tb;') + tdSql.error('select 1 <= "abc" from 
tb;') + tdSql.error('select "abc" <= 1.0 from tb;') + tdSql.error('select 1.0 <= "abc" from tb;') + tdSql.error('select "abc" <= true from tb;') + tdSql.error('select false <= "abc" from tb;') + tdSql.error('select \'abc\' <= \'def\' from tb;') + tdSql.error('select \'abc\' <= 1 from tb;') + tdSql.error('select 1 <= \'abc\' from tb;') + tdSql.error('select \'abc\' <= 1.0 from tb;') + tdSql.error('select 1.0 <= \'abc\' from tb;') + tdSql.error('select \'abc\' <= true from tb;') + tdSql.error('select false <= \'abc\' from tb;') + + ##operator: >= + tdSql.query('select 1 >= 2 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 >= 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 >= 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 >= 2.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 >= 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 >= 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0001 >= 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 >= 1.0000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 >= 2.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 >= 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 >= 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.001 >= 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0000000001 >= 1.0 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 >= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 >= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select false >= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true >= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select false >= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true >= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 >= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 >= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 >= false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true >= 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.001 >= true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0000000001 >= true from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.error('select "abc" >= "def" from tb;') + tdSql.error('select "abc" >= 1 from tb;') + tdSql.error('select 1 >= "abc" from tb;') + tdSql.error('select "abc" >= 1.0 from tb;') + tdSql.error('select 1.0 >= "abc" from tb;') + tdSql.error('select "abc" >= true from tb;') + tdSql.error('select false >= "abc" from tb;') + tdSql.error('select \'abc\' >= \'def\' from tb;') + tdSql.error('select \'abc\' >= 1 from tb;') + tdSql.error('select 1 >= \'abc\' from tb;') 
+ tdSql.error('select \'abc\' >= 1.0 from tb;') + tdSql.error('select 1.0 >= \'abc\' from tb;') + tdSql.error('select \'abc\' >= true from tb;') + tdSql.error('select false >= \'abc\' from tb;') + + ##operator: between and + tdSql.query('select 1 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 3 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 2 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 4 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 between 2.0 and 4.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1.0 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 3 between 2.0 and 4.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 3.0 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 2 between 2.0 and 4.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 4 between 2.0 and 4.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 2.0 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 4.0 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 2.0001 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 2.0000000001 between 2 and 4 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 2 between 2.0001 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 2 between 2.000000001 and 4 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + + tdSql.query('select 4 between 2 and 4.0001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 4 between 2 and 4.000000001 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 4.0001 between 2 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 4.000000001 between 2 and 4 from tb;') ##DBL_EPSILON is used in floating number comparison + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select false between 0 and 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select false between 1 and 2 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true between 0 and 1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true between -1 and 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select false between 0.0 and 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select false between 1.0 and 2.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true between 0.0 and 1.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true between -1.0 and 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 between false and true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 between false and true from 
tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 between false and 10 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 between true and 10 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 between false and true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1.0 between false and true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 between false and 10.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 between true and 10.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.error('select "abc" between "def" and "ghi" from tb;') + tdSql.error('select "abc" between 1 and 2 from tb;') + tdSql.error('select "abc" between 1.0 and 2.0 from tb;') + tdSql.error('select "abc" between true and false from tb;') + tdSql.error('select 1 between 1.0 and "cde" from tb;') + tdSql.error('select 1.0 between true and "cde" from tb;') + tdSql.error('select true between 1 and "cde" from tb;') + tdSql.error('select 1 between "abc" and 1.0 from tb;') + tdSql.error('select 1.0 between "abc" and true from tb;') + tdSql.error('select true between "abc" and 1 from tb;') + + tdSql.error('select \'abc\' between \'def\' and \'ghi\' from tb;') + tdSql.error('select \'abc\' between 1 and 2 from tb;') + tdSql.error('select \'abc\' between 1.0 and 2.0 from tb;') + tdSql.error('select \'abc\' between true and false from tb;') + tdSql.error('select 1 between 1.0 and \'cde\' from tb;') + tdSql.error('select 1.0 between true and \'cde\' from tb;') + tdSql.error('select true between 1 and \'cde\' from tb;') + tdSql.error('select 1 between \'abc\' and 1.0 from tb;') + tdSql.error('select 1.0 between \'abc\' and true from tb;') + tdSql.error('select true between \'abc\' and 1 from tb;') + + ##operator: and + tdSql.query('select 10 and 10 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 10.0 and 10 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 10.0 and 10.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 10 and 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 10.0 and 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 10.0 and 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 10.0 and 0.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 10.0 and 0.000000000000000001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true and 10 and false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true and 10.0 and false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 and 2 and 3 and 10.1 and -20.02 and 22.03 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 and 2 and 3 and 0 and 20.02 and 22.03 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 and 2 and 3 and 0.0 and 20.02 and 22.03 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.error('select "abc" and "def" from tb;') + tdSql.error('select "abc" and 1 from tb;') + tdSql.error('select 1 and "abc" from tb;') + tdSql.error('select "abc" and 1.0 from tb;') + tdSql.error('select 1.0 and abc from tb;') + tdSql.error('select "abc" and 
true from tb;') + tdSql.error('select false and "abc" from tb;') + tdSql.error('select 1 and "abc" and 1.0 and true and false and 0 from tb;') + tdSql.error('select 1 and "abc" and 1.0 and "cde" and false and 0 from tb;') + tdSql.error('select 1 and "abc" and 1.0 and "cde" and false and "fhi" from tb;') + + tdSql.error('select \'abc\' and \'def\' from tb;') + tdSql.error('select \'abc\' and 1 from tb;') + tdSql.error('select 1 and \'abc\' from tb;') + tdSql.error('select \'abc\' and 1.0 from tb;') + tdSql.error('select 1.0 and abc from tb;') + tdSql.error('select \'abc\' and true from tb;') + tdSql.error('select false and \'abc\' from tb;') + tdSql.error('select 1 and \'abc\' and 1.0 and true and false and 0 from tb;') + tdSql.error('select 1 and \'abc\' and 1.0 and \'cde\' and false and "fhi" from tb;') + + + ##operator: or + tdSql.query('select 10 or 10 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 10.0 or 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 10 or 0.0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 or 0 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0.0 or 0.001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0.0 or 0.000000000000000001 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 or 2 or 3 or 0.0 or -20.02 or 22.03 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 or 0.0 or 0.00 or 0.000 or 0.0000 or 0.00000 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select true or 10 or false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select true or 10.0 or false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.error('select "abc" or "def" from tb;') + tdSql.error('select "abc" or 1 from tb;') + tdSql.error('select 1 or "abc" from tb;') + tdSql.error('select "abc" or 1.0 from tb;') + tdSql.error('select 1.0 or abc from tb;') + tdSql.error('select "abc" or true from tb;') + tdSql.error('select false or "abc" from tb;') + tdSql.error('select 1 or "abc" or 1.0 or true or false or 0 from tb;') + tdSql.error('select 1 or "abc" or 1.0 or "cde" or false or 0 from tb;') + tdSql.error('select 1 or "abc" or 1.0 or "cde" or false or "fhi" from tb;') + + tdSql.error('select \'abc\' or \'def\' from tb;') + tdSql.error('select \'abc\' or 1 from tb;') + tdSql.error('select 1 or \'abc\' from tb;') + tdSql.error('select \'abc\' or 1.0 from tb;') + tdSql.error('select 1.0 or abc from tb;') + tdSql.error('select \'abc\' or true from tb;') + tdSql.error('select false or \'abc\' from tb;') + tdSql.error('select 1 or \'abc\' or 1.0 or true or false or 0 from tb;') + tdSql.error('select 1 or \'abc\' or 1.0 or \'cde\' or false or "fhi" from tb;') + + ##operator: multiple operations + tdSql.query('select 1 and 1 != 2 and 1 < 2 and 2 between 1 and 3 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 or 1 = 2 or 1 >= 2 or 2 between 3 and 5 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 and 1 != 2 and 1 < 2 and 2 between 1 and 3 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 or 1 = 2 or 1 >= 2 or 2 between 3 and 5 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 2 and 1 < 2 and 1 >= 2 and 2 between 1 and 3 from tb;') + 
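+ # Note (added commentary): the mixed-operator expectations below follow
+ # standard SQL precedence -- BETWEEN and the comparison operators bind
+ # tighter than AND, and AND binds tighter than OR -- so the query above
+ # groups as (1 != 2) and (1 < 2) and (1 >= 2) and (2 between 1 and 3),
+ # which is 0 because 1 >= 2 is false.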
tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 = 2 or 1 >= 2 or 1<>3 or 2 between 3 and 5 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 0 or 1 != 2 and 1 <= 2 and 2 between 3 and 4 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 0 or 1 = 2 and 1 <= 2 and 2 between 3 and 5 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select 1 != 2 and 1 < 2 or 1 >= 2 or 2 between 4 and 5 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 2 and 1 < 2 or 1 >= 2 or 2 between 4 and 5 or true from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 2 and 1 < 2 or 1 >= 2 or 2 between 4 and 5 and false from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 2 and 1 < 2 or 1 >= 2 or 2 between 4 and 5 and true and 10.1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select 1 != 2 and 1 < 2 or 1 >= 2 or 2 between 4 and 5 and false or 10.1 from tb;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.error('select 1 != 2 and "abc" or 1 >= 2 or "cde" between 4 and 5 and \'ghi\' or 10.1 from tb;') + tdSql.error('select 1 != 2 and 1 < 2 or \'abc123\' or 2 between \'abc123\' and 5 and false or "abc123" from tb;') + tdSql.error('select \'1234\' or 1 < 2 or \'aace\' and "cde" between 4 and "def" and "ckas" or 10.1 from tb;') + + #operator: is NULL + tdSql.error('select 1 is NULL from tb;') + tdSql.error('select 1.0 is NULL from tb;') + tdSql.error('select true is NULL from tb;') + tdSql.error('select \'a\' is NULL from tb;') + tdSql.error('select "abc" is NULL from tb;') + tdSql.error('select 1 is NULL and 1.0 is NULL or "abc" is NULL from tb;') + + #operator: is not NULL + tdSql.error('select 1 is not NULL from tb;') + tdSql.error('select 1.0 is not NULL from tb;') + tdSql.error('select true is not NULL from tb;') + tdSql.error('select \'a\' is not NULL from tb;') + tdSql.error('select "abc" is not NULL from tb;') + tdSql.error('select 1 is not NULL and 1.0 is not NULL or "abc" is not NULL from tb;') + + #operator: like + tdSql.error('select 1 like 1 from tb;') + tdSql.error('select 1.0 like 1.0 from tb;') + tdSql.error('select true like true from tb;') + tdSql.error('select \'abc\' like \'a_\' from tb;') + tdSql.error('select "abc" like "ab__%" from tb;') + tdSql.error('select 1 like 1 and 1.0 like 1.0 or "abc" like "a%" from tb;') + + #operator: match + tdSql.error('select 1 match 1 from tb;') + tdSql.error('select 1.0 match 1.0 from tb;') + tdSql.error('select true match true from tb;') + tdSql.error('select \'abc\' match \'a_\' from tb;') + tdSql.error('select "abc" match "ab__%" from tb;') + tdSql.error('select 1 match 1 and 1.0 match 1.0 or "abc" match "a%" from tb;') + + #operator: nmatch + tdSql.error('select 1 nmatch 1 from tb;') + tdSql.error('select 1.0 nmatch 1.0 from tb;') + tdSql.error('select true nmatch true from tb;') + tdSql.error('select \'abc\' nmatch \'a_\' from tb;') + tdSql.error('select "abc" nmatch "ab__%" from tb;') + tdSql.error('select 1 nmatch 1 and 1.0 nmatch 1.0 or "abc" nmatch "a%" from tb;') + + #operator: in + tdSql.error('select 1 in 1 from tb;') + tdSql.error('select 1 in (1, 2, 3) from tb;') + tdSql.error('select 1.0 in 1.0 from tb;') + tdSql.error('select 1.0 in (1.0, 2.0, 3.0) from tb;') + tdSql.error('select true in (true, false, true) from tb;') + tdSql.error('select \'abc\' in (\'acd\', \'bce\') from tb;') + 
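+ # Note (added commentary): IS NULL, LIKE, MATCH, NMATCH and IN are only
+ # meaningful on column operands in this version, so every constant-only
+ # form in these checks is expected to be rejected by the parser.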
tdSql.error('select "abc" in ("acd", "bce") from tb;') + tdSql.error('select 1 in (1,2,3) and 1.0 in (1.0,2.0,3.0) or "abc" in ("abc","cde") from tb;') + + tdSql.execute('drop database db') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/2-query/diff_funcs.py b/tests/develop-test/2-query/diff_funcs.py new file mode 100644 index 0000000000000000000000000000000000000000..b7c1a0c0195f306a2e00a9cafc32e56a018bd54b --- /dev/null +++ b/tests/develop-test/2-query/diff_funcs.py @@ -0,0 +1,1142 @@ +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import tdDnodes +from math import inf + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-11204]Difference improvement that can ignore negative + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + + def restartTaosd(self, index=1, dbname="db"): + tdDnodes.stop(index) + tdDnodes.startWithoutSleep(index) + tdSql.execute(f"use diffneg") + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists diffneg") + tdSql.execute("create database if not exists diffneg") + tdSql.execute('use diffneg') + tdSql.execute('create table stb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);') + + tdSql.execute("create table tb1 using stb1 tags(1,'1',1.0);") + + tdSql.execute("create table tb2 using stb1 tags(2,'2',2.0);") + + tdSql.execute("create table tb3 using stb1 tags(3,'3',3.0);") + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:00\',true,1,1,1,1,1,1,"123","1234",1,1,1,1);') + + tdSql.execute("insert into tb1 values ('2021-11-11 09:00:01',true,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);") + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:02\',true,2,NULL,2,NULL,2,NULL,"234",NULL,2,NULL,2,NULL);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:03\',false,NULL,3,NULL,3,NULL,3,NULL,"3456",NULL,3,NULL,3);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:04\',true,4,4,4,4,4,4,"456","4567",4,4,4,4);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:05\',true,127,32767,2147483647,9223372036854775807,3.402823466e+38,1.79769e+308,"567","5678",254,65534,4294967294,9223372036854775807);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:06\',true,-127,-32767,-2147483647,-9223372036854775807,-3.402823466e+38,-1.79769e+308,"678","6789",0,0,0,0);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:00\',true,1,1,1,1,1,1,"111","1111",1,1,1,1);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:01\',true,2,2,2,2,2,2,"222","2222",2,2,2,2);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:02\',true,3,3,2,3,3,3,"333","3333",3,3,3,3);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:03\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:04\',true,5,5,5,5,5,5,"555","5555",5,5,5,5);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:05\',true,6,6,6,6,6,6,"666","6666",6,6,6,6);') + + tdSql.execute('insert into tb2 
values (\'2021-11-11 09:00:06\',true,7,7,7,7,7,7,"777","7777",7,7,7,7);') + + tdSql.execute('create table tbn (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned);') + + tdSql.execute('insert into tbn values (\'2021-11-11 09:00:00\',true,1,1,1,1,1,1,"111","1111",1,1,1,1);') + + tdSql.execute('insert into tbn values (\'2021-11-11 09:00:01\',true,2,2,2,2,2,2,"222","2222",2,2,2,2);') + + tdSql.execute('insert into tbn values (\'2021-11-11 09:00:02\',true,3,3,2,3,3,3,"333","3333",3,3,3,3);') + + tdSql.execute('insert into tbn values (\'2021-11-11 09:00:03\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);') + + tdSql.execute('insert into tbn values (\'2021-11-11 09:00:04\',true,5,5,5,5,5,5,"555","5555",5,5,5,5);') + + tdSql.execute('insert into tbn values (\'2021-11-11 09:00:05\',true,6,6,6,6,6,6,"666","6666",6,6,6,6);') + + tdSql.execute('insert into tbn values (\'2021-11-11 09:00:06\',true,7,7,7,7,7,7,"777","7777",7,7,7,7);') + + tdSql.query('select diff(c7,1) from tb1;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 1.79769e+308) + + tdSql.query('select diff(c7,0) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 1.79769e+308) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, -inf) + + tdSql.query('select diff(c7) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 1.79769e+308) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, -inf) + + tdSql.query('select diff(c7,1) from tb1;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 1.79769e+308) + + tdSql.query('select diff(c7,0) as a from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 1.79769e+308) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, -inf) + + tdSql.error('select diff(c7) + 1 as a from tb1;') + + tdSql.error('select diff(tb1.*) + 1 as a from tb1;') + + tdSql.query('select diff(c7,1) from tb1;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1.0) + 
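+ # A minimal pure-Python model of diff(col, ignore_negative), inferred
+ # from the expectations in this file (illustration only; the helper name
+ # and signature are assumptions, not TDengine internals):
+ def _diff_model(values, ignore_negative=False):
+     out, prev = [], None
+     for v in values:
+         if v is None:
+             continue  # NULLs never contribute a difference
+         if prev is not None:
+             d = v - prev
+             if ignore_negative and d < 0:
+                 continue  # drop the row and keep the previous anchor
+             out.append(d)
+         prev = v
+     return out
+ # e.g. for column c4 of tb1: _diff_model([1, None, 2, None, 4,
+ # 2147483647, -2147483647], True) == [1, 2, 2147483643], matching the
+ # diff(c4,1) checks below. The server additionally wraps integer results
+ # to the column width, which is why diff(c2) reports -254 as 2 for the
+ # tinyint column.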
tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 1.79769e+308) + + tdSql.query('select diff(c4,1) from tb1;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 2147483643) + + tdSql.query('select diff(c4) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 2147483643) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + + tdSql.error('select diff(c1 + c2) from tb1;') + + tdSql.error('select diff(13) from tb1;') + + tdSql.query('select diff(c4,1) from tb1;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 2147483643) + + tdSql.query('select diff(c2) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 123) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + + tdSql.query('select diff(c3) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 32763) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + + tdSql.query('select diff(c4) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 2147483643) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + + tdSql.query('select diff(c5) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 9223372036854775803) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + + tdSql.query('select diff(c6) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 3.4028234663852886e+38) + tdSql.checkData(3, 0, 
datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, -inf) + + tdSql.query('select diff(c7) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 1.79769e+308) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, -inf) + + tdSql.error('select diff(c8) from tb1;') + + tdSql.error('select diff(c9) from tb1;') + + tdSql.error('select diff(c10) from tb1;') + + tdSql.error('select diff(c11) from tb1;') + + tdSql.error('select diff(c12) from tb1;') + + tdSql.error('select diff(c13) from tb1;') + + tdSql.error('select diff(12345678900000000000000000) from tb1;') + + tdSql.error('select distinct diff(c4,1) from tb1;') + + tdSql.error('select diff(t1) from stb1;') + + tdSql.error('select diff(c4,1),avg(c3) from tb1;') + + tdSql.error('select diff(c4,1),top(c3,1) from tb1;') + + tdSql.error('select diff(c4,1) from tb1 session(ts, 1s);') + + tdSql.error('select diff(c4,1) from tb1 STATE_WINDOW(c4,1);') + + tdSql.error('select diff(c4,1) from tb1 interval(1s) sliding(1s) fill(NULL);') + + tdSql.error('select diff(c4,1) from stb1 group by t1;') + + tdSql.error('select diff(c4,1) from stb1 group by ts;') + + tdSql.error('select diff(c4,1) from stb1 group by c1;') + + tdSql.query('select diff(c4,1) from stb1 group by tbname;') + tdSql.checkRows(9) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 'tb1') + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(1, 2, 'tb1') + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 2147483643) + tdSql.checkData(2, 2, 'tb1') + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(3, 1, 1) + tdSql.checkData(3, 2, 'tb2') + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(4, 1, 0) + tdSql.checkData(4, 2, 'tb2') + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(5, 1, 2) + tdSql.checkData(5, 2, 'tb2') + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(6, 1, 1) + tdSql.checkData(6, 2, 'tb2') + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(7, 1, 1) + tdSql.checkData(7, 2, 'tb2') + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(8, 1, 1) + tdSql.checkData(8, 2, 'tb2') + + tdSql.error('select diff(c4,1) from tb1 order by c2;') + + tdSql.error('select diff(c8),diff(c9) from tbn;') + + tdSql.error('select diff(ts) from (select avg(c2) as a from stb1 interval(1s));') + + tdSql.query('select diff(a) from (select diff(c2) as a from tb1);') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(1, 1, 121) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(2, 1, -121) + + tdSql.error('select diff("abc") from tb1;') + + tdSql.query('select diff(c4,0) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + 
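+ # Note (added commentary): an explicit second argument of 0 keeps
+ # negative differences, so diff(c4,0) is expected to return exactly the
+ # same rows as the plain diff(c4) checked earlier.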
tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 2147483643) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + + tdSql.query('select diff(c4,0) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 2147483643) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + + tdSql.query('select diff(c6) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 3.4028234663852886e+38) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, -inf) + + tdSql.error('select diff(11)+c2 from tb1;') + + tdSql.error('select diff(c4,1)+c2 from tb1;') + + tdSql.error('select diff(c2)+11 from tb1;') + + tdSql.error('select diff(c4,1),c1,c2 from tb1;') + + tdSql.query('select diff(c4,1),t1,ts,tbname,_C0,_c0 from tb1;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 1) + tdSql.checkData(0, 3, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 4, 'tb1') + tdSql.checkData(0, 5, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 6, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(1, 2, 1) + tdSql.checkData(1, 3, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 4, 'tb1') + tdSql.checkData(1, 5, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 6, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 2147483643) + tdSql.checkData(2, 2, 1) + tdSql.checkData(2, 3, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 4, 'tb1') + tdSql.checkData(2, 5, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 6, datetime.datetime(2021, 11, 11, 9, 0, 5)) + + tdSql.error('select diff(c4,1),floor(c3) from tb1;') + + tdSql.error('select diff(c4,1),diff(c4,1) from tb1;') + + tdSql.query('select diff(c4,1) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(2) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(0, 1, 3) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(1, 1, 2147483643) + + tdSql.query('select diff(c2) from tb1 order by ts desc;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(0, 1, -2) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, -123) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, -2) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(3, 1, -1) + + tdSql.query('select diff(c4,1) from tb1 order by ts desc;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(0, 1, -2147483643) + 
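+ # Note (added commentary): with 'order by ts desc' the differences are
+ # computed over the descending sequence, so the signs flip relative to
+ # the ascending queries above; integer results still wrap to the column
+ # width (254 is reported as -2 for the tinyint column c2).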
tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(1, 1, -2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(2, 1, -1) + + tdSql.query('select diff(c4,1) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, -1) + + tdSql.error('select diff(c2) from stb1;') + + tdSql.error('select diff(c2) from stb1 order by ts desc;') + + tdSql.error('select diff(c4),t1 from stb1 order by ts desc;') + + tdSql.error('select diff(c3),tbname from stb1;') + + tdSql.error('select diff(c3),tbname from stb1 where t1 > 1;') + + tdSql.error('select diff(c8),diff(c9) from tbn;') + + tdSql.error('select diff(c8),diff(c9) from tbn order by ts desc;') + + tdSql.error('select diff(diff(c8)) from tbn;') + + tdSql.query('select diff(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(6) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(0, 1, 1.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(1, 1, 0.5) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(2, 1, 1.5) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(3, 1, 0.5) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(4, 1, 62.0) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(5, 1, -126.5) + + tdSql.error('select diff(c2) from (select * from stb1);') + + tdSql.query("select diff(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));") + tdSql.checkRows(6) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(0, 1, 1.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(1, 1, 0.5) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(2, 1, 1.5) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(3, 1, 0.5) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(4, 1, 62.0) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(5, 1, -126.5) + + tdSql.query("select diff(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;") + tdSql.checkRows(6) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(0, 1, 1.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(1, 1, 0.5) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(2, 1, 1.5) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(3, 1, 0.5) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(4, 1, 62.0) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(5, 1, -126.5) + + tdSql.query("select diff(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;") + tdSql.checkRows(6) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(0, 1, 126.5) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, -62.0) + tdSql.checkData(2, 
0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(2, 1, -0.5) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(3, 1, -1.5) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(4, 1, -0.5) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(5, 1, -1.0) + + tdSql.error("select diff(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;") + + tdSql.error("select diff(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;") + + tdSql.query('select diff(a) from (select diff(c2) as a from tb1);') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(1, 1, 121) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(2, 1, -121) + + tdSql.error('select diff(tb1.c3),diff(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;') + + tdSql.query('select diff(c3) from tb1 union all select diff(c3) from tb2;') + tdSql.checkRows(10) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 32763) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(4, 1, 1) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(5, 1, 1) + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(6, 1, 1) + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(7, 1, 1) + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(8, 1, 1) + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(9, 1, 1) + + + tdSql.execute('create table stba (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);') + + tdSql.execute("create table tba1 using stba tags(1,'1',1.0);") + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:00\',true, 1,1,1,1,1,1,"111","1111",1,1,1,1);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:01\',true, 2,2,2,2,2,2,"222","2222",2,2,2,2);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:02\',true, 3,3,2,3,3,3,"333","3333",3,3,3,3);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:03\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:04\',true, 5,5,5,5,5,5,"555","5555",5,5,5,5);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:05\',true, 6,6,6,6,6,6,"666","6666",6,6,6,6);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:06\',true, 7,7,7,7,7,7,"777","7777",7,7,7,7);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:07\',true, 8,8,8,8,8,8,"888","8888",8,8,8,8);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:08\',true, 
9,9,9,9,9,9,"999","9999",9,9,9,9);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:09\',true, 0,0,0,0,0,0,"000","0000",0,0,0,0);') + + self.restartTaosd(1, dbname='diffneg') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:10\',true, 1,1,1,1,1,1,"111","1111",1,1,1,1);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:11\',true, 2,2,2,2,2,2,"222","2222",2,2,2,2);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:12\',true, 3,3,2,3,3,3,"333","3333",3,3,3,3);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:13\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:14\',true, 5,5,5,5,5,5,"555","5555",5,5,5,5);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:15\',true, 6,6,6,6,6,6,"666","6666",6,6,6,6);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:16\',true, 7,7,7,7,7,7,"777","7777",7,7,7,7);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:17\',true, 8,8,8,8,8,8,"888","8888",8,8,8,8);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:18\',true, 9,9,9,9,9,9,"999","9999",9,9,9,9);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:19\',true, 0,0,0,0,0,0,"000","0000",0,0,0,0);') + + self.restartTaosd(1, dbname='diffneg') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:20\',true, 1,1,1,1,1,1,"111","1111",1,1,1,1);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:21\',true, 2,2,2,2,2,2,"222","2222",2,2,2,2);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:22\',true, 3,3,2,3,3,3,"333","3333",3,3,3,3);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:23\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:24\',true, 5,5,5,5,5,5,"555","5555",5,5,5,5);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:25\',true, 6,6,6,6,6,6,"666","6666",6,6,6,6);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:26\',true, 7,7,7,7,7,7,"777","7777",7,7,7,7);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:27\',true, 8,8,8,8,8,8,"888","8888",8,8,8,8);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:28\',true, 9,9,9,9,9,9,"999","9999",9,9,9,9);') + + tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:29\',true, 0,0,0,0,0,0,"000","0000",0,0,0,0);') + + tdSql.query('select diff(c7,1) from tb1;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 1.79769e+308) + + tdSql.query('select diff(c7,0) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 1.79769e+308) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, -inf) + + tdSql.query('select diff(c7) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1.0) + tdSql.checkData(2, 0, 
datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 1.79769e+308) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, -inf) + + tdSql.query('select diff(c7,1) from tb1;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 1.79769e+308) + + tdSql.query('select diff(c7,0) as a from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 1.79769e+308) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, -inf) + + tdSql.error('select diff(c7) + 1 as a from tb1;') + + tdSql.error('select diff(tb1.*) + 1 as a from tb1;') + + tdSql.query('select diff(c7,1) from tb1;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 1.79769e+308) + + tdSql.query('select diff(c4,1) from tb1;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 2147483643) + + tdSql.query('select diff(c4) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 2147483643) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + + tdSql.error('select diff(c1 + c2) from tb1;') + + tdSql.error('select diff(13) from tb1;') + + tdSql.query('select diff(c4,1) from tb1;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 2147483643) + + tdSql.query('select diff(c2) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 123) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + + tdSql.query('select diff(c3) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 32763) + tdSql.checkData(3, 0, 
datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + + tdSql.query('select diff(c4) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 2147483643) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + + tdSql.query('select diff(c5) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 9223372036854775803) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + + tdSql.query('select diff(c6) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 3.4028234663852886e+38) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, -inf) + + tdSql.query('select diff(c7) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 1.79769e+308) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, -inf) + + tdSql.error('select diff(c8) from tb1;') + + tdSql.error('select diff(c9) from tb1;') + + tdSql.error('select diff(c10) from tb1;') + + tdSql.error('select diff(c11) from tb1;') + + tdSql.error('select diff(c12) from tb1;') + + tdSql.error('select diff(c13) from tb1;') + + tdSql.error('select diff(12345678900000000000000000) from tb1;') + + tdSql.error('select distinct diff(c4,1) from tb1;') + + tdSql.error('select diff(t1) from stb1;') + + tdSql.error('select diff(c4,1),avg(c3) from tb1;') + + tdSql.error('select diff(c4,1),top(c3,1) from tb1;') + + tdSql.error('select diff(c4,1) from tb1 session(ts, 1s);') + + tdSql.error('select diff(c4,1) from tb1 STATE_WINDOW(c4,1);') + + tdSql.error('select diff(c4,1) from tb1 interval(1s) sliding(1s) fill(NULL);') + + tdSql.error('select diff(c4,1) from stb1 group by t1;') + + tdSql.error('select diff(c4,1) from stb1 group by ts;') + + tdSql.error('select diff(c4,1) from stb1 group by c1;') + + tdSql.query('select diff(c4,1) from stb1 group by tbname;') + tdSql.checkRows(9) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 'tb1') + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(1, 2, 'tb1') + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 2147483643) + tdSql.checkData(2, 2, 'tb1') + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(3, 1, 1) + tdSql.checkData(3, 2, 'tb2') + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(4, 1, 0) + tdSql.checkData(4, 2, 
'tb2') + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(5, 1, 2) + tdSql.checkData(5, 2, 'tb2') + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(6, 1, 1) + tdSql.checkData(6, 2, 'tb2') + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(7, 1, 1) + tdSql.checkData(7, 2, 'tb2') + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(8, 1, 1) + tdSql.checkData(8, 2, 'tb2') + + tdSql.error('select diff(c4,1) from tb1 order by c2;') + + tdSql.error('select diff(c8),diff(c9) from tbn;') + + tdSql.error('select diff(ts) from (select avg(c2) as a from stb1 interval(1s));') + + tdSql.query('select diff(a) from (select diff(c2) as a from tb1);') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(1, 1, 121) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(2, 1, -121) + + tdSql.error('select diff("abc") from tb1;') + + tdSql.query('select diff(c4,0) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 2147483643) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + + tdSql.query('select diff(c4,0) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 2147483643) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + + tdSql.query('select diff(c6) from tb1;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 3.4028234663852886e+38) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, -inf) + + tdSql.error('select diff(11)+c2 from tb1;') + + tdSql.error('select diff(c4,1)+c2 from tb1;') + + tdSql.error('select diff(c2)+11 from tb1;') + + tdSql.error('select diff(c4,1),c1,c2 from tb1;') + + tdSql.query('select diff(c4,1),t1,ts,tbname,_C0,_c0 from tb1;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 1) + tdSql.checkData(0, 3, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 4, 'tb1') + tdSql.checkData(0, 5, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(0, 6, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(1, 2, 1) + tdSql.checkData(1, 3, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 4, 'tb1') + tdSql.checkData(1, 5, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 6, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + 
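+ # Note (added commentary, based on the expectations here): _C0 and _c0
+ # both resolve case-insensitively to the first (timestamp) column, so
+ # columns 3, 5 and 6 of this result all repeat the diff row's timestamp.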
tdSql.checkData(2, 1, 2147483643) + tdSql.checkData(2, 2, 1) + tdSql.checkData(2, 3, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 4, 'tb1') + tdSql.checkData(2, 5, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 6, datetime.datetime(2021, 11, 11, 9, 0, 5)) + + tdSql.error('select diff(c4,1),floor(c3) from tb1;') + + tdSql.error('select diff(c4,1),diff(c4,1) from tb1;') + + tdSql.query('select diff(c4,1) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(2) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(0, 1, 3) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(1, 1, 2147483643) + + tdSql.query('select diff(c2) from tb1 order by ts desc;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(0, 1, -2) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, -123) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, -2) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(3, 1, -1) + + tdSql.query('select diff(c4,1) from tb1 order by ts desc;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(0, 1, -2147483643) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(1, 1, -2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(2, 1, -1) + + tdSql.query('select diff(c4,1) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, -1) + + tdSql.error('select diff(c2) from stb1;') + + tdSql.error('select diff(c2) from stb1 order by ts desc;') + + tdSql.error('select diff(c4),t1 from stb1 order by ts desc;') + + tdSql.error('select diff(c3),tbname from stb1;') + + tdSql.error('select diff(c3),tbname from stb1 where t1 > 1;') + + tdSql.error('select diff(c8),diff(c9) from tbn;') + + tdSql.error('select diff(c8),diff(c9) from tbn order by ts desc;') + + tdSql.error('select diff(diff(c8)) from tbn;') + + tdSql.query('select diff(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(6) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(0, 1, 1.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(1, 1, 0.5) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(2, 1, 1.5) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(3, 1, 0.5) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(4, 1, 62.0) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(5, 1, -126.5) + + tdSql.error('select diff(c2) from (select * from stb1);') + + tdSql.query("select diff(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));") + tdSql.checkRows(6) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(0, 1, 1.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(1, 1, 0.5) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(2, 1, 1.5) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(3, 1, 0.5) + tdSql.checkData(4, 0, 
datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(4, 1, 62.0) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(5, 1, -126.5) + + tdSql.query("select diff(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;") + tdSql.checkRows(6) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(0, 1, 1.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(1, 1, 0.5) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(2, 1, 1.5) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(3, 1, 0.5) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(4, 1, 62.0) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(5, 1, -126.5) + + tdSql.query("select diff(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;") + tdSql.checkRows(6) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(0, 1, 126.5) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, -62.0) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(2, 1, -0.5) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(3, 1, -1.5) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(4, 1, -0.5) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(5, 1, -1.0) + + tdSql.error("select diff(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;") + + tdSql.error("select diff(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;") + + tdSql.query('select diff(a) from (select diff(c2) as a from tb1);') + tdSql.checkRows(3) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(1, 1, 121) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(2, 1, -121) + + tdSql.error('select diff(tb1.c3),diff(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;') + + tdSql.query('select diff(c3) from tb1 union all select diff(c3) from tb2;') + tdSql.checkRows(10) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(0, 1, 2) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(2, 1, 32763) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(3, 1, 2) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(4, 1, 1) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(5, 1, 1) + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(6, 1, 1) + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(7, 1, 1) + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(8, 1, 1) + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 
11, 9, 0, 6)) + tdSql.checkData(9, 1, 1) + + tdSql.error('select diff(stb1.c4),diff(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts;') + + tdSql.error('select diff(c4) as a from stb1 union all select diff(c5) as a from stba;') + + tdSql.error('select diff(c2) from stba;') + + tdSql.error('select diff(min(c2)) from tba1;') + + tdSql.error('select diff(max(c2)) from tba1;') + + tdSql.error('select diff(count(c2)) from tba1;') + + tdSql.error('select diff(sum(c2)) from tba1;') + + tdSql.error('select diff(avg(c2)) from tba1;') + + tdSql.error('select diff(percentile(c2, 10)) from tba1;') + + tdSql.error('select diff(apercentile(c2, 10)) from tba1;') + + tdSql.error('select diff(stddev(c2)) from tba1;') + + tdSql.error('select diff(spread(c2)) from tba1;') + + tdSql.error('select diff(twa(c2)) from tba1;') + + tdSql.error('select diff(leastsquares(c2, 1, 1)) from tba1;') + + tdSql.error('select diff(interp(c2)) from tba1 every(1s)') + + tdSql.error('select diff(interp(c2)) from stba every(1s) group by tbname;') + + tdSql.error('select diff(elapsed(ts)) from tba1;') + + tdSql.error('select diff(rate(c2)) from tba1;') + + tdSql.error('select diff(irate(c2)) from tba1;') + + tdSql.error('select diff(first(c2)) from tba1;') + + tdSql.error('select diff(last(c2)) from tba1;') + + tdSql.error('select diff(last_row(c2)) from tba1;') + + tdSql.error('select diff(top(c2, 1)) from tba1;') + + tdSql.error('select diff(bottom(c2, 1)) from tba1;') + + tdSql.error('select diff(leastsquares(c2, 1, 1)) from tba1;') + + tdSql.error('select diff(derivative(c2, 1s, 0)) from tba1;') + + tdSql.error('select diff(diff(c2)) from tba1;') + + tdSql.error('select diff(csum(c2)) from tba1;') + + tdSql.error('select diff(mavg(c2,2)) from tba1;') + + tdSql.error('select diff(sample(c2,2)) from tba1;') + + tdSql.error('select diff(_block_dist()) from tba1;') + + + tdSql.execute('drop database diffneg') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/2-query/diff_ignore_negative.py b/tests/develop-test/2-query/diff_ignore_negative.py new file mode 100644 index 0000000000000000000000000000000000000000..68489572288b50343a1a08549155fcd826f1ac94 --- /dev/null +++ b/tests/develop-test/2-query/diff_ignore_negative.py @@ -0,0 +1,153 @@ +import sys +from util.log import * +from util.cases import * +from util.sql import * + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-11204]Difference improvement that can ignore negative + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists diffneg") + tdSql.execute("create database if not exists diffneg") + tdSql.execute('use diffneg') + tdSql.execute('create table st(ts timestamp, c1 int, c2 float) tags(t int);') + + tdSql.execute('create table ct1 using st tags(1);') + + tdSql.execute('create table ct2 using st tags(2);') + + tdSql.execute('insert into ct1 values(1642662622000, 1, 2)(1642662622001, 2, 4)(1642662622003, -4, -6)(1642662622004, 4, 8);') + + tdSql.execute('insert into ct2 values(1642662622001, 11, 22)(1642662622002, 22, 44)(1642662622003, -44, -66) (1642662622004, 44, 88);') + + tdSql.query('select diff(c1) from st group by tbname') + tdSql.checkRows(6) + tdSql.checkData(0, 0, 
datetime.datetime(2022, 1, 20, 15, 10, 22, 1000)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 'ct1') + tdSql.checkData(1, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 3000)) + tdSql.checkData(1, 1, -6) + tdSql.checkData(1, 2, 'ct1') + tdSql.checkData(2, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 4000)) + tdSql.checkData(2, 1, 8) + tdSql.checkData(2, 2, 'ct1') + tdSql.checkData(3, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 2000)) + tdSql.checkData(3, 1, 11) + tdSql.checkData(3, 2, 'ct2') + tdSql.checkData(4, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 3000)) + tdSql.checkData(4, 1, -66) + tdSql.checkData(4, 2, 'ct2') + tdSql.checkData(5, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 4000)) + tdSql.checkData(5, 1, 88) + tdSql.checkData(5, 2, 'ct2') + + tdSql.query('select diff(c1,0) from st group by tbname') + tdSql.checkRows(6) + tdSql.checkData(0, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 1000)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 'ct1') + tdSql.checkData(1, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 3000)) + tdSql.checkData(1, 1, -6) + tdSql.checkData(1, 2, 'ct1') + tdSql.checkData(2, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 4000)) + tdSql.checkData(2, 1, 8) + tdSql.checkData(2, 2, 'ct1') + tdSql.checkData(3, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 2000)) + tdSql.checkData(3, 1, 11) + tdSql.checkData(3, 2, 'ct2') + tdSql.checkData(4, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 3000)) + tdSql.checkData(4, 1, -66) + tdSql.checkData(4, 2, 'ct2') + tdSql.checkData(5, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 4000)) + tdSql.checkData(5, 1, 88) + tdSql.checkData(5, 2, 'ct2') + + tdSql.query('select diff(c1,1) from st group by tbname') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 1000)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 'ct1') + tdSql.checkData(1, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 4000)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(1, 2, 'ct1') + tdSql.checkData(2, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 2000)) + tdSql.checkData(2, 1, 11) + tdSql.checkData(2, 2, 'ct2') + tdSql.checkData(3, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 4000)) + tdSql.checkData(3, 1, 22) + tdSql.checkData(3, 2, 'ct2') + + tdSql.query('select diff(c2) from st group by tbname') + tdSql.checkRows(6) + tdSql.checkData(0, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 1000)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(0, 2, 'ct1') + tdSql.checkData(1, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 3000)) + tdSql.checkData(1, 1, -10.0) + tdSql.checkData(1, 2, 'ct1') + tdSql.checkData(2, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 4000)) + tdSql.checkData(2, 1, 14.0) + tdSql.checkData(2, 2, 'ct1') + tdSql.checkData(3, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 2000)) + tdSql.checkData(3, 1, 22.0) + tdSql.checkData(3, 2, 'ct2') + tdSql.checkData(4, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 3000)) + tdSql.checkData(4, 1, -110.0) + tdSql.checkData(4, 2, 'ct2') + tdSql.checkData(5, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 4000)) + tdSql.checkData(5, 1, 154.0) + tdSql.checkData(5, 2, 'ct2') + + tdSql.query('select diff(c2,0) from st group by tbname') + tdSql.checkRows(6) + tdSql.checkData(0, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 1000)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(0, 2, 'ct1') + tdSql.checkData(1, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 3000)) + tdSql.checkData(1, 1, -10.0) + tdSql.checkData(1, 2, 'ct1') + tdSql.checkData(2, 0, 
datetime.datetime(2022, 1, 20, 15, 10, 22, 4000)) + tdSql.checkData(2, 1, 14.0) + tdSql.checkData(2, 2, 'ct1') + tdSql.checkData(3, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 2000)) + tdSql.checkData(3, 1, 22.0) + tdSql.checkData(3, 2, 'ct2') + tdSql.checkData(4, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 3000)) + tdSql.checkData(4, 1, -110.0) + tdSql.checkData(4, 2, 'ct2') + tdSql.checkData(5, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 4000)) + tdSql.checkData(5, 1, 154.0) + tdSql.checkData(5, 2, 'ct2') + + tdSql.query('select diff(c2,1) from st group by tbname') + tdSql.checkRows(4) + tdSql.checkData(0, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 1000)) + tdSql.checkData(0, 1, 2.0) + tdSql.checkData(0, 2, 'ct1') + tdSql.checkData(1, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 4000)) + tdSql.checkData(1, 1, 4.0) + tdSql.checkData(1, 2, 'ct1') + tdSql.checkData(2, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 2000)) + tdSql.checkData(2, 1, 22.0) + tdSql.checkData(2, 2, 'ct2') + tdSql.checkData(3, 0, datetime.datetime(2022, 1, 20, 15, 10, 22, 4000)) + tdSql.checkData(3, 1, 44.0) + tdSql.checkData(3, 2, 'ct2') + + tdSql.execute('drop database diffneg') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/2-query/func_compare.py b/tests/develop-test/2-query/func_compare.py new file mode 100644 index 0000000000000000000000000000000000000000..6a5b4c7e975e04a982984671b39650a957ef4754 --- /dev/null +++ b/tests/develop-test/2-query/func_compare.py @@ -0,0 +1,393 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+    def caseDescription(self):
+        '''
+        case1: [TD-12861]: the taos shell crashed with a core dump on queries such as "select first(c1)==max(c1) from st"
+        '''
+        return
+
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/build/bin")]
+                    break
+        return buildPath
+
+    def run(self):
+        print("running {}".format(__file__))
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db")
+        tdSql.execute('use db')
+
+        #Prepare data
+        tdSql.execute("create table tb (ts timestamp, value int);")
+        tdSql.execute("insert into tb values (now, 1);")
+        tdSql.execute("insert into tb values (now, 2);")
+        tdSql.execute("insert into tb values (now, 3);")
+        tdSql.execute("insert into tb values (now, 4);")
+
+        ##operator: =
+        tdSql.error('select count(*) = 4 from tb;')
+        tdSql.error('select avg(value) = 2.5 from tb')
+        tdSql.error('select twa(value) = 3.03 from tb')
+        tdSql.error('select sum(value) = 10 from tb')
+        tdSql.error('select stddev(value) = 2.0 from tb')
+        tdSql.error('select min(value) = 1 from tb')
+        tdSql.error('select max(value) = 4 from tb')
+        tdSql.error('select first(*) = 3 from tb')
+        tdSql.error('select last(*) = 3 from tb')
+        tdSql.error('select top(value, 3) = 3 from tb')
+        tdSql.error('select bottom(value, 3) = 3 from tb')
+        tdSql.error('select percentile(value, 50) = 3 from tb')
+        tdSql.error('select apercentile(value, 50) = 3 from tb')
+        tdSql.error('select last_row(*) = 3 from tb')
+        tdSql.error('select diff(value) = 3 from tb')
+        tdSql.error('select ceil(value) = 12 from tb')
+        tdSql.error('select floor(3.5) = 3 from tb')
+        tdSql.error('select round(3.5) = 3 from tb')
+
+        tdSql.error('select count(*) = max(value) from tb')
+        tdSql.error('select avg(value) = min(value) from tb')
+        tdSql.error('select first(value) = last(value) from tb')
+        tdSql.error('select round(value) = round(value) from tb')
+
+        ##operator: !=
+        tdSql.error('select count(*) != 4 from tb;')
+        tdSql.error('select avg(value) != 2.5 from tb')
+        tdSql.error('select twa(value) != 3.03 from tb')
+        tdSql.error('select sum(value) != 10 from tb')
+        tdSql.error('select stddev(value) != 2.0 from tb')
+        tdSql.error('select min(value) != 1 from tb')
+        tdSql.error('select max(value) != 4 from tb')
+        tdSql.error('select first(*) != 3 from tb')
+        tdSql.error('select last(*) != 3 from tb')
+        tdSql.error('select top(value, 3) != 3 from tb')
+        tdSql.error('select bottom(value, 3) != 3 from tb')
+        tdSql.error('select percentile(value, 50) != 3 from tb')
+        tdSql.error('select apercentile(value, 50) != 3 from tb')
+        tdSql.error('select last_row(*) != 3 from tb')
+        tdSql.error('select diff(value) != 3 from tb')
+        tdSql.error('select ceil(value) != 
12 from tb') + tdSql.error('select floor(3.5) != 3 from tb') + tdSql.error('select round(3.5) != 3 from tb') + + tdSql.error('select count(*) != max(value) from tb') + tdSql.error('select avg(value) != min(value) from tb') + tdSql.error('select first(value) != last(value) from tb') + tdSql.error('select round(value) != round(value) from tb') + + ##operator: <> + tdSql.error('select count(*) <> 4 from tb;') + tdSql.error('select avg(value) <> 2.5 from tb') + tdSql.error('select twa(value) <> 3.03 from tb') + tdSql.error('select sum(value) <> 10 from tb') + tdSql.error('select stddev(value) <> 2.0 from tb') + tdSql.error('select min(value) <> 1 from tb') + tdSql.error('select max(value) <> 4 from tb') + tdSql.error('select first(*) <> 3 from tb') + tdSql.error('select last(*) <> 3 from tb') + tdSql.error('select top(value, 3) <> 3 from tb') + tdSql.error('select bottom(value, 3) <> 3 from tb') + tdSql.error('select percentile(value, 50) <> 3 from tb') + tdSql.error('select apercentile(value, 50) <> 3 from tb') + tdSql.error('select last_row(*) <> 3 from tb') + tdSql.error('select diff(value) <> 3 from tb') + tdSql.error('select ceil(value) <> 12 from tb') + tdSql.error('select floor(3.5) <> 3 from tb') + tdSql.error('select round(3.5) <> 3 from tb') + + tdSql.error('select count(*) <> max(value) from tb') + tdSql.error('select avg(value) <> min(value) from tb') + tdSql.error('select first(value) <> last(value) from tb') + tdSql.error('select round(value) <> round(value) from tb') + + ##operator: < + tdSql.error('select count(*) < 4 from tb;') + tdSql.error('select avg(value) < 2.5 from tb') + tdSql.error('select twa(value) < 3.03 from tb') + tdSql.error('select sum(value) < 10 from tb') + tdSql.error('select stddev(value) < 2.0 from tb') + tdSql.error('select min(value) < 1 from tb') + tdSql.error('select max(value) < 4 from tb') + tdSql.error('select first(*) < 3 from tb') + tdSql.error('select last(*) < 3 from tb') + tdSql.error('select top(value, 3) < 3 from tb') + tdSql.error('select bottom(value, 3) < 3 from tb') + tdSql.error('select percentile(value, 50) < 3 from tb') + tdSql.error('select apercentile(value, 50) < 3 from tb') + tdSql.error('select last_row(*) < 3 from tb') + tdSql.error('select diff(value) < 3 from tb') + tdSql.error('select ceil(value) < 12 from tb') + tdSql.error('select floor(3.5) < 3 from tb') + tdSql.error('select round(3.5) < 3 from tb') + + tdSql.error('select count(*) < max(value) from tb') + tdSql.error('select avg(value) < min(value) from tb') + tdSql.error('select first(value) < last(value) from tb') + tdSql.error('select round(value) < round(value) from tb') + + ##operator: > + tdSql.error('select count(*) > 4 from tb;') + tdSql.error('select avg(value) > 2.5 from tb') + tdSql.error('select twa(value) > 3.03 from tb') + tdSql.error('select sum(value) > 10 from tb') + tdSql.error('select stddev(value) > 2.0 from tb') + tdSql.error('select min(value) > 1 from tb') + tdSql.error('select max(value) > 4 from tb') + tdSql.error('select first(*) > 3 from tb') + tdSql.error('select last(*) > 3 from tb') + tdSql.error('select top(value, 3) > 3 from tb') + tdSql.error('select bottom(value, 3) > 3 from tb') + tdSql.error('select percentile(value, 50) > 3 from tb') + tdSql.error('select apercentile(value, 50) > 3 from tb') + tdSql.error('select last_row(*) > 3 from tb') + tdSql.error('select diff(value) > 3 from tb') + tdSql.error('select ceil(value) > 12 from tb') + tdSql.error('select floor(3.5) > 3 from tb') + tdSql.error('select round(3.5) > 3 from tb') + + 
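Every comparison operator in this case repeats the same matrix: each aggregate, selector, or scalar function is compared against a literal, then (as in the four assertions that follow) against another function call, and every combination must be rejected. A minimal sketch of driving that matrix from one loop, assuming only that tdSql.error(sql) passes when the server refuses the statement, as it is used throughout this suite:

    # Hypothetical condensation of the hand-written per-operator blocks.
    OPERATORS = ['=', '!=', '<>', '<', '>', '<=', '>=']
    FUNC_EXPRS = [
        'count(*)', 'avg(value)', 'twa(value)', 'sum(value)', 'stddev(value)',
        'min(value)', 'max(value)', 'first(*)', 'last(*)', 'top(value, 3)',
        'bottom(value, 3)', 'percentile(value, 50)', 'apercentile(value, 50)',
        'last_row(*)', 'diff(value)', 'ceil(value)', 'floor(3.5)', 'round(3.5)',
    ]

    def check_comparison_matrix(tdSql):
        for op in OPERATORS:
            for expr in FUNC_EXPRS:
                # function vs. literal in the select list must fail
                tdSql.error('select {} {} 3 from tb;'.format(expr, op))
            # function vs. function must fail as well
            tdSql.error('select first(value) {} last(value) from tb'.format(op))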
tdSql.error('select count(*) > max(value) from tb') + tdSql.error('select avg(value) > min(value) from tb') + tdSql.error('select first(value) > last(value) from tb') + tdSql.error('select round(value) > round(value) from tb') + + ##operator: <= + tdSql.error('select count(*) <= 4 from tb;') + tdSql.error('select avg(value) <= 2.5 from tb') + tdSql.error('select twa(value) <= 3.03 from tb') + tdSql.error('select sum(value) <= 10 from tb') + tdSql.error('select stddev(value) <= 2.0 from tb') + tdSql.error('select min(value) <= 1 from tb') + tdSql.error('select max(value) <= 4 from tb') + tdSql.error('select first(*) <= 3 from tb') + tdSql.error('select last(*) <= 3 from tb') + tdSql.error('select top(value, 3) <= 3 from tb') + tdSql.error('select bottom(value, 3) <= 3 from tb') + tdSql.error('select percentile(value, 50) <= 3 from tb') + tdSql.error('select apercentile(value, 50) <= 3 from tb') + tdSql.error('select last_row(*) <= 3 from tb') + tdSql.error('select diff(value) <= 3 from tb') + tdSql.error('select ceil(value) <= 12 from tb') + tdSql.error('select floor(3.5) <= 3 from tb') + tdSql.error('select round(3.5) <= 3 from tb') + + tdSql.error('select count(*) <= max(value) from tb') + tdSql.error('select avg(value) <= min(value) from tb') + tdSql.error('select first(value) <= last(value) from tb') + tdSql.error('select round(value) <= round(value) from tb') + + ##operator: >= + tdSql.error('select count(*) >= 4 from tb;') + tdSql.error('select avg(value) >= 2.5 from tb') + tdSql.error('select twa(value) >= 3.03 from tb') + tdSql.error('select sum(value) >= 10 from tb') + tdSql.error('select stddev(value) >= 2.0 from tb') + tdSql.error('select min(value) >= 1 from tb') + tdSql.error('select max(value) >= 4 from tb') + tdSql.error('select first(*) >= 3 from tb') + tdSql.error('select last(*) >= 3 from tb') + tdSql.error('select top(value, 3) >= 3 from tb') + tdSql.error('select bottom(value, 3) >= 3 from tb') + tdSql.error('select percentile(value, 50) >= 3 from tb') + tdSql.error('select apercentile(value, 50) >= 3 from tb') + tdSql.error('select last_row(*) >= 3 from tb') + tdSql.error('select diff(value) >= 3 from tb') + tdSql.error('select ceil(value) >= 12 from tb') + tdSql.error('select floor(3.5) >= 3 from tb') + tdSql.error('select round(3.5) >= 3 from tb') + + tdSql.error('select count(*) >= max(value) from tb') + tdSql.error('select avg(value) >= min(value) from tb') + tdSql.error('select first(value) >= last(value) from tb') + tdSql.error('select round(value) >= round(value) from tb') + + ##operator: between and + tdSql.error('select count(*) between 3 and 4 from tb;') + tdSql.error('select avg(value) between 1.5 and 2.5 from tb') + tdSql.error('select twa(value) between 3.0 and 3.03 from tb') + tdSql.error('select sum(value) between 1 and 10 from tb') + tdSql.error('select stddev(value) between 1 and 2.0 from tb') + tdSql.error('select min(value) between 2 and 5 from tb') + tdSql.error('select max(value) between 1 and 10 from tb') + tdSql.error('select first(*) between 1 and 3 from tb') + tdSql.error('select last(*) between 0 and 3 from tb') + tdSql.error('select top(value, 3) between 0.0 and 3 from tb') + tdSql.error('select bottom(value, 3) between 0.0 and 3 from tb') + tdSql.error('select percentile(value, 50) between 1 and 3 from tb') + tdSql.error('select apercentile(value, 50) between 2 and 3 from tb') + tdSql.error('select last_row(*) between 2 and 3 from tb') + tdSql.error('select diff(value) between 1 and 3 from tb') + tdSql.error('select ceil(value) between 5 and 
12 from tb') + tdSql.error('select floor(3.5) between 12 and 3 from tb') + tdSql.error('select round(3.5) between true and 3 from tb') + + tdSql.error('select count(*) between min(value) and max(value) from tb') + tdSql.error('select avg(*) between min(value) and 3 from tb') + tdSql.error('select avg(value) between 1 and max(value) from tb') + tdSql.error('select first(value) between first(value) and last(value) from tb') + tdSql.error('select round(value) between ceil(value) and floor(value) from tb') + + ##operator: and + tdSql.error('select count(*) and 1 from tb;') + tdSql.error('select avg(value) and 0.0 from tb') + tdSql.error('select twa(value) and true from tb') + tdSql.error('select sum(value) and false from tb') + tdSql.error('select 1 and stddev(value) from tb') + tdSql.error('select 0.0 and min(value) from tb') + tdSql.error('select true and max(value) from tb') + tdSql.error('select false and first(*) from tb') + tdSql.error('select last(*) and first(value) from tb') + tdSql.error('select top(value, 3) and bottom(value, 3) from tb') + tdSql.error('select percentile(value, 50) and apercentile(value, 50) from tb') + tdSql.error('select diff(value) and ceil(value) from tb') + tdSql.error('select floor(3.5) and round(3.5) and ceil(3.5) from tb') + tdSql.error('select true and round(3.5) and 3 from tb') + + ##operator: or + tdSql.error('select count(*) or 1 from tb;') + tdSql.error('select avg(value) or 0.0 from tb') + tdSql.error('select twa(value) or true from tb') + tdSql.error('select sum(value) or false from tb') + tdSql.error('select 1 or stddev(value) from tb') + tdSql.error('select 0.0 or min(value) from tb') + tdSql.error('select true or max(value) from tb') + tdSql.error('select false or first(*) from tb') + tdSql.error('select last(*) or first(value) from tb') + tdSql.error('select top(value, 3) or bottom(value, 3) from tb') + tdSql.error('select percentile(value, 50) or apercentile(value, 50) from tb') + tdSql.error('select diff(value) or ceil(value) from tb') + tdSql.error('select floor(3.5) or round(3.5) or ceil(3.5) from tb') + tdSql.error('select true or round(3.5) or 3 from tb') + + ##operator: multiple operations + tdSql.error('select count(*) <> avg(value) or twa(value) and sum(value) or 1 from tb;') + tdSql.error('select 1 and stddev(value) <= min(value) or max(value) and first(*) or 0.0 from tb') + tdSql.error('select last(*) and first(value) or top(value, 3) and 3 between 4.0 and bottom(value, 3)from tb') + tdSql.error('select percentile(value, 50) or diff(value) = ceil(value) and apercentile(value, 50) from tb') + tdSql.error('select floor(3.5) or round(3.5) and ceil(3.5) > true and round(3.5) or 3 from tb') + + #operator: is NULL + tdSql.error('select count(*) is NULL from tb;') + tdSql.error('select avg(value) is NULL from tb;') + tdSql.error('select twa(value) is NULL from tb;') + tdSql.error('select sum(value) is NULL from tb;') + tdSql.error('select stddev(value) is NULL from tb;') + tdSql.error('select min(value) is NULL from tb;') + tdSql.error('select max(value) is NULL from tb;') + tdSql.error('select first(*) is NULL from tb;') + tdSql.error('select last(*) is NULL from tb;') + tdSql.error('select top(value, 3) is NULL or bottom(value,3) is NULL from tb;') + tdSql.error('select percentile(value, 50) is NULL or apercentile(value, 50) is NULL from tb') + tdSql.error('select diff(value) is NULL or ceil(value) is NULL from tb') + tdSql.error('select floor(3.5) is NULL or round(3.5) is NULL or ceil(3.5) is NULL from tb') + + #operator: is not NULL + 
tdSql.error('select count(*) is not NULL from tb;') + tdSql.error('select avg(value) is not NULL from tb;') + tdSql.error('select twa(value) is not NULL from tb;') + tdSql.error('select sum(value) is not NULL from tb;') + tdSql.error('select stddev(value) is not NULL from tb;') + tdSql.error('select min(value) is not NULL from tb;') + tdSql.error('select max(value) is not NULL from tb;') + tdSql.error('select first(*) is not NULL from tb;') + tdSql.error('select last(*) is not NULL from tb;') + tdSql.error('select top(value, 3) is not NULL or bottom(value,3) is not NULL from tb;') + tdSql.error('select percentile(value, 50) is not NULL or apercentile(value, 50) is not NULL from tb') + tdSql.error('select diff(value) is not NULL or ceil(value) is not NULL from tb') + tdSql.error('select floor(3.5) is not NULL or round(3.5) is not NULL or ceil(3.5) is not NULL from tb') + + #operator: like + tdSql.error('select count(*) like "abc" from tb;') + tdSql.error('select avg(value) like "abc" from tb;') + tdSql.error('select twa(value) like "abc" from tb;') + tdSql.error('select sum(value) like "abc" from tb;') + tdSql.error('select stddev(value) like "abc" from tb;') + tdSql.error('select min(value) like "abc" from tb;') + tdSql.error('select max(value) like "abc" from tb;') + tdSql.error('select first(*) like "abc" from tb;') + tdSql.error('select last(*) like "abc" from tb;') + tdSql.error('select top(value, 3) like "abc" or bottom(value,3) like "abc" from tb;') + tdSql.error('select percentile(value, 50) like "abc" or apercentile(value, 50) like "abc" from tb') + tdSql.error('select diff(value) like "abc" or ceil(value) like "abc" from tb') + tdSql.error('select floor(3.5) like "abc" or round(3.5) like "abc" or ceil(3.5) like "abc" from tb') + + #operator: match + tdSql.error('select count(*) match "abc" from tb;') + tdSql.error('select avg(value) match "abc" from tb;') + tdSql.error('select twa(value) match "abc" from tb;') + tdSql.error('select sum(value) match "abc" from tb;') + tdSql.error('select stddev(value) match "abc" from tb;') + tdSql.error('select min(value) match "abc" from tb;') + tdSql.error('select max(value) match "abc" from tb;') + tdSql.error('select first(*) match "abc" from tb;') + tdSql.error('select last(*) match "abc" from tb;') + tdSql.error('select top(value, 3) match "abc" or bottom(value,3) match "abc" from tb;') + tdSql.error('select percentile(value, 50) match "abc" or apercentile(value, 50) match "abc" from tb') + tdSql.error('select diff(value) match "abc" or ceil(value) match "abc" from tb') + tdSql.error('select floor(3.5) match "abc" or round(3.5) match "abc" or ceil(3.5) match "abc" from tb') + + #operator: nmatch + tdSql.error('select count(*) nmatch "abc" from tb;') + tdSql.error('select avg(value) nmatch "abc" from tb;') + tdSql.error('select twa(value) nmatch "abc" from tb;') + tdSql.error('select sum(value) nmatch "abc" from tb;') + tdSql.error('select stddev(value) nmatch "abc" from tb;') + tdSql.error('select min(value) nmatch "abc" from tb;') + tdSql.error('select max(value) nmatch "abc" from tb;') + tdSql.error('select first(*) nmatch "abc" from tb;') + tdSql.error('select last(*) nmatch "abc" from tb;') + tdSql.error('select top(value, 3) nmatch "abc" or bottom(value,3) nmatch "abc" from tb;') + tdSql.error('select percentile(value, 50) nmatch "abc" or apercentile(value, 50) nmatch "abc" from tb') + tdSql.error('select diff(value) nmatch "abc" or ceil(value) nmatch "abc" from tb') + tdSql.error('select floor(3.5) nmatch "abc" or round(3.5) nmatch 
"abc" or ceil(3.5) nmatch "abc" from tb') + + #operator: in + tdSql.error('select count(*) in 1 from tb;') + tdSql.error('select avg(value) in (1, 2, 3) from tb;') + tdSql.error('select twa(value) in 1.0 from tb;') + tdSql.error('select sum(value) in (1.0, 2.0, 3.0) from tb;') + tdSql.error('select min(value) in (true, false, true) from tb;') + tdSql.error('select tbname in (\'acd\', \'bce\') from tb;') + tdSql.error('select t in ("acd", "bce") from tb;') + tdSql.error('select top(value, 3) in (1,2,3) and ceil(value) in (1.0,2.0,3.0) or last(*) in ("abc","cde") from tb;') + + tdSql.execute('drop database db') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/2-query/math_funcs.py b/tests/develop-test/2-query/math_funcs.py new file mode 100644 index 0000000000000000000000000000000000000000..d7ea3b5b05ac51ed5f830fd10df06587f0aa0550 --- /dev/null +++ b/tests/develop-test/2-query/math_funcs.py @@ -0,0 +1,10686 @@ +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import tdDnodes +from math import inf + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-11204]Difference improvement that can ignore negative + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + + def restartTaosd(self, index=1, dbname="db"): + tdDnodes.stop(index) + tdDnodes.startWithoutSleep(index) + tdSql.execute(f"use math_funcs") + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists math_funcs") + tdSql.execute("create database if not exists math_funcs") + tdSql.execute('use math_funcs') + tdSql.execute('create table stb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);') + + tdSql.execute("create table tb1 using stb1 tags(1,'1',1.0);") + + tdSql.execute("create table tb2 using stb1 tags(2,'2',2.0);") + + tdSql.execute("create table tb3 using stb1 tags(3,'3',3.0);") + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:00\',true,1,1,1,1,1,1,"123","1234",1,1,1,1);') + + tdSql.execute("insert into tb1 values ('2021-11-11 09:00:01',true,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);") + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:02\',true,2,NULL,2,NULL,2,NULL,"234",NULL,2,NULL,2,NULL);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:03\',false,NULL,3,NULL,3,NULL,3,NULL,"3456",NULL,3,NULL,3);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:04\',true,4,4,4,4,4,4,"456","4567",4,4,4,4);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:05\',true,127,32767,2147483647,9223372036854775807,3.402823466e+38,1.79769e+308,"567","5678",254,65534,4294967294,9223372036854775807);') + + tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:06\',true,-127,-32767,-2147483647,-9223372036854775807,-3.402823466e+38,-1.79769e+308,"678","6789",0,0,0,0);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:00\',true,1,1,1,1,1,1,"111","1111",1,1,1,1);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:01\',true,2,2,2,2,2,2,"222","2222",2,2,2,2);') + + tdSql.execute('insert into tb2 
values (\'2021-11-11 09:00:02\',true,3,3,2,3,3,3,"333","3333",3,3,3,3);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:03\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:04\',true,5,5,5,5,5,5,"555","5555",5,5,5,5);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:05\',true,6,6,6,6,6,6,"666","6666",6,6,6,6);') + + tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:06\',true,7,7,7,7,7,7,"777","7777",7,7,7,7);') + + tdSql.execute('create table tbn (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned);') + + tdSql.execute('insert into tbn values (\'2021-11-11 09:00:00\',true,1,1,1,1,1,1,"111","1111",1,1,1,1);') + + tdSql.execute('insert into tbn values (\'2021-11-11 09:00:01\',true,2,2,2,2,2,2,"222","2222",2,2,2,2);') + + tdSql.execute('insert into tbn values (\'2021-11-11 09:00:02\',true,3,3,2,3,3,3,"333","3333",3,3,3,3);') + + tdSql.execute('insert into tbn values (\'2021-11-11 09:00:03\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);') + + tdSql.execute('insert into tbn values (\'2021-11-11 09:00:04\',true,5,5,5,5,5,5,"555","5555",5,5,5,5);') + + tdSql.execute('insert into tbn values (\'2021-11-11 09:00:05\',true,6,6,6,6,6,6,"666","6666",6,6,6,6);') + + tdSql.execute('insert into tbn values (\'2021-11-11 09:00:06\',true,7,7,7,7,7,7,"777","7777",7,7,7,7);') + + #=========== begin math_sqrt ========== + tdSql.query('select sqrt(13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 3.605551275463989) + tdSql.checkData(1, 0, 3.605551275463989) + tdSql.checkData(2, 0, 3.605551275463989) + tdSql.checkData(3, 0, 3.605551275463989) + tdSql.checkData(4, 0, 3.605551275463989) + tdSql.checkData(5, 0, 3.605551275463989) + tdSql.checkData(6, 0, 3.605551275463989) + + tdSql.query('select sqrt(c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.4142135623730951) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(5, 0, 11.269427669584644) + tdSql.checkData(6, 0, None) + + tdSql.query('select sqrt(c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 1.7320508075688772) + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(5, 0, 181.01657382681842) + tdSql.checkData(6, 0, None) + + tdSql.query('select sqrt(c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.4142135623730951) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(5, 0, 46340.950001051984) + tdSql.checkData(6, 0, None) + + tdSql.query('select sqrt(c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 1.7320508075688772) + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(5, 0, 3037000499.97605) + tdSql.checkData(6, 0, None) + + tdSql.query('select sqrt(c6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.4142135623730951) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(5, 0, 1.844674352395373e+19) + tdSql.checkData(6, 0, None) + + tdSql.query('select sqrt(c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, 
None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 1.7320508075688772) + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(5, 0, 1.3407796239501852e+154) + tdSql.checkData(6, 0, None) + + tdSql.query('select sqrt(c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.4142135623730951) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(5, 0, 15.937377450509228) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select sqrt(c11) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 1.7320508075688772) + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(5, 0, 255.99609372019722) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select sqrt(c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.4142135623730951) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(5, 0, 65535.99998474121) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select sqrt(c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 1.7320508075688772) + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(5, 0, 3037000499.97605) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select sqrt(a) from (select sqrt(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.189207115002721) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.4142135623730951) + tdSql.checkData(5, 0, 3.3569968229929326) + tdSql.checkData(6, 0, None) + + tdSql.query('select sqrt(c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.4142135623730951) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 2.8284271247461903) + tdSql.checkData(5, 0, 181.36703118262702) + tdSql.checkData(6, 0, None) + + tdSql.query('select sqrt((c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.4142135623730951) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 2.8284271247461903) + tdSql.checkData(5, 0, 181.36703118262702) + tdSql.checkData(6, 0, None) + + tdSql.query('select sqrt((c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 3.7416573867739413) + tdSql.checkData(5, 0, 46385.82811592351) + tdSql.checkData(6, 0, None) + + tdSql.query('select sqrt(11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 4.3166247903554) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 5.3166247903554) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 7.3166247903554) + tdSql.checkData(5, 0, 130.3166247903554) + tdSql.checkData(6, 0, -123.6833752096446) + + tdSql.query('select sqrt(c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 12.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 12.414213562373096) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 13.0) + tdSql.checkData(5, 0, 22.269427669584644) + tdSql.checkData(6, 0, None) + + tdSql.query('select sqrt(c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, 1.4142135623730951) + tdSql.checkData(1, 0, 2.8284271247461903) + 
tdSql.checkData(2, 0, 181.36703118262702) + tdSql.checkData(3, 0, None) + + tdSql.query('select sqrt(c2) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 11.269427669584644) + tdSql.checkData(2, 0, 2.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.4142135623730951) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.0) + + tdSql.query('select sqrt(c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 181.36703118262702) + tdSql.checkData(2, 0, 2.8284271247461903) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.4142135623730951) + + tdSql.query('select sqrt(c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, 2.8284271247461903) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select sqrt(c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.4142135623730951) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(5, 0, 11.269427669584644) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 1.0) + tdSql.checkData(8, 0, 1.4142135623730951) + tdSql.checkData(9, 0, 1.7320508075688772) + tdSql.checkData(10, 0, 2.0) + tdSql.checkData(11, 0, 2.23606797749979) + tdSql.checkData(12, 0, 2.449489742783178) + tdSql.checkData(13, 0, 2.6457513110645907) + + tdSql.query('select sqrt(c2) from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select sqrt(c4),t1,c4 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select sqrt(c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, 1.7320508075688772) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, 181.01657382681842) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, 1.0) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, 1.4142135623730951) + tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, 1.7320508075688772) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, 2.0) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, 2.23606797749979) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, 2.449489742783178) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, 2.6457513110645907) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select sqrt(c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, 1.4142135623730951) + tdSql.checkData(1, 1, 'tb2') + tdSql.checkData(2, 0, 1.7320508075688772) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, 2.0) + tdSql.checkData(3, 1, 'tb2') + tdSql.checkData(4, 0, 2.23606797749979) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, 2.449489742783178) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, 2.6457513110645907) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select sqrt(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, 1.4142135623730951) + tdSql.checkData(2, 0, 1.5811388300841898) + tdSql.checkData(3, 0, 2.0) + 
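The NULL expectations in these sqrt checks come from two sources: rows whose input is NULL, and rows whose input is negative. In the interval subquery being verified here, the 09:00:06 bucket averages tb1's -127 with tb2's 7 to -60, so its square root is NULL. A reference helper that mirrors this rule (a sketch of the expected semantics, not the server's implementation):

    import math

    def expected_sqrt(x):
        # SQL NULL maps to Python None; sqrt of a negative value is NULL too.
        if x is None or x < 0:
            return None
        return math.sqrt(x)

    assert expected_sqrt(None) is None
    assert expected_sqrt(-60) is None
    assert expected_sqrt(4) == 2.0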
tdSql.checkData(4, 0, 2.1213203435596424) + tdSql.checkData(5, 0, 8.154753215150045) + tdSql.checkData(6, 0, None) + + tdSql.query('select sqrt(c2) from (select * from stb1);') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.4142135623730951) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(5, 0, 11.269427669584644) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 1.0) + tdSql.checkData(8, 0, 1.4142135623730951) + tdSql.checkData(9, 0, 1.7320508075688772) + tdSql.checkData(10, 0, 2.0) + tdSql.checkData(11, 0, 2.23606797749979) + tdSql.checkData(12, 0, 2.449489742783178) + tdSql.checkData(13, 0, 2.6457513110645907) + + tdSql.query("select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, 1.4142135623730951) + tdSql.checkData(2, 0, 1.5811388300841898) + tdSql.checkData(3, 0, 2.0) + tdSql.checkData(4, 0, 2.1213203435596424) + tdSql.checkData(5, 0, 8.154753215150045) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, 1.4142135623730951) + tdSql.checkData(2, 0, 1.5811388300841898) + tdSql.checkData(3, 0, 2.0) + tdSql.checkData(4, 0, 2.1213203435596424) + tdSql.checkData(5, 0, 8.154753215150045) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 8.154753215150045) + tdSql.checkData(5, 0, 2.1213203435596424) + tdSql.checkData(6, 0, 2.0) + tdSql.checkData(7, 0, 1.5811388300841898) + tdSql.checkData(8, 0, 1.4142135623730951) + tdSql.checkData(9, 0, 1.0) + + tdSql.query("select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 8.154753215150045) + tdSql.checkData(4, 0, 2.1213203435596424) + tdSql.checkData(5, 0, 2.0) + tdSql.checkData(6, 0, 1.5811388300841898) + tdSql.checkData(7, 0, 1.4142135623730951) + tdSql.checkData(8, 0, 1.0) + tdSql.checkData(9, 0, None) + + tdSql.query("select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 1.0) + tdSql.checkData(2, 0, 1.4142135623730951) + tdSql.checkData(3, 0, 1.5811388300841898) + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(5, 0, 2.1213203435596424) + tdSql.checkData(6, 0, 8.154753215150045) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query('select sqrt(a) from (select sqrt(c2) as a 
from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.189207115002721) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.4142135623730951) + tdSql.checkData(5, 0, 3.3569968229929326) + tdSql.checkData(6, 0, None) + + tdSql.query('select sqrt(tb1.c3),sqrt(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(0, 1, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 1.4142135623730951) + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 1.7320508075688772) + tdSql.checkData(3, 0, 1.7320508075688772) + tdSql.checkData(3, 1, 2.0) + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(4, 1, 2.23606797749979) + tdSql.checkData(5, 0, 181.01657382681842) + tdSql.checkData(5, 1, 2.449489742783178) + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, 2.6457513110645907) + + tdSql.query('select sqrt(c3) from tb1 union all select sqrt(c3) from tb2;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 1.7320508075688772) + tdSql.checkData(4, 0, 2.0) + tdSql.checkData(5, 0, 181.01657382681842) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 1.0) + tdSql.checkData(8, 0, 1.4142135623730951) + tdSql.checkData(9, 0, 1.7320508075688772) + tdSql.checkData(10, 0, 2.0) + tdSql.checkData(11, 0, 2.23606797749979) + tdSql.checkData(12, 0, 2.449489742783178) + tdSql.checkData(13, 0, 2.6457513110645907) + + #=========== end math_sqrt ========== + + + #=========== begin math_abs ========== + tdSql.query('select abs(13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 13) + tdSql.checkData(1, 0, 13) + tdSql.checkData(2, 0, 13) + tdSql.checkData(3, 0, 13) + tdSql.checkData(4, 0, 13) + tdSql.checkData(5, 0, 13) + tdSql.checkData(6, 0, 13) + + tdSql.query('select abs(c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 127) + tdSql.checkData(6, 0, 127) + + tdSql.query('select abs(c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 3) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 32767) + tdSql.checkData(6, 0, 32767) + + tdSql.query('select abs(c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 2147483647) + tdSql.checkData(6, 0, 2147483647) + + tdSql.query('select abs(c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 3) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 9223372036854775807) + tdSql.checkData(6, 0, 9223372036854775807) + + tdSql.query('select abs(c6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4.0) + tdSql.checkData(5, 0, 3.4028234663852886e+38) + tdSql.checkData(6, 0, 3.4028234663852886e+38) + + tdSql.query('select abs(c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 3.0) + tdSql.checkData(4, 0, 4.0) + tdSql.checkData(5, 0, 1.79769e+308) + tdSql.checkData(6, 0, 
1.79769e+308) + + tdSql.query('select abs(c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 254) + tdSql.checkData(6, 0, 0) + + tdSql.query('select abs(c11) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 3) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 65534) + tdSql.checkData(6, 0, 0) + + tdSql.query('select abs(c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 4294967294) + tdSql.checkData(6, 0, 0) + + tdSql.query('select abs(c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 3) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 9223372036854775807) + tdSql.checkData(6, 0, 0) + + tdSql.query('select abs(a) from (select abs(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 127) + tdSql.checkData(6, 0, 127) + + tdSql.query('select abs(c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 2.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 8.0) + tdSql.checkData(5, 0, 32894.0) + tdSql.checkData(6, 0, 32894.0) + + tdSql.query('select abs((c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 2.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 8.0) + tdSql.checkData(5, 0, 32894.0) + tdSql.checkData(6, 0, 32894.0) + + tdSql.query('select abs((c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 4.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 14.0) + tdSql.checkData(5, 0, 2151645050.0) + tdSql.checkData(6, 0, 2143322244.0) + + tdSql.query('select abs(11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 12.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 13.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 15.0) + tdSql.checkData(5, 0, 138.0) + tdSql.checkData(6, 0, -116.0) + + tdSql.query('select abs(c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 12.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 13.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 15.0) + tdSql.checkData(5, 0, 138.0) + tdSql.checkData(6, 0, 138.0) + + tdSql.query('select abs(c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, 2.0) + tdSql.checkData(1, 0, 8.0) + tdSql.checkData(2, 0, 32894.0) + tdSql.checkData(3, 0, 32894.0) + + tdSql.query('select abs(c2) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 127) + tdSql.checkData(1, 0, 127) + tdSql.checkData(2, 0, 4) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 2) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1) + + tdSql.query('select abs(c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 32894.0) + tdSql.checkData(1, 0, 32894.0) + tdSql.checkData(2, 0, 8.0) + tdSql.checkData(3, 0, None) + 
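Note the types in these expectations: abs over a single integer column returns integers (127, not 127.0), while abs(c2 + c3) is checked against floats, because arithmetic between columns is evaluated as double. Recomputing the abs(c2 + c3) expectations from the rows inserted into tb1 (a sketch under that assumption):

    # tb1's (c2, c3) pairs as inserted above; None stands for SQL NULL.
    pairs = [(1, 1), (None, None), (2, None), (None, 3),
             (4, 4), (127, 32767), (-127, -32767)]
    expected = [None if a is None or b is None else abs(float(a + b))
                for a, b in pairs]
    assert expected == [2.0, None, None, None, 8.0, 32894.0, 32894.0]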
tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 2.0) + + tdSql.query('select abs(c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, 8.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select abs(c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 127) + tdSql.checkData(6, 0, 127) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 2) + tdSql.checkData(9, 0, 3) + tdSql.checkData(10, 0, 4) + tdSql.checkData(11, 0, 5) + tdSql.checkData(12, 0, 6) + tdSql.checkData(13, 0, 7) + + tdSql.query('select abs(c2) from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select abs(c4),t1 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select abs(c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, 3) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, 4) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, 32767) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, 32767) + tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, 1) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, 2) + tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, 3) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, 4) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, 5) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, 6) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, 7) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select abs(c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, 2) + tdSql.checkData(1, 1, 'tb2') + tdSql.checkData(2, 0, 3) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, 4) + tdSql.checkData(3, 1, 'tb2') + tdSql.checkData(4, 0, 5) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, 6) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, 7) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select abs(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, 2.0) + tdSql.checkData(2, 0, 2.5) + tdSql.checkData(3, 0, 4.0) + tdSql.checkData(4, 0, 4.5) + tdSql.checkData(5, 0, 66.5) + tdSql.checkData(6, 0, 60.0) + + tdSql.query('select abs(c2) from (select * from stb1);') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 127) + tdSql.checkData(6, 0, 127) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 2) + tdSql.checkData(9, 0, 3) + tdSql.checkData(10, 0, 4) + tdSql.checkData(11, 0, 5) + tdSql.checkData(12, 0, 6) + tdSql.checkData(13, 0, 7) + + tdSql.query("select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, 2.0) + tdSql.checkData(2, 0, 2.5) + tdSql.checkData(3, 0, 4.0) + tdSql.checkData(4, 0, 4.5) + tdSql.checkData(5, 0, 66.5) + tdSql.checkData(6, 0, 60.0) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + 
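These order by a expectations pin down evaluation order: the sort is applied to the inner column a (the avg per bucket), and the outer abs() is applied afterwards. That is why the ascending result begins with 60.0, since the 09:00:06 bucket's average is -60, the smallest a, and ends with the fill(null) rows. The same ordering replayed in Python (a sketch; NULL placement matches what the checks encode):

    # avg(c2) per 1s bucket with fill(null), as produced by the subquery above.
    a_vals = [1.0, 2.0, 2.5, 4.0, 4.5, 66.5, -60.0, None, None, None]
    non_null = sorted(v for v in a_vals if v is not None)
    ordered = non_null + [None] * a_vals.count(None)   # NULLs sort last ascending
    outer = [None if v is None else abs(v) for v in ordered]
    assert outer == [60.0, 1.0, 2.0, 2.5, 4.0, 4.5, 66.5, None, None, None]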
tdSql.checkData(9, 0, None) + + tdSql.query("select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, 2.0) + tdSql.checkData(2, 0, 2.5) + tdSql.checkData(3, 0, 4.0) + tdSql.checkData(4, 0, 4.5) + tdSql.checkData(5, 0, 66.5) + tdSql.checkData(6, 0, 60.0) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 60.0) + tdSql.checkData(4, 0, 66.5) + tdSql.checkData(5, 0, 4.5) + tdSql.checkData(6, 0, 4.0) + tdSql.checkData(7, 0, 2.5) + tdSql.checkData(8, 0, 2.0) + tdSql.checkData(9, 0, 1.0) + + tdSql.query("select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 66.5) + tdSql.checkData(4, 0, 4.5) + tdSql.checkData(5, 0, 4.0) + tdSql.checkData(6, 0, 2.5) + tdSql.checkData(7, 0, 2.0) + tdSql.checkData(8, 0, 1.0) + tdSql.checkData(9, 0, 60.0) + + tdSql.query("select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 60.0) + tdSql.checkData(1, 0, 1.0) + tdSql.checkData(2, 0, 2.0) + tdSql.checkData(3, 0, 2.5) + tdSql.checkData(4, 0, 4.0) + tdSql.checkData(5, 0, 4.5) + tdSql.checkData(6, 0, 66.5) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query('select abs(a) from (select abs(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 127) + tdSql.checkData(6, 0, 127) + + tdSql.query('select abs(tb1.c3),abs(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 3) + tdSql.checkData(3, 0, 3) + tdSql.checkData(3, 1, 4) + tdSql.checkData(4, 0, 4) + tdSql.checkData(4, 1, 5) + tdSql.checkData(5, 0, 32767) + tdSql.checkData(5, 1, 6) + tdSql.checkData(6, 0, 32767) + tdSql.checkData(6, 1, 7) + + tdSql.query('select abs(c3) from tb1 union all select abs(c3) from tb2;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 3) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 32767) + tdSql.checkData(6, 0, 32767) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 2) + tdSql.checkData(9, 0, 3) + tdSql.checkData(10, 0, 4) + tdSql.checkData(11, 0, 5) + tdSql.checkData(12, 0, 6) + tdSql.checkData(13, 0, 7) + + #=========== end math_abs ========== + + + #=========== begin math_asin ========== + tdSql.query('select asin(13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + 
tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select asin(c11) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select asin(c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select asin(c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select asin(a) from (select asin(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + 
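asin is defined only on [-1, 1], which is why asin(13) is NULL for every row and asin on the data columns is non-NULL only where the value is exactly 1 (asin(1) = π/2) or 0. A reference helper mirroring the checks (a sketch):

    import math

    def expected_asin(x):
        # NULL in, NULL out; values outside asin's domain also yield NULL.
        if x is None or not -1.0 <= x <= 1.0:
            return None
        return math.asin(x)

    assert expected_asin(13) is None
    assert expected_asin(None) is None
    assert expected_asin(1) == 1.5707963267948966   # pi / 2, as checked above
    assert expected_asin(0) == 0.0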
tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin((c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin((c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 12.570796326794897) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + + tdSql.query('select asin(c2) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select asin(c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select asin(c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 1.5707963267948966) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + tdSql.query('select asin(c2) from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select asin(c4),t1 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select asin(c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, 1.5707963267948966) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, None) + 
tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, None) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, None) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, None) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, None) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, None) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select asin(c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb2') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 'tb2') + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select asin(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c2) from (select * from stb1);') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 1.5707963267948966) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + tdSql.query("select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, 1.5707963267948966) + + tdSql.query("select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + 
tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, 1.5707963267948966) + tdSql.checkData(9, 0, None) + + tdSql.query("select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 1.5707963267948966) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query('select asin(a) from (select asin(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(tb1.c3),asin(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(0, 1, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, None) + + tdSql.query('select asin(c3) from tb1 union all select asin(c3) from tb2;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 1.5707963267948966) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + #=========== end math_asin ========== + + + #=========== begin math_acos ========== + tdSql.query('select acos(13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + 
tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select acos(c11) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select acos(c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select acos(c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select acos(a) from (select acos(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos((c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos((c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 11.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select 
acos(c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + + tdSql.query('select acos(c2) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select acos(c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select acos(c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + tdSql.query('select acos(c2) from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select acos(c4),t1 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select acos(c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, None) + tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, None) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, None) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, None) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, None) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, None) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select acos(c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb2') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 'tb2') + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select acos(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2) from (select * from stb1);') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + 
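+ # acos() over the flattened subquery is expected to match the direct stb1
+ # scan checked above, row for row.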
tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, 0.0) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, 0.0) + tdSql.checkData(9, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 0.0) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query('select acos(a) from (select acos(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(tb1.c3),acos(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(0, 1, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, None) + tdSql.checkData(4, 
0, None) + tdSql.checkData(4, 1, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, None) + + tdSql.query('select acos(c3) from tb1 union all select acos(c3) from tb2;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + #=========== end math_acos ========== + + + #=========== begin math_atan ========== + tdSql.query('select acos(13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select acos(c11) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select acos(c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 
1.5707963267948966) + + tdSql.query('select acos(c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select acos(a) from (select acos(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos((c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos((c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 11.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + + tdSql.query('select acos(c2) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select acos(c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select acos(c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + 
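+ # NB: this math_atan block re-issues the acos queries. A minimal sketch of
+ # how genuine atan expectations could be derived (assuming the server's
+ # atan() matches Python's libm-backed math.atan, and the tb1.c2 values used
+ # throughout this file):
+ import math
+ # atan is defined for all reals, so only NULL inputs should map to NULL
+ _atan_expected = [None if v is None else math.atan(v)
+                   for v in (1, None, 2, None, 4, 127, -127)]  # hypothetical helper
+ # e.g. math.atan(1) == math.pi / 4 == 0.7853981633974483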
tdSql.query('select acos(c2) from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select acos(c4),t1 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select acos(c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, None) + tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, None) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, None) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, None) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, None) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, None) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select acos(c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb2') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 'tb2') + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select acos(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2) from (select * from stb1);') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= 
'2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, 0.0) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, 0.0) + tdSql.checkData(9, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 0.0) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query('select acos(a) from (select acos(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(tb1.c3),acos(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(0, 1, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, None) + + tdSql.query('select acos(c3) from tb1 union all select acos(c3) from tb2;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + #=========== end math_atan ========== + + + #=========== begin math_sin ========== + tdSql.query('select sin(13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.4201670368266409) + tdSql.checkData(1, 0, 0.4201670368266409) + tdSql.checkData(2, 0, 0.4201670368266409) + tdSql.checkData(3, 0, 0.4201670368266409) + tdSql.checkData(4, 0, 0.4201670368266409) + tdSql.checkData(5, 0, 0.4201670368266409) + tdSql.checkData(6, 0, 0.4201670368266409) + + tdSql.query('select sin(c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.972630067242408) + tdSql.checkData(6, 0, 
-0.972630067242408) + + tdSql.query('select sin(c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.18750655394138943) + tdSql.checkData(6, 0, -0.18750655394138943) + + tdSql.query('select sin(c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, -0.7249165551445564) + tdSql.checkData(6, 0, 0.7249165551445564) + + tdSql.query('select sin(c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.9999303766734422) + tdSql.checkData(6, 0, -0.9999303766734422) + + tdSql.query('select sin(c6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, -0.5218765233336585) + tdSql.checkData(6, 0, 0.5218765233336585) + + tdSql.query('select sin(c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.8728292970885063) + tdSql.checkData(6, 0, -0.8728292970885063) + + tdSql.query('select sin(c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.45199889806298343) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select sin(c11) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.3683616323063538) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select sin(c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.9986982434666626) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select sin(c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.9999303766734422) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select sin(a) from (select sin(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.7456241416655579) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.7890723435728884) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6866002607386249) + tdSql.checkData(5, 0, 0.8263696344332049) + tdSql.checkData(6, 0, -0.8263696344332049) + + tdSql.query('select sin(c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 
0.9092974268256817) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.9893582466233818) + tdSql.checkData(5, 0, 0.9989477243796069) + tdSql.checkData(6, 0, -0.9989477243796069) + + tdSql.query('select sin((c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.9092974268256817) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.9893582466233818) + tdSql.checkData(5, 0, 0.9989477243796069) + tdSql.checkData(6, 0, -0.9989477243796069) + + tdSql.query('select sin((c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.7568024953079282) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.9906073556948704) + tdSql.checkData(5, 0, 0.08341720348826624) + tdSql.checkData(6, 0, 0.9909625452221539) + + tdSql.query('select sin(11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 9.793449296524592e-06) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.0000097934492964) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 3.0000097934492964) + tdSql.checkData(5, 0, 126.0000097934493) + tdSql.checkData(6, 0, -127.9999902065507) + + tdSql.query('select sin(c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 11.841470984807897) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 11.909297426825681) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 10.243197504692072) + tdSql.checkData(5, 0, 11.972630067242408) + tdSql.checkData(6, 0, 10.027369932757592) + + tdSql.query('select sin(c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, 0.9092974268256817) + tdSql.checkData(1, 0, 0.9893582466233818) + tdSql.checkData(2, 0, 0.9989477243796069) + tdSql.checkData(3, 0, -0.9989477243796069) + + tdSql.query('select sin(c2) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -0.972630067242408) + tdSql.checkData(1, 0, 0.972630067242408) + tdSql.checkData(2, 0, -0.7568024953079282) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.9092974268256817) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.8414709848078965) + + tdSql.query('select sin(c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -0.9989477243796069) + tdSql.checkData(1, 0, 0.9989477243796069) + tdSql.checkData(2, 0, 0.9893582466233818) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.9092974268256817) + + tdSql.query('select sin(c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, 0.9893582466233818) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select sin(c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.972630067242408) + tdSql.checkData(6, 0, -0.972630067242408) + tdSql.checkData(7, 0, 0.8414709848078965) + tdSql.checkData(8, 0, 0.9092974268256817) + tdSql.checkData(9, 0, 0.1411200080598672) + tdSql.checkData(10, 0, -0.7568024953079282) + tdSql.checkData(11, 0, -0.9589242746631385) + tdSql.checkData(12, 0, -0.27941549819892586) + tdSql.checkData(13, 0, 0.6569865987187891) + + tdSql.query('select sin(c2) from stb1 order by ts desc;') 
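+ # sin() is defined for all reals, so only NULL inputs yield None; the
+ # order-by-ts-desc probes here assert only the row count, not the values.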
+ tdSql.checkRows(14) + tdSql.query('select sin(c4),t1 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select sin(c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, 0.18750655394138943) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, -0.18750655394138943) + tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, 0.8414709848078965) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, 0.9092974268256817) + tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, 0.1411200080598672) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, -0.7568024953079282) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, -0.9589242746631385) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, -0.27941549819892586) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, 0.6569865987187891) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select sin(c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, 0.9092974268256817) + tdSql.checkData(1, 1, 'tb2') + tdSql.checkData(2, 0, 0.1411200080598672) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, -0.7568024953079282) + tdSql.checkData(3, 1, 'tb2') + tdSql.checkData(4, 0, -0.9589242746631385) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, -0.27941549819892586) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, 0.6569865987187891) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select sin(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, 0.9092974268256817) + tdSql.checkData(2, 0, 0.5984721441039565) + tdSql.checkData(3, 0, -0.7568024953079282) + tdSql.checkData(4, 0, -0.977530117665097) + tdSql.checkData(5, 0, -0.5025573497604873) + tdSql.checkData(6, 0, 0.3048106211022167) + + tdSql.query('select sin(c2) from (select * from stb1);') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.972630067242408) + tdSql.checkData(6, 0, -0.972630067242408) + tdSql.checkData(7, 0, 0.8414709848078965) + tdSql.checkData(8, 0, 0.9092974268256817) + tdSql.checkData(9, 0, 0.1411200080598672) + tdSql.checkData(10, 0, -0.7568024953079282) + tdSql.checkData(11, 0, -0.9589242746631385) + tdSql.checkData(12, 0, -0.27941549819892586) + tdSql.checkData(13, 0, 0.6569865987187891) + + tdSql.query("select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, 0.9092974268256817) + tdSql.checkData(2, 0, 0.5984721441039565) + tdSql.checkData(3, 0, -0.7568024953079282) + tdSql.checkData(4, 0, -0.977530117665097) + tdSql.checkData(5, 0, -0.5025573497604873) + tdSql.checkData(6, 0, 0.3048106211022167) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select sin(a) from 
(select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, 0.9092974268256817) + tdSql.checkData(2, 0, 0.5984721441039565) + tdSql.checkData(3, 0, -0.7568024953079282) + tdSql.checkData(4, 0, -0.977530117665097) + tdSql.checkData(5, 0, -0.5025573497604873) + tdSql.checkData(6, 0, 0.3048106211022167) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 0.3048106211022167) + tdSql.checkData(4, 0, -0.5025573497604873) + tdSql.checkData(5, 0, -0.977530117665097) + tdSql.checkData(6, 0, -0.7568024953079282) + tdSql.checkData(7, 0, 0.5984721441039565) + tdSql.checkData(8, 0, 0.9092974268256817) + tdSql.checkData(9, 0, 0.8414709848078965) + + tdSql.query("select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.5025573497604873) + tdSql.checkData(4, 0, -0.977530117665097) + tdSql.checkData(5, 0, -0.7568024953079282) + tdSql.checkData(6, 0, 0.5984721441039565) + tdSql.checkData(7, 0, 0.9092974268256817) + tdSql.checkData(8, 0, 0.8414709848078965) + tdSql.checkData(9, 0, 0.3048106211022167) + + tdSql.query("select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.3048106211022167) + tdSql.checkData(1, 0, 0.8414709848078965) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, 0.5984721441039565) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, -0.977530117665097) + tdSql.checkData(6, 0, -0.5025573497604873) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query('select sin(a) from (select sin(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.7456241416655579) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.7890723435728884) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6866002607386249) + tdSql.checkData(5, 0, 0.8263696344332049) + tdSql.checkData(6, 0, -0.8263696344332049) + + tdSql.query('select sin(tb1.c3),sin(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(0, 1, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 0.9092974268256817) + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 0.1411200080598672) + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(3, 1, -0.7568024953079282) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(4, 1, -0.9589242746631385) + tdSql.checkData(5, 0, 0.18750655394138943) + tdSql.checkData(5, 1, -0.27941549819892586) + tdSql.checkData(6, 0, -0.18750655394138943) + tdSql.checkData(6, 1, 0.6569865987187891) + + tdSql.query('select sin(c3) from tb1 union all select sin(c3) from tb2;') + 
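+ # union all is expected to keep each branch's scan order: the first seven
+ # rows below mirror tb1's sin(c3) block, the last seven mirror tb2's.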
tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.18750655394138943) + tdSql.checkData(6, 0, -0.18750655394138943) + tdSql.checkData(7, 0, 0.8414709848078965) + tdSql.checkData(8, 0, 0.9092974268256817) + tdSql.checkData(9, 0, 0.1411200080598672) + tdSql.checkData(10, 0, -0.7568024953079282) + tdSql.checkData(11, 0, -0.9589242746631385) + tdSql.checkData(12, 0, -0.27941549819892586) + tdSql.checkData(13, 0, 0.6569865987187891) + + #=========== end math_sin ========== + + + #=========== begin math_cos ========== + tdSql.query('select cos(13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.9074467814501962) + tdSql.checkData(1, 0, 0.9074467814501962) + tdSql.checkData(2, 0, 0.9074467814501962) + tdSql.checkData(3, 0, 0.9074467814501962) + tdSql.checkData(4, 0, 0.9074467814501962) + tdSql.checkData(5, 0, 0.9074467814501962) + tdSql.checkData(6, 0, 0.9074467814501962) + + tdSql.query('select cos(c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.23235910202965793) + tdSql.checkData(6, 0, 0.23235910202965793) + + tdSql.query('select cos(c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.9899924966004454) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.9822633517692823) + tdSql.checkData(6, 0, 0.9822633517692823) + + tdSql.query('select cos(c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, -0.6888366918779438) + tdSql.checkData(6, 0, -0.6888366918779438) + + tdSql.query('select cos(c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.9899924966004454) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.011800076512800236) + tdSql.checkData(6, 0, 0.011800076512800236) + + tdSql.query('select cos(c6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.8530210398303042) + tdSql.checkData(6, 0, 0.8530210398303042) + + tdSql.query('select cos(c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.9899924966004454) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.4880256326710555) + tdSql.checkData(6, 0, 0.4880256326710555) + + tdSql.query('select cos(c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, -0.892018495407942) + tdSql.checkData(6, 0, 1.0) + + tdSql.query('select cos(c11) from tb1;') + 
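+ # cos() is even (cos(-x) == cos(x)), which is why the +/-127 rows of the
+ # c2 block above share the value 0.23235910202965793.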
tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.9899924966004454) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.9296825844580496) + tdSql.checkData(6, 0, 1.0) + + tdSql.query('select cos(c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, -0.051008023845301335) + tdSql.checkData(6, 0, 1.0) + + tdSql.query('select cos(c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.9899924966004454) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.011800076512800236) + tdSql.checkData(6, 0, 1.0) + + tdSql.query('select cos(a) from (select cos(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8575532158463934) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9146533258523714) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.7938734492261525) + tdSql.checkData(5, 0, 0.9731258638638398) + tdSql.checkData(6, 0, 0.9731258638638398) + + tdSql.query('select cos(c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -0.4161468365471424) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.14550003380861354) + tdSql.checkData(5, 0, 0.04586331820534665) + tdSql.checkData(6, 0, 0.04586331820534665) + + tdSql.query('select cos((c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -0.4161468365471424) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.14550003380861354) + tdSql.checkData(5, 0, 0.04586331820534665) + tdSql.checkData(6, 0, 0.04586331820534665) + + tdSql.query('select cos((c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -0.6536436208636119) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.1367372182078336) + tdSql.checkData(5, 0, -0.9965147114630055) + tdSql.checkData(6, 0, 0.13413886076313122) + + tdSql.query('select cos(11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0044256979880508) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2.004425697988051) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4.004425697988051) + tdSql.checkData(5, 0, 127.00442569798805) + tdSql.checkData(6, 0, -126.99557430201195) + + tdSql.query('select cos(c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 11.54030230586814) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 10.583853163452858) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 10.346356379136388) + tdSql.checkData(5, 0, 11.232359102029658) + tdSql.checkData(6, 0, 11.232359102029658) + + tdSql.query('select cos(c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, -0.4161468365471424) + tdSql.checkData(1, 0, -0.14550003380861354) + tdSql.checkData(2, 0, 0.04586331820534665) + tdSql.checkData(3, 0, 0.04586331820534665) + + tdSql.query('select cos(c2) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.23235910202965793) + tdSql.checkData(1, 0, 0.23235910202965793) + tdSql.checkData(2, 0, 
-0.6536436208636119) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.4161468365471424) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.5403023058681398) + + tdSql.query('select cos(c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.04586331820534665) + tdSql.checkData(1, 0, 0.04586331820534665) + tdSql.checkData(2, 0, -0.14550003380861354) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, -0.4161468365471424) + + tdSql.query('select cos(c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, -0.14550003380861354) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select cos(c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.23235910202965793) + tdSql.checkData(6, 0, 0.23235910202965793) + tdSql.checkData(7, 0, 0.5403023058681398) + tdSql.checkData(8, 0, -0.4161468365471424) + tdSql.checkData(9, 0, -0.9899924966004454) + tdSql.checkData(10, 0, -0.6536436208636119) + tdSql.checkData(11, 0, 0.28366218546322625) + tdSql.checkData(12, 0, 0.960170286650366) + tdSql.checkData(13, 0, 0.7539022543433046) + + tdSql.query('select cos(c2) from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select cos(c4),t1 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select cos(c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, -0.9899924966004454) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, 0.9822633517692823) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, 0.9822633517692823) + tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, 0.5403023058681398) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, -0.4161468365471424) + tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, -0.9899924966004454) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, -0.6536436208636119) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, 0.28366218546322625) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, 0.960170286650366) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, 0.7539022543433046) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select cos(c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, -0.4161468365471424) + tdSql.checkData(1, 1, 'tb2') + tdSql.checkData(2, 0, -0.9899924966004454) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, -0.6536436208636119) + tdSql.checkData(3, 1, 'tb2') + tdSql.checkData(4, 0, 0.28366218546322625) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, 0.960170286650366) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, 0.7539022543433046) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select cos(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, -0.4161468365471424) + tdSql.checkData(2, 0, -0.8011436155469337) + tdSql.checkData(3, 0, 
+ tdSql.query('select cos(a) from (select avg(c2) as a from stb1 interval(1s));')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 0.5403023058681398)
+ tdSql.checkData(1, 0, -0.4161468365471424)
+ tdSql.checkData(2, 0, -0.8011436155469337)
+ tdSql.checkData(3, 0, -0.6536436208636119)
+ tdSql.checkData(4, 0, -0.2107957994307797)
+ tdSql.checkData(5, 0, -0.8645438740756395)
+ tdSql.checkData(6, 0, -0.9524129804151563)
+
+ tdSql.query('select cos(c2) from (select * from stb1);')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, 0.5403023058681398)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, -0.4161468365471424)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, -0.6536436208636119)
+ tdSql.checkData(5, 0, 0.23235910202965793)
+ tdSql.checkData(6, 0, 0.23235910202965793)
+ tdSql.checkData(7, 0, 0.5403023058681398)
+ tdSql.checkData(8, 0, -0.4161468365471424)
+ tdSql.checkData(9, 0, -0.9899924966004454)
+ tdSql.checkData(10, 0, -0.6536436208636119)
+ tdSql.checkData(11, 0, 0.28366218546322625)
+ tdSql.checkData(12, 0, 0.960170286650366)
+ tdSql.checkData(13, 0, 0.7539022543433046)
+
+ tdSql.query("select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, 0.5403023058681398)
+ tdSql.checkData(1, 0, -0.4161468365471424)
+ tdSql.checkData(2, 0, -0.8011436155469337)
+ tdSql.checkData(3, 0, -0.6536436208636119)
+ tdSql.checkData(4, 0, -0.2107957994307797)
+ tdSql.checkData(5, 0, -0.8645438740756395)
+ tdSql.checkData(6, 0, -0.9524129804151563)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+
+ tdSql.query("select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, 0.5403023058681398)
+ tdSql.checkData(1, 0, -0.4161468365471424)
+ tdSql.checkData(2, 0, -0.8011436155469337)
+ tdSql.checkData(3, 0, -0.6536436208636119)
+ tdSql.checkData(4, 0, -0.2107957994307797)
+ tdSql.checkData(5, 0, -0.8645438740756395)
+ tdSql.checkData(6, 0, -0.9524129804151563)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+
+ tdSql.query("select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, -0.9524129804151563)
+ tdSql.checkData(4, 0, -0.8645438740756395)
+ tdSql.checkData(5, 0, -0.2107957994307797)
+ tdSql.checkData(6, 0, -0.6536436208636119)
+ tdSql.checkData(7, 0, -0.8011436155469337)
+ tdSql.checkData(8, 0, -0.4161468365471424)
+ tdSql.checkData(9, 0, 0.5403023058681398)
+
+ tdSql.query("select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, -0.8645438740756395)
+ tdSql.checkData(4, 0, -0.2107957994307797)
+ tdSql.checkData(5, 0, -0.6536436208636119)
+ tdSql.checkData(6, 0, -0.8011436155469337)
+ tdSql.checkData(7, 0, -0.4161468365471424)
+ tdSql.checkData(8, 0, 0.5403023058681398)
+ tdSql.checkData(9, 0, -0.9524129804151563)
+
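+ # NULL placement under value ordering: the fill(null) windows sort first with 'order by a desc' and last with 'order by a'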
+ tdSql.query("select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, -0.9524129804151563)
+ tdSql.checkData(1, 0, 0.5403023058681398)
+ tdSql.checkData(2, 0, -0.4161468365471424)
+ tdSql.checkData(3, 0, -0.8011436155469337)
+ tdSql.checkData(4, 0, -0.6536436208636119)
+ tdSql.checkData(5, 0, -0.2107957994307797)
+ tdSql.checkData(6, 0, -0.8645438740756395)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+
+ tdSql.query('select cos(a) from (select cos(c2) as a from tb1);')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 0.8575532158463934)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 0.9146533258523714)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 0.7938734492261525)
+ tdSql.checkData(5, 0, 0.9731258638638398)
+ tdSql.checkData(6, 0, 0.9731258638638398)
+
+ tdSql.query('select cos(tb1.c3),cos(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 0.5403023058681398)
+ tdSql.checkData(0, 1, 0.5403023058681398)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(1, 1, -0.4161468365471424)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(2, 1, -0.9899924966004454)
+ tdSql.checkData(3, 0, -0.9899924966004454)
+ tdSql.checkData(3, 1, -0.6536436208636119)
+ tdSql.checkData(4, 0, -0.6536436208636119)
+ tdSql.checkData(4, 1, 0.28366218546322625)
+ tdSql.checkData(5, 0, 0.9822633517692823)
+ tdSql.checkData(5, 1, 0.960170286650366)
+ tdSql.checkData(6, 0, 0.9822633517692823)
+ tdSql.checkData(6, 1, 0.7539022543433046)
+
+ tdSql.query('select cos(c3) from tb1 union all select cos(c3) from tb2;')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, 0.5403023058681398)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, -0.9899924966004454)
+ tdSql.checkData(4, 0, -0.6536436208636119)
+ tdSql.checkData(5, 0, 0.9822633517692823)
+ tdSql.checkData(6, 0, 0.9822633517692823)
+ tdSql.checkData(7, 0, 0.5403023058681398)
+ tdSql.checkData(8, 0, -0.4161468365471424)
+ tdSql.checkData(9, 0, -0.9899924966004454)
+ tdSql.checkData(10, 0, -0.6536436208636119)
+ tdSql.checkData(11, 0, 0.28366218546322625)
+ tdSql.checkData(12, 0, 0.960170286650366)
+ tdSql.checkData(13, 0, 0.7539022543433046)
+
+ #=========== end math_cos ==========
+
+
+ #=========== begin math_tan ==========
+ tdSql.query('select tan(13) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 0.4630211329364896)
+ tdSql.checkData(1, 0, 0.4630211329364896)
+ tdSql.checkData(2, 0, 0.4630211329364896)
+ tdSql.checkData(3, 0, 0.4630211329364896)
+ tdSql.checkData(4, 0, 0.4630211329364896)
+ tdSql.checkData(5, 0, 0.4630211329364896)
+ tdSql.checkData(6, 0, 0.4630211329364896)
+
+ tdSql.query('select tan(c2) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, -2.185039863261519)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(5, 0, 4.185891831851989)
+ tdSql.checkData(6, 0, -4.185891831851989)
+
+ tdSql.query('select tan(c3) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, -0.1425465430742778)
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(5, 0, 0.19089234430221486)
+ tdSql.checkData(6, 0, -0.19089234430221486)
+
+ tdSql.query('select tan(c4) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, -2.185039863261519)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(5, 0, 1.0523779637351338)
+ tdSql.checkData(6, 0, -1.0523779637351338)
+
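+ # repeat the tan() checks across the remaining column types; only the NULL pattern and boundary values differ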
+ tdSql.query('select tan(c5) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, -0.1425465430742778)
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(5, 0, 84.73931296875567)
+ tdSql.checkData(6, 0, -84.73931296875567)
+
+ tdSql.query('select tan(c6) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, -2.185039863261519)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(5, 0, -0.6117979498342481)
+ tdSql.checkData(6, 0, 0.6117979498342481)
+
+ tdSql.query('select tan(c7) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, -0.1425465430742778)
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(5, 0, 1.7884906829818519)
+ tdSql.checkData(6, 0, -1.7884906829818519)
+
+ tdSql.query('select tan(c10) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, -2.185039863261519)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(5, 0, -0.5067147154345417)
+ tdSql.checkData(6, 0, 0.0)
+
+ tdSql.query('select tan(c11) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, -0.1425465430742778)
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(5, 0, 0.39622301037411284)
+ tdSql.checkData(6, 0, 0.0)
+
+ tdSql.query('select tan(c12) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, -2.185039863261519)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(5, 0, -19.579238091943036)
+ tdSql.checkData(6, 0, 0.0)
+
+ tdSql.query('select tan(c13) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, -0.1425465430742778)
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(5, 0, 84.73931296875567)
+ tdSql.checkData(6, 0, 0.0)
+
+ tdSql.query('select tan(a) from (select tan(c2) as a from tb1);')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 74.68593339876537)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.4179285755053868)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 2.282204450191367)
+ tdSql.checkData(5, 0, 1.7205151938006633)
+ tdSql.checkData(6, 0, -1.7205151938006633)
+
+ tdSql.query('select tan(c2 + c3) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, -2.185039863261519)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, -6.799711455220379)
+ tdSql.checkData(5, 0, 21.780973629229287)
+ tdSql.checkData(6, 0, -21.780973629229287)
+
+ tdSql.query('select tan((c2 + c3)) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, -2.185039863261519)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, -6.799711455220379)
+ tdSql.checkData(5, 0, 21.780973629229287)
+ tdSql.checkData(6, 0, -21.780973629229287)
+
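+ # arithmetic expressions inside tan(): a NULL in any operand makes the whole expression NULL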
+ tdSql.query('select tan((c2 * c3)+c4-6) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, -1.1578212823495775)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 7.2446066160948055)
+ tdSql.checkData(5, 0, -0.08370895334379919)
+ tdSql.checkData(6, 0, 7.387587307544252)
+
+ tdSql.query('select tan(11)+c2 from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, -224.95084645419513)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, -223.95084645419513)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, -221.95084645419513)
+ tdSql.checkData(5, 0, -98.95084645419513)
+ tdSql.checkData(6, 0, -352.95084645419513)
+
+ tdSql.query('select tan(c2)+11 from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 12.557407724654903)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 8.814960136738481)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 12.157821282349577)
+ tdSql.checkData(5, 0, 15.18589183185199)
+ tdSql.checkData(6, 0, 6.814108168148011)
+
+ tdSql.query('select tan(c2+c3) from tb1 where c2 is not null and c3 is not null;')
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, -2.185039863261519)
+ tdSql.checkData(1, 0, -6.799711455220379)
+ tdSql.checkData(2, 0, 21.780973629229287)
+ tdSql.checkData(3, 0, -21.780973629229287)
+
+ tdSql.query('select tan(c2) from tb1 order by ts desc;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, -4.185891831851989)
+ tdSql.checkData(1, 0, 4.185891831851989)
+ tdSql.checkData(2, 0, 1.1578212823495775)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, -2.185039863261519)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, 1.5574077246549023)
+
+ tdSql.query('select tan(c2+c3) from tb1 order by ts desc;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, -21.780973629229287)
+ tdSql.checkData(1, 0, 21.780973629229287)
+ tdSql.checkData(2, 0, -6.799711455220379)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, -2.185039863261519)
+
+ tdSql.query('select tan(c2+c3) from tb1 order by ts desc limit 3 offset 2;')
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, -6.799711455220379)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+
+ tdSql.query('select tan(c2) from stb1;')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, -2.185039863261519)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(5, 0, 4.185891831851989)
+ tdSql.checkData(6, 0, -4.185891831851989)
+ tdSql.checkData(7, 0, 1.5574077246549023)
+ tdSql.checkData(8, 0, -2.185039863261519)
+ tdSql.checkData(9, 0, -0.1425465430742778)
+ tdSql.checkData(10, 0, 1.1578212823495775)
+ tdSql.checkData(11, 0, -3.380515006246586)
+ tdSql.checkData(12, 0, -0.29100619138474915)
+ tdSql.checkData(13, 0, 0.8714479827243188)
+
+ tdSql.query('select tan(c2) from stb1 order by ts desc;')
+ tdSql.checkRows(14)
+ tdSql.query('select tan(c4),t1 from stb1 order by ts desc;')
+ tdSql.checkRows(14)
+ tdSql.query('select tan(c3),tbname from stb1;')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(0, 1, 'tb1')
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(1, 1, 'tb1')
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(2, 1, 'tb1')
+ tdSql.checkData(3, 0, -0.1425465430742778)
+ tdSql.checkData(3, 1, 'tb1')
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(4, 1, 'tb1')
+ tdSql.checkData(5, 0, 0.19089234430221486)
+ tdSql.checkData(5, 1, 'tb1')
+ tdSql.checkData(6, 0, -0.19089234430221486)
+ tdSql.checkData(6, 1, 'tb1')
+ tdSql.checkData(7, 0, 1.5574077246549023)
+ tdSql.checkData(7, 1, 'tb2')
+ tdSql.checkData(8, 0, -2.185039863261519)
+ tdSql.checkData(8, 1, 'tb2')
+ tdSql.checkData(9, 0, -0.1425465430742778)
+ tdSql.checkData(9, 1, 'tb2')
+ tdSql.checkData(10, 0, 1.1578212823495775)
+ tdSql.checkData(10, 1, 'tb2')
+ tdSql.checkData(11, 0, -3.380515006246586)
+ tdSql.checkData(11, 1, 'tb2')
+ tdSql.checkData(12, 0, -0.29100619138474915)
+ tdSql.checkData(12, 1, 'tb2')
+ tdSql.checkData(13, 0, 0.8714479827243188)
+ tdSql.checkData(13, 1, 'tb2')
+
+ tdSql.query('select tan(c3),tbname from stb1 where t1 > 1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(0, 1, 'tb2')
+ tdSql.checkData(1, 0, -2.185039863261519)
+ tdSql.checkData(1, 1, 'tb2')
+ tdSql.checkData(2, 0, -0.1425465430742778)
+ tdSql.checkData(2, 1, 'tb2')
+ tdSql.checkData(3, 0, 1.1578212823495775)
+ tdSql.checkData(3, 1, 'tb2')
+ tdSql.checkData(4, 0, -3.380515006246586)
+ tdSql.checkData(4, 1, 'tb2')
+ tdSql.checkData(5, 0, -0.29100619138474915)
+ tdSql.checkData(5, 1, 'tb2')
+ tdSql.checkData(6, 0, 0.8714479827243188)
+ tdSql.checkData(6, 1, 'tb2')
+
+ tdSql.query('select tan(a) from (select avg(c2) as a from stb1 interval(1s));')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, -2.185039863261519)
+ tdSql.checkData(2, 0, -0.7470222972386603)
+ tdSql.checkData(3, 0, 1.1578212823495775)
+ tdSql.checkData(4, 0, 4.637332054551185)
+ tdSql.checkData(5, 0, 0.5812976817374548)
+ tdSql.checkData(6, 0, -0.320040389379563)
+
+ tdSql.query('select tan(c2) from (select * from stb1);')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, -2.185039863261519)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(5, 0, 4.185891831851989)
+ tdSql.checkData(6, 0, -4.185891831851989)
+ tdSql.checkData(7, 0, 1.5574077246549023)
+ tdSql.checkData(8, 0, -2.185039863261519)
+ tdSql.checkData(9, 0, -0.1425465430742778)
+ tdSql.checkData(10, 0, 1.1578212823495775)
+ tdSql.checkData(11, 0, -3.380515006246586)
+ tdSql.checkData(12, 0, -0.29100619138474915)
+ tdSql.checkData(13, 0, 0.8714479827243188)
+
+ tdSql.query("select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, -2.185039863261519)
+ tdSql.checkData(2, 0, -0.7470222972386603)
+ tdSql.checkData(3, 0, 1.1578212823495775)
+ tdSql.checkData(4, 0, 4.637332054551185)
+ tdSql.checkData(5, 0, 0.5812976817374548)
+ tdSql.checkData(6, 0, -0.320040389379563)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+
+ tdSql.query("select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, -2.185039863261519)
+ tdSql.checkData(2, 0, -0.7470222972386603)
+ tdSql.checkData(3, 0, 1.1578212823495775)
+ tdSql.checkData(4, 0, 4.637332054551185)
+ tdSql.checkData(5, 0, 0.5812976817374548)
+ tdSql.checkData(6, 0, -0.320040389379563)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+
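+ # re-run the fill(null) subquery with explicit orderings to exercise NULL placement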
+ tdSql.query("select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, -0.320040389379563)
+ tdSql.checkData(4, 0, 0.5812976817374548)
+ tdSql.checkData(5, 0, 4.637332054551185)
+ tdSql.checkData(6, 0, 1.1578212823495775)
+ tdSql.checkData(7, 0, -0.7470222972386603)
+ tdSql.checkData(8, 0, -2.185039863261519)
+ tdSql.checkData(9, 0, 1.5574077246549023)
+
+ tdSql.query("select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, 0.5812976817374548)
+ tdSql.checkData(4, 0, 4.637332054551185)
+ tdSql.checkData(5, 0, 1.1578212823495775)
+ tdSql.checkData(6, 0, -0.7470222972386603)
+ tdSql.checkData(7, 0, -2.185039863261519)
+ tdSql.checkData(8, 0, 1.5574077246549023)
+ tdSql.checkData(9, 0, -0.320040389379563)
+
+ tdSql.query("select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, -0.320040389379563)
+ tdSql.checkData(1, 0, 1.5574077246549023)
+ tdSql.checkData(2, 0, -2.185039863261519)
+ tdSql.checkData(3, 0, -0.7470222972386603)
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(5, 0, 4.637332054551185)
+ tdSql.checkData(6, 0, 0.5812976817374548)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+
+ tdSql.query('select tan(a) from (select tan(c2) as a from tb1);')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 74.68593339876537)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.4179285755053868)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 2.282204450191367)
+ tdSql.checkData(5, 0, 1.7205151938006633)
+ tdSql.checkData(6, 0, -1.7205151938006633)
+
+ tdSql.query('select tan(tb1.c3),tan(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(0, 1, 1.5574077246549023)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(1, 1, -2.185039863261519)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(2, 1, -0.1425465430742778)
+ tdSql.checkData(3, 0, -0.1425465430742778)
+ tdSql.checkData(3, 1, 1.1578212823495775)
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(4, 1, -3.380515006246586)
+ tdSql.checkData(5, 0, 0.19089234430221486)
+ tdSql.checkData(5, 1, -0.29100619138474915)
+ tdSql.checkData(6, 0, -0.19089234430221486)
+ tdSql.checkData(6, 1, 0.8714479827243188)
+
+ tdSql.query('select tan(c3) from tb1 union all select tan(c3) from tb2;')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, 1.5574077246549023)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, -0.1425465430742778)
+ tdSql.checkData(4, 0, 1.1578212823495775)
+ tdSql.checkData(5, 0, 0.19089234430221486)
+ tdSql.checkData(6, 0, -0.19089234430221486)
+ tdSql.checkData(7, 0, 1.5574077246549023)
+ tdSql.checkData(8, 0, -2.185039863261519)
+ tdSql.checkData(9, 0, -0.1425465430742778)
+ tdSql.checkData(10, 0, 1.1578212823495775)
+ tdSql.checkData(11, 0, -3.380515006246586)
+ tdSql.checkData(12, 0, -0.29100619138474915)
+ tdSql.checkData(13, 0, 0.8714479827243188)
+
+ #=========== end math_tan ==========
+
+
+ #=========== begin math_pow ==========
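+ # pow(x,y) checks: NULL operands propagate, overflow saturates to inf, and a negative base can yield a signed zero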
+ tdSql.query('select pow(c2,13) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 8192.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 67108864.0)
+ tdSql.checkData(5, 0, 2.235879388560037e+27)
+ tdSql.checkData(6, 0, -2.235879388560037e+27)
+
+ tdSql.query('select pow(c2,c2) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 4.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 256.0)
+ tdSql.checkData(5, 0, 1.5243074119957227e+267)
+ tdSql.checkData(6, 0, -6.560356474884124e-268)
+
+ tdSql.query('select pow(c2,c3) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 256.0)
+ tdSql.checkData(5, 0, inf)
+ tdSql.checkData(6, 0, -0.0)
+
+ tdSql.query('select pow(c2,c4) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 4.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 256.0)
+ tdSql.checkData(5, 0, inf)
+ tdSql.checkData(6, 0, -0.0)
+
+ tdSql.query('select pow(c2,c5) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 256.0)
+ tdSql.checkData(5, 0, inf)
+ tdSql.checkData(6, 0, 0.0)
+
+ tdSql.query('select pow(c2,c6) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 4.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 256.0)
+ tdSql.checkData(5, 0, inf)
+ tdSql.checkData(6, 0, 0.0)
+
+ tdSql.query('select pow(c2,c7) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 256.0)
+ tdSql.checkData(5, 0, inf)
+ tdSql.checkData(6, 0, 0.0)
+
+ tdSql.query('select pow(c2,c10) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 4.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 256.0)
+ tdSql.checkData(5, 0, inf)
+ tdSql.checkData(6, 0, 1.0)
+
+ tdSql.query('select pow(c2,c11) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 256.0)
+ tdSql.checkData(5, 0, inf)
+ tdSql.checkData(6, 0, 1.0)
+
+ tdSql.query('select pow(c2,c12) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 4.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 256.0)
+ tdSql.checkData(5, 0, inf)
+ tdSql.checkData(6, 0, 1.0)
+
+ tdSql.query('select pow(c2,c13) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 256.0)
+ tdSql.checkData(5, 0, inf)
+ tdSql.checkData(6, 0, 1.0)
+
+ tdSql.query('select pow(c2,c2 + c3) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 65536.0)
+ tdSql.checkData(5, 0, inf)
+ tdSql.checkData(6, 0, 0.0)
+
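+ # redundant parentheses around the exponent; results must match the pow(c2,c2 + c3) block above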
+ tdSql.query('select pow(c2,(c2 + c3)) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 65536.0)
+ tdSql.checkData(5, 0, inf)
+ tdSql.checkData(6, 0, 0.0)
+
+ tdSql.query('select pow(c2,(c2 * c3)+c4-6) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 268435456.0)
+ tdSql.checkData(5, 0, inf)
+ tdSql.checkData(6, 0, 0.0)
+
+ tdSql.query('select pow(c2,11)+c2 from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 2.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 2050.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 4194308.0)
+ tdSql.checkData(5, 0, 1.3862479934032099e+23)
+ tdSql.checkData(6, 0, -1.3862479934032099e+23)
+
+ tdSql.query('select pow(c2,c2)+11 from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 12.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 15.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 267.0)
+ tdSql.checkData(5, 0, 1.5243074119957227e+267)
+ tdSql.checkData(6, 0, 11.0)
+
+ tdSql.query('select pow(c2,c2+c3) from tb1 where c2 is not null and c3 is not null;')
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, 65536.0)
+ tdSql.checkData(2, 0, inf)
+ tdSql.checkData(3, 0, 0.0)
+
+ tdSql.query('select pow(c2,c2) from tb1 order by ts desc;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, -6.560356474884124e-268)
+ tdSql.checkData(1, 0, 1.5243074119957227e+267)
+ tdSql.checkData(2, 0, 256.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 4.0)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, 1.0)
+
+ tdSql.query('select pow(c2,c2+c3) from tb1 order by ts desc;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 0.0)
+ tdSql.checkData(1, 0, inf)
+ tdSql.checkData(2, 0, 65536.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, 1.0)
+
+ tdSql.query('select pow(c2,c2+c3) from tb1 order by ts desc limit 3 offset 2;')
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, 65536.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+
+ tdSql.query('select pow(c2,c2) from stb1;')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 4.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 256.0)
+ tdSql.checkData(5, 0, 1.5243074119957227e+267)
+ tdSql.checkData(6, 0, -6.560356474884124e-268)
+ tdSql.checkData(7, 0, 1.0)
+ tdSql.checkData(8, 0, 4.0)
+ tdSql.checkData(9, 0, 27.0)
+ tdSql.checkData(10, 0, 256.0)
+ tdSql.checkData(11, 0, 3125.0)
+ tdSql.checkData(12, 0, 46656.0)
+ tdSql.checkData(13, 0, 823543.0)
+
+ tdSql.query('select pow(c2,c2) from stb1 order by ts desc;')
+ tdSql.checkRows(14)
+ tdSql.query('select pow(c2,c4),t1 from stb1 order by ts desc;')
+ tdSql.checkRows(14)
+ tdSql.query('select pow(c2,c3),tbname from stb1;')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(0, 1, 'tb1')
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(1, 1, 'tb1')
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(2, 1, 'tb1')
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(3, 1, 'tb1')
+ tdSql.checkData(4, 0, 256.0)
+ tdSql.checkData(4, 1, 'tb1')
+ tdSql.checkData(5, 0, inf)
+ tdSql.checkData(5, 1, 'tb1')
+ tdSql.checkData(6, 0, -0.0)
+ tdSql.checkData(6, 1, 'tb1')
+ tdSql.checkData(7, 0, 1.0)
+ tdSql.checkData(7, 1, 'tb2')
+ tdSql.checkData(8, 0, 4.0)
+ tdSql.checkData(8, 1, 'tb2')
+ tdSql.checkData(9, 0, 27.0)
+ tdSql.checkData(9, 1, 'tb2')
+ tdSql.checkData(10, 0, 256.0)
+ tdSql.checkData(10, 1, 'tb2')
+ tdSql.checkData(11, 0, 3125.0)
+ tdSql.checkData(11, 1, 'tb2')
+ tdSql.checkData(12, 0, 46656.0)
+ tdSql.checkData(12, 1, 'tb2')
+ tdSql.checkData(13, 0, 823543.0)
+ tdSql.checkData(13, 1, 'tb2')
+
+ tdSql.query('select pow(c2,c3),tbname from stb1 where t1 > 1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(0, 1, 'tb2')
+ tdSql.checkData(1, 0, 4.0)
+ tdSql.checkData(1, 1, 'tb2')
+ tdSql.checkData(2, 0, 27.0)
+ tdSql.checkData(2, 1, 'tb2')
+ tdSql.checkData(3, 0, 256.0)
+ tdSql.checkData(3, 1, 'tb2')
+ tdSql.checkData(4, 0, 3125.0)
+ tdSql.checkData(4, 1, 'tb2')
+ tdSql.checkData(5, 0, 46656.0)
+ tdSql.checkData(5, 1, 'tb2')
+ tdSql.checkData(6, 0, 823543.0)
+ tdSql.checkData(6, 1, 'tb2')
+
+ tdSql.query('select pow(c2,c2) from (select * from stb1);')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 4.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 256.0)
+ tdSql.checkData(5, 0, 1.5243074119957227e+267)
+ tdSql.checkData(6, 0, -6.560356474884124e-268)
+ tdSql.checkData(7, 0, 1.0)
+ tdSql.checkData(8, 0, 4.0)
+ tdSql.checkData(9, 0, 27.0)
+ tdSql.checkData(10, 0, 256.0)
+ tdSql.checkData(11, 0, 3125.0)
+ tdSql.checkData(12, 0, 46656.0)
+ tdSql.checkData(13, 0, 823543.0)
+
+ tdSql.query('select pow(c2,c3) from tb1 union all select pow(c2,c3) from tb2;')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 256.0)
+ tdSql.checkData(5, 0, inf)
+ tdSql.checkData(6, 0, -0.0)
+ tdSql.checkData(7, 0, 1.0)
+ tdSql.checkData(8, 0, 4.0)
+ tdSql.checkData(9, 0, 27.0)
+ tdSql.checkData(10, 0, 256.0)
+ tdSql.checkData(11, 0, 3125.0)
+ tdSql.checkData(12, 0, 46656.0)
+ tdSql.checkData(13, 0, 823543.0)
+
+ #=========== end math_pow ==========
+
+
+ #=========== begin math_log ==========
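+ # log(x,b) takes the base as its second argument; a non-positive argument, non-positive base, or base 1 returns None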
+ tdSql.query('select log(c2,13) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 0.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 0.27023815442731974)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 0.5404763088546395)
+ tdSql.checkData(5, 0, 1.8886092516277813)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,c2) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, 1.0)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,c3) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, 0.465913680008469)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,c4) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, 0.22544144151366513)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,c5) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, 0.11093150296463757)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,c6) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, 0.05459909915208762)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,c7) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, 0.006824887406193638)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,c10) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, 0.8748229478056855)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,c11) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, 0.4367939948774267)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,c12) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, 0.2183963964662152)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,c13) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, 0.11093150296463757)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,c2 + c3) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 0.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 0.6666666666666667)
+ tdSql.checkData(5, 0, 0.4657403972991969)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,(c2 + c3)) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 0.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 0.6666666666666667)
+ tdSql.checkData(5, 0, 0.4657403972991969)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,(c2 * c3)+c4-6) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 0.5252990700743871)
+ tdSql.checkData(5, 0, 0.22542113212116985)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,11)+c2 from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 2.2890648263178877)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 4.578129652635775)
+ tdSql.checkData(5, 0, 129.02018292517226)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,c2)+11 from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 12.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 12.0)
+ tdSql.checkData(5, 0, 12.0)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,c2+c3) from tb1 where c2 is not null and c3 is not null;')
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 0.0)
+ tdSql.checkData(1, 0, 0.6666666666666667)
+ tdSql.checkData(2, 0, 0.4657403972991969)
+ tdSql.checkData(3, 0, None)
+
+ tdSql.query('select log(c2,c2) from tb1 order by ts desc;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, 1.0)
+ tdSql.checkData(2, 0, 1.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select log(c2,c2+c3) from tb1 order by ts desc;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, 0.4657403972991969)
+ tdSql.checkData(2, 0, 0.6666666666666667)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, 0.0)
+
+ tdSql.query('select log(c2,c2+c3) from tb1 order by ts desc limit 3 offset 2;')
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, 0.6666666666666667)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+
+ tdSql.query('select log(c2,c2) from stb1;')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, 1.0)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, 1.0)
+ tdSql.checkData(9, 0, 1.0)
+ tdSql.checkData(10, 0, 1.0)
+ tdSql.checkData(11, 0, 1.0)
+ tdSql.checkData(12, 0, 1.0)
+ tdSql.checkData(13, 0, 1.0)
+
+ tdSql.query('select log(c2,c2) from stb1 order by ts desc;')
+ tdSql.checkRows(14)
+ tdSql.query('select log(c2,c4),t1 from stb1 order by ts desc;')
+ tdSql.checkRows(14)
+ tdSql.query('select log(c2,c3),tbname from stb1;')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(0, 1, 'tb1')
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(1, 1, 'tb1')
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(2, 1, 'tb1')
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(3, 1, 'tb1')
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(4, 1, 'tb1')
+ tdSql.checkData(5, 0, 0.465913680008469)
+ tdSql.checkData(5, 1, 'tb1')
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(6, 1, 'tb1')
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(7, 1, 'tb2')
+ tdSql.checkData(8, 0, 1.0)
+ tdSql.checkData(8, 1, 'tb2')
+ tdSql.checkData(9, 0, 1.0)
+ tdSql.checkData(9, 1, 'tb2')
+ tdSql.checkData(10, 0, 1.0)
+ tdSql.checkData(10, 1, 'tb2')
+ tdSql.checkData(11, 0, 1.0)
+ tdSql.checkData(11, 1, 'tb2')
+ tdSql.checkData(12, 0, 1.0)
+ tdSql.checkData(12, 1, 'tb2')
+ tdSql.checkData(13, 0, 1.0)
+ tdSql.checkData(13, 1, 'tb2')
+
+ tdSql.query('select log(c2,c3),tbname from stb1 where t1 > 1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(0, 1, 'tb2')
+ tdSql.checkData(1, 0, 1.0)
+ tdSql.checkData(1, 1, 'tb2')
+ tdSql.checkData(2, 0, 1.0)
+ tdSql.checkData(2, 1, 'tb2')
+ tdSql.checkData(3, 0, 1.0)
+ tdSql.checkData(3, 1, 'tb2')
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(4, 1, 'tb2')
+ tdSql.checkData(5, 0, 1.0)
+ tdSql.checkData(5, 1, 'tb2')
+ tdSql.checkData(6, 0, 1.0)
+ tdSql.checkData(6, 1, 'tb2')
+
+ tdSql.query('select log(c2,c2) from (select * from stb1);')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, 1.0)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, 1.0)
+ tdSql.checkData(9, 0, 1.0)
+ tdSql.checkData(10, 0, 1.0)
+ tdSql.checkData(11, 0, 1.0)
+ tdSql.checkData(12, 0, 1.0)
+ tdSql.checkData(13, 0, 1.0)
+
+ tdSql.query('select log(c2,c3) from tb1 union all select log(c2,c3) from tb2;')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.0)
+ tdSql.checkData(5, 0, 0.465913680008469)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, 1.0)
+ tdSql.checkData(9, 0, 1.0)
+ tdSql.checkData(10, 0, 1.0)
+ tdSql.checkData(11, 0, 1.0)
+ tdSql.checkData(12, 0, 1.0)
+ tdSql.checkData(13, 0, 1.0)
+
+ #=========== end math_log ==========
+
+
+
+ tdSql.execute('create table stba (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);')
+
+ tdSql.execute("create table tba1 using stba tags(1,'1',1.0);")
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:00\',true, 1,1,1,1,1,1,"111","1111",1,1,1,1);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:01\',true, 2,2,2,2,2,2,"222","2222",2,2,2,2);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:02\',true, 3,3,2,3,3,3,"333","3333",3,3,3,3);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:03\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:04\',true, 5,5,5,5,5,5,"555","5555",5,5,5,5);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:05\',true, 6,6,6,6,6,6,"666","6666",6,6,6,6);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:06\',true, 7,7,7,7,7,7,"777","7777",7,7,7,7);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:07\',true, 8,8,8,8,8,8,"888","8888",8,8,8,8);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:08\',true, 9,9,9,9,9,9,"999","9999",9,9,9,9);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:09\',true, 0,0,0,0,0,0,"000","0000",0,0,0,0);')
+
+ self.restartTaosd(1, dbname='math_funcs')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:10\',true, 1,1,1,1,1,1,"111","1111",1,1,1,1);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:11\',true, 2,2,2,2,2,2,"222","2222",2,2,2,2);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:12\',true, 3,3,2,3,3,3,"333","3333",3,3,3,3);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:13\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:14\',true, 5,5,5,5,5,5,"555","5555",5,5,5,5);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:15\',true, 6,6,6,6,6,6,"666","6666",6,6,6,6);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:16\',true, 7,7,7,7,7,7,"777","7777",7,7,7,7);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:17\',true, 8,8,8,8,8,8,"888","8888",8,8,8,8);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:18\',true, 9,9,9,9,9,9,"999","9999",9,9,9,9);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:19\',true, 0,0,0,0,0,0,"000","0000",0,0,0,0);')
+
+ self.restartTaosd(1, dbname='math_funcs')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:20\',true, 1,1,1,1,1,1,"111","1111",1,1,1,1);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:21\',true, 2,2,2,2,2,2,"222","2222",2,2,2,2);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:22\',true, 3,3,2,3,3,3,"333","3333",3,3,3,3);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:23\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:24\',true, 5,5,5,5,5,5,"555","5555",5,5,5,5);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:25\',true, 6,6,6,6,6,6,"666","6666",6,6,6,6);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:26\',true, 7,7,7,7,7,7,"777","7777",7,7,7,7);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:27\',true, 8,8,8,8,8,8,"888","8888",8,8,8,8);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:28\',true, 9,9,9,9,9,9,"999","9999",9,9,9,9);')
+
+ tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:29\',true, 0,0,0,0,0,0,"000","0000",0,0,0,0);')
+
+ #=========== begin math_sqrt ==========
+ tdSql.query('select sqrt(13) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 3.605551275463989)
+ tdSql.checkData(1, 0, 3.605551275463989)
+ tdSql.checkData(2, 0, 3.605551275463989)
+ tdSql.checkData(3, 0, 3.605551275463989)
+ tdSql.checkData(4, 0, 3.605551275463989)
+ tdSql.checkData(5, 0, 3.605551275463989)
+ tdSql.checkData(6, 0, 3.605551275463989)
+
+ tdSql.query('select sqrt(c2) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.4142135623730951)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(5, 0, 11.269427669584644)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select sqrt(c3) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, 1.7320508075688772)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(5, 0, 181.01657382681842)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select sqrt(c4) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.4142135623730951)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(5, 0, 46340.950001051984)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select sqrt(c5) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, 1.7320508075688772)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(5, 0, 3037000499.97605)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select sqrt(c6) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.4142135623730951)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(5, 0, 1.844674352395373e+19)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select sqrt(c7) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, 1.7320508075688772)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(5, 0, 1.3407796239501852e+154)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select sqrt(c10) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.4142135623730951)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(5, 0, 15.937377450509228)
+ tdSql.checkData(6, 0, 0.0)
+
+ tdSql.query('select sqrt(c11) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, 1.7320508075688772)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(5, 0, 255.99609372019722)
+ tdSql.checkData(6, 0, 0.0)
+
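+ # c12 and c13 are unsigned, so the last row holds 0 rather than a negative value and sqrt() returns 0.0 instead of None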
+ tdSql.query('select sqrt(c12) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.4142135623730951)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(5, 0, 65535.99998474121)
+ tdSql.checkData(6, 0, 0.0)
+
+ tdSql.query('select sqrt(c13) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, 1.7320508075688772)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(5, 0, 3037000499.97605)
+ tdSql.checkData(6, 0, 0.0)
+
+ tdSql.query('select sqrt(a) from (select sqrt(c2) as a from tb1);')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.189207115002721)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.4142135623730951)
+ tdSql.checkData(5, 0, 3.3569968229929326)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select sqrt(c2 + c3) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.4142135623730951)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 2.8284271247461903)
+ tdSql.checkData(5, 0, 181.36703118262702)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select sqrt((c2 + c3)) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.4142135623730951)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 2.8284271247461903)
+ tdSql.checkData(5, 0, 181.36703118262702)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select sqrt((c2 * c3)+c4-6) from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 3.7416573867739413)
+ tdSql.checkData(5, 0, 46385.82811592351)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select sqrt(11)+c2 from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 4.3166247903554)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 5.3166247903554)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 7.3166247903554)
+ tdSql.checkData(5, 0, 130.3166247903554)
+ tdSql.checkData(6, 0, -123.6833752096446)
+
+ tdSql.query('select sqrt(c2)+11 from tb1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 12.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 12.414213562373096)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 13.0)
+ tdSql.checkData(5, 0, 22.269427669584644)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select sqrt(c2+c3) from tb1 where c2 is not null and c3 is not null;')
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 1.4142135623730951)
+ tdSql.checkData(1, 0, 2.8284271247461903)
+ tdSql.checkData(2, 0, 181.36703118262702)
+ tdSql.checkData(3, 0, None)
+
+ tdSql.query('select sqrt(c2) from tb1 order by ts desc;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, 11.269427669584644)
+ tdSql.checkData(2, 0, 2.0)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.4142135623730951)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, 1.0)
+
+ tdSql.query('select sqrt(c2+c3) from tb1 order by ts desc;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, 181.36703118262702)
+ tdSql.checkData(2, 0, 2.8284271247461903)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, 1.4142135623730951)
+
+ tdSql.query('select sqrt(c2+c3) from tb1 order by ts desc limit 3 offset 2;')
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, 2.8284271247461903)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+
+ tdSql.query('select sqrt(c2) from stb1;')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.4142135623730951)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(5, 0, 11.269427669584644)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, 1.0)
+ tdSql.checkData(8, 0, 1.4142135623730951)
+ tdSql.checkData(9, 0, 1.7320508075688772)
+ tdSql.checkData(10, 0, 2.0)
+ tdSql.checkData(11, 0, 2.23606797749979)
+ tdSql.checkData(12, 0, 2.449489742783178)
+ tdSql.checkData(13, 0, 2.6457513110645907)
+
+ tdSql.query('select sqrt(c2) from stb1 order by ts desc;')
+ tdSql.checkRows(14)
+ tdSql.query('select sqrt(c4),t1,c4 from stb1 order by ts desc;')
+ tdSql.checkRows(14)
+ tdSql.query('select sqrt(c3),tbname from stb1;')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(0, 1, 'tb1')
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(1, 1, 'tb1')
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(2, 1, 'tb1')
+ tdSql.checkData(3, 0, 1.7320508075688772)
+ tdSql.checkData(3, 1, 'tb1')
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(4, 1, 'tb1')
+ tdSql.checkData(5, 0, 181.01657382681842)
+ tdSql.checkData(5, 1, 'tb1')
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(6, 1, 'tb1')
+ tdSql.checkData(7, 0, 1.0)
+ tdSql.checkData(7, 1, 'tb2')
+ tdSql.checkData(8, 0, 1.4142135623730951)
+ tdSql.checkData(8, 1, 'tb2')
+ tdSql.checkData(9, 0, 1.7320508075688772)
+ tdSql.checkData(9, 1, 'tb2')
+ tdSql.checkData(10, 0, 2.0)
+ tdSql.checkData(10, 1, 'tb2')
+ tdSql.checkData(11, 0, 2.23606797749979)
+ tdSql.checkData(11, 1, 'tb2')
+ tdSql.checkData(12, 0, 2.449489742783178)
+ tdSql.checkData(12, 1, 'tb2')
+ tdSql.checkData(13, 0, 2.6457513110645907)
+ tdSql.checkData(13, 1, 'tb2')
+
+ tdSql.query('select sqrt(c3),tbname from stb1 where t1 > 1;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(0, 1, 'tb2')
+ tdSql.checkData(1, 0, 1.4142135623730951)
+ tdSql.checkData(1, 1, 'tb2')
+ tdSql.checkData(2, 0, 1.7320508075688772)
+ tdSql.checkData(2, 1, 'tb2')
+ tdSql.checkData(3, 0, 2.0)
+ tdSql.checkData(3, 1, 'tb2')
+ tdSql.checkData(4, 0, 2.23606797749979)
+ tdSql.checkData(4, 1, 'tb2')
+ tdSql.checkData(5, 0, 2.449489742783178)
+ tdSql.checkData(5, 1, 'tb2')
+ tdSql.checkData(6, 0, 2.6457513110645907)
+ tdSql.checkData(6, 1, 'tb2')
+
+ tdSql.query('select sqrt(a) from (select avg(c2) as a from stb1 interval(1s));')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, 1.4142135623730951)
+ tdSql.checkData(2, 0, 1.5811388300841898)
+ tdSql.checkData(3, 0, 2.0)
+ tdSql.checkData(4, 0, 2.1213203435596424)
+ tdSql.checkData(5, 0, 8.154753215150045)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select sqrt(c2) from (select * from stb1);')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.4142135623730951)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(5, 0, 11.269427669584644)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, 1.0)
+ tdSql.checkData(8, 0, 1.4142135623730951)
+ tdSql.checkData(9, 0, 1.7320508075688772)
+ tdSql.checkData(10, 0, 2.0)
+ tdSql.checkData(11, 0, 2.23606797749979)
+ tdSql.checkData(12, 0, 2.449489742783178)
+ tdSql.checkData(13, 0, 2.6457513110645907)
+
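+ # the same fill(null) window battery as for cos() and tan(): the three empty seconds come back as None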
+ tdSql.query("select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, 1.4142135623730951)
+ tdSql.checkData(2, 0, 1.5811388300841898)
+ tdSql.checkData(3, 0, 2.0)
+ tdSql.checkData(4, 0, 2.1213203435596424)
+ tdSql.checkData(5, 0, 8.154753215150045)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+
+ tdSql.query("select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, 1.4142135623730951)
+ tdSql.checkData(2, 0, 1.5811388300841898)
+ tdSql.checkData(3, 0, 2.0)
+ tdSql.checkData(4, 0, 2.1213203435596424)
+ tdSql.checkData(5, 0, 8.154753215150045)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+
+ tdSql.query("select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 8.154753215150045)
+ tdSql.checkData(5, 0, 2.1213203435596424)
+ tdSql.checkData(6, 0, 2.0)
+ tdSql.checkData(7, 0, 1.5811388300841898)
+ tdSql.checkData(8, 0, 1.4142135623730951)
+ tdSql.checkData(9, 0, 1.0)
+
+ tdSql.query("select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, 8.154753215150045)
+ tdSql.checkData(4, 0, 2.1213203435596424)
+ tdSql.checkData(5, 0, 2.0)
+ tdSql.checkData(6, 0, 1.5811388300841898)
+ tdSql.checkData(7, 0, 1.4142135623730951)
+ tdSql.checkData(8, 0, 1.0)
+ tdSql.checkData(9, 0, None)
+
+ tdSql.query("select sqrt(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;")
+ tdSql.checkRows(10)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, 1.0)
+ tdSql.checkData(2, 0, 1.4142135623730951)
+ tdSql.checkData(3, 0, 1.5811388300841898)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(5, 0, 2.1213203435596424)
+ tdSql.checkData(6, 0, 8.154753215150045)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+
+ tdSql.query('select sqrt(a) from (select sqrt(c2) as a from tb1);')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.189207115002721)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1.4142135623730951)
+ tdSql.checkData(5, 0, 3.3569968229929326)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query('select sqrt(tb1.c3),sqrt(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(0, 1, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(1, 1, 1.4142135623730951)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(2, 1, 1.7320508075688772)
+ tdSql.checkData(3, 0, 1.7320508075688772)
+ tdSql.checkData(3, 1, 2.0)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(4, 1, 2.23606797749979)
+ tdSql.checkData(5, 0, 181.01657382681842)
+ tdSql.checkData(5, 1, 2.449489742783178)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(6, 1, 2.6457513110645907)
+
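+ # union all keeps sub-result order: tb1 rows (None for NULL or negative c3) first, then tb2 rows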
+ tdSql.query('select sqrt(c3) from tb1 union all select sqrt(c3) from tb2;')
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, 1.7320508075688772)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(5, 0, 181.01657382681842)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, 1.0)
+ tdSql.checkData(8, 0, 1.4142135623730951)
+ tdSql.checkData(9, 0, 1.7320508075688772)
+ tdSql.checkData(10, 0, 2.0)
+ tdSql.checkData(11, 0, 2.23606797749979)
+ tdSql.checkData(12, 0, 2.449489742783178)
+ tdSql.checkData(13, 0, 2.6457513110645907)
+
+ #=========== end math_sqrt ==========
+
+
+ #=========== begin math_sqrt2 ==========
+ tdSql.query('select sqrt(stb1.c4),sqrt(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts;')
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(0, 1, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(1, 1, 1.4142135623730951)
+ tdSql.checkData(2, 0, 1.4142135623730951)
+ tdSql.checkData(2, 1, 1.7320508075688772)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(3, 1, 2.0)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(4, 1, 2.23606797749979)
+ tdSql.checkData(5, 0, 46340.950001051984)
+ tdSql.checkData(5, 1, 2.449489742783178)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(6, 1, 2.6457513110645907)
+
+ tdSql.query('select sqrt(c4) as a from stb1 union all select sqrt(c5) as a from stba;')
+ tdSql.checkRows(44)
+ tdSql.checkData(0, 0, 1.0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, 1.4142135623730951)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 2.0)
+ tdSql.checkData(5, 0, 46340.950001051984)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, 1.0)
+ tdSql.checkData(8, 0, 1.4142135623730951)
+ tdSql.checkData(9, 0, 1.4142135623730951)
+ tdSql.checkData(10, 0, 2.0)
+ tdSql.checkData(11, 0, 2.23606797749979)
+ tdSql.checkData(12, 0, 2.449489742783178)
+ tdSql.checkData(13, 0, 2.6457513110645907)
+ tdSql.checkData(14, 0, 1.0)
+ tdSql.checkData(15, 0, 1.4142135623730951)
+ tdSql.checkData(16, 0, 1.7320508075688772)
+ tdSql.checkData(17, 0, 2.0)
+ tdSql.checkData(18, 0, 2.23606797749979)
+ tdSql.checkData(19, 0, 2.449489742783178)
+ tdSql.checkData(20, 0, 2.6457513110645907)
+ tdSql.checkData(21, 0, 2.8284271247461903)
+ tdSql.checkData(22, 0, 3.0)
+ tdSql.checkData(23, 0, 0.0)
+ tdSql.checkData(24, 0, 1.0)
+ tdSql.checkData(25, 0, 1.4142135623730951)
+ tdSql.checkData(26, 0, 1.7320508075688772)
+ tdSql.checkData(27, 0, 2.0)
+ tdSql.checkData(28, 0, 2.23606797749979)
+ tdSql.checkData(29, 0, 2.449489742783178)
+ tdSql.checkData(30, 0, 2.6457513110645907)
+ tdSql.checkData(31, 0, 2.8284271247461903)
+ tdSql.checkData(32, 0, 3.0)
+ tdSql.checkData(33, 0, 0.0)
+ tdSql.checkData(34, 0, 1.0)
+ tdSql.checkData(35, 0, 1.4142135623730951)
+ tdSql.checkData(36, 0, 1.7320508075688772)
+ tdSql.checkData(37, 0, 2.0)
+ tdSql.checkData(38, 0, 2.23606797749979)
+ tdSql.checkData(39, 0, 2.449489742783178)
+ tdSql.checkData(40, 0, 2.6457513110645907)
+ tdSql.checkData(41, 0, 2.8284271247461903)
+ tdSql.checkData(42, 0, 3.0)
+ tdSql.checkData(43, 0, 0.0)
+
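+ # stba alone holds 30 rows: the 1..9,0 value pattern inserted three times around taosd restarts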
0, 1.0) + tdSql.checkData(11, 0, 1.4142135623730951) + tdSql.checkData(12, 0, 1.7320508075688772) + tdSql.checkData(13, 0, 2.0) + tdSql.checkData(14, 0, 2.23606797749979) + tdSql.checkData(15, 0, 2.449489742783178) + tdSql.checkData(16, 0, 2.6457513110645907) + tdSql.checkData(17, 0, 2.8284271247461903) + tdSql.checkData(18, 0, 3.0) + tdSql.checkData(19, 0, 0.0) + tdSql.checkData(20, 0, 1.0) + tdSql.checkData(21, 0, 1.4142135623730951) + tdSql.checkData(22, 0, 1.7320508075688772) + tdSql.checkData(23, 0, 2.0) + tdSql.checkData(24, 0, 2.23606797749979) + tdSql.checkData(25, 0, 2.449489742783178) + tdSql.checkData(26, 0, 2.6457513110645907) + tdSql.checkData(27, 0, 2.8284271247461903) + tdSql.checkData(28, 0, 3.0) + tdSql.checkData(29, 0, 0.0) + + tdSql.query('select sqrt(min(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select sqrt(max(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 3.0) + + tdSql.query('select sqrt(count(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 5.477225575051661) + + tdSql.query('select sqrt(sum(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 11.61895003862225) + + tdSql.query('select sqrt(avg(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 2.1213203435596424) + + tdSql.query('select sqrt(percentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.9486832980505138) + + tdSql.query('select sqrt(apercentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select sqrt(stddev(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.694780612135097) + + tdSql.query('select sqrt(spread(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 3.0) + + tdSql.query('select sqrt(twa(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 2.1535856227423973) + + tdSql.query('select sqrt(interp(c2)) from tba1 every(1s)') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 1.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, 1.4142135623730951) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, 1.7320508075688772) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, 2.0) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(4, 1, 2.23606797749979) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, 2.449489742783178) + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, 2.6457513110645907) + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, 2.8284271247461903) + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, 3.0) + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 0.0) + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 1.0) + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, 1.4142135623730951) + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, 1.7320508075688772) + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, 2.0) + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, 2.23606797749979) + tdSql.checkData(15, 
0, datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, 2.449489742783178) + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, 2.6457513110645907) + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, 2.8284271247461903) + tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, 3.0) + tdSql.checkData(19, 0, datetime.datetime(2021, 11, 11, 9, 0, 19)) + tdSql.checkData(19, 1, 0.0) + tdSql.checkData(20, 0, datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 1.0) + tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, 1.4142135623730951) + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, 1.7320508075688772) + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, 2.0) + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, 2.23606797749979) + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, 2.449489742783178) + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, 2.6457513110645907) + tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, 2.8284271247461903) + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, 3.0) + tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 0.0) + + tdSql.query('select sqrt(interp(c2)) from stba every(1s) group by tbname;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 1.0) + tdSql.checkData(0, 2, 'tba1') + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, 1.4142135623730951) + tdSql.checkData(1, 2, 'tba1') + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, 1.7320508075688772) + tdSql.checkData(2, 2, 'tba1') + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, 2.0) + tdSql.checkData(3, 2, 'tba1') + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(4, 1, 2.23606797749979) + tdSql.checkData(4, 2, 'tba1') + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, 2.449489742783178) + tdSql.checkData(5, 2, 'tba1') + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, 2.6457513110645907) + tdSql.checkData(6, 2, 'tba1') + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, 2.8284271247461903) + tdSql.checkData(7, 2, 'tba1') + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, 3.0) + tdSql.checkData(8, 2, 'tba1') + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 0.0) + tdSql.checkData(9, 2, 'tba1') + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 1.0) + tdSql.checkData(10, 2, 'tba1') + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, 1.4142135623730951) + tdSql.checkData(11, 2, 'tba1') + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, 1.7320508075688772) + tdSql.checkData(12, 2, 'tba1') + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, 
2.0) + tdSql.checkData(13, 2, 'tba1') + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, 2.23606797749979) + tdSql.checkData(14, 2, 'tba1') + tdSql.checkData(15, 0, datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, 2.449489742783178) + tdSql.checkData(15, 2, 'tba1') + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, 2.6457513110645907) + tdSql.checkData(16, 2, 'tba1') + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, 2.8284271247461903) + tdSql.checkData(17, 2, 'tba1') + tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, 3.0) + tdSql.checkData(18, 2, 'tba1') + tdSql.checkData(19, 0, datetime.datetime(2021, 11, 11, 9, 0, 19)) + tdSql.checkData(19, 1, 0.0) + tdSql.checkData(19, 2, 'tba1') + tdSql.checkData(20, 0, datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 1.0) + tdSql.checkData(20, 2, 'tba1') + tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, 1.4142135623730951) + tdSql.checkData(21, 2, 'tba1') + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, 1.7320508075688772) + tdSql.checkData(22, 2, 'tba1') + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, 2.0) + tdSql.checkData(23, 2, 'tba1') + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, 2.23606797749979) + tdSql.checkData(24, 2, 'tba1') + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, 2.449489742783178) + tdSql.checkData(25, 2, 'tba1') + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, 2.6457513110645907) + tdSql.checkData(26, 2, 'tba1') + tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, 2.8284271247461903) + tdSql.checkData(27, 2, 'tba1') + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, 3.0) + tdSql.checkData(28, 2, 'tba1') + tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 0.0) + tdSql.checkData(29, 2, 'tba1') + + tdSql.query('select sqrt(elapsed(ts)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 170.29386365926402) + + tdSql.query('select sqrt(rate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.9468641529479986) + + tdSql.query('select sqrt(irate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select sqrt(first(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.0) + + tdSql.query('select sqrt(last(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select sqrt(last_row(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + #=========== end math_sqrt2 ========== + + + #=========== begin math_abs ========== + tdSql.query('select abs(13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 13) + tdSql.checkData(1, 0, 13) + tdSql.checkData(2, 0, 13) + tdSql.checkData(3, 0, 13) + tdSql.checkData(4, 0, 13) + tdSql.checkData(5, 0, 13) + tdSql.checkData(6, 0, 13) + + tdSql.query('select abs(c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 127) + tdSql.checkData(6, 0, 127) + + 
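# For reference (illustrative, not part of the original test): the
# sqrt-of-aggregate constants asserted above follow from the aggregate values
# that the abs section below also asserts (elapsed(ts) = 29000.0 ms,
# avg(c2) = 4.5, rate(c2) = 0.896551724137931):
import math
assert math.isclose(math.sqrt(29000.0), 170.29386365926402)
assert math.isclose(math.sqrt(4.5), 2.1213203435596424)
assert math.isclose(math.sqrt(0.896551724137931), 0.9468641529479986)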
tdSql.query('select abs(c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 3) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 32767) + tdSql.checkData(6, 0, 32767) + + tdSql.query('select abs(c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 2147483647) + tdSql.checkData(6, 0, 2147483647) + + tdSql.query('select abs(c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 3) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 9223372036854775807) + tdSql.checkData(6, 0, 9223372036854775807) + + tdSql.query('select abs(c6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4.0) + tdSql.checkData(5, 0, 3.4028234663852886e+38) + tdSql.checkData(6, 0, 3.4028234663852886e+38) + + tdSql.query('select abs(c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 3.0) + tdSql.checkData(4, 0, 4.0) + tdSql.checkData(5, 0, 1.79769e+308) + tdSql.checkData(6, 0, 1.79769e+308) + + tdSql.query('select abs(c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 254) + tdSql.checkData(6, 0, 0) + + tdSql.query('select abs(c11) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 3) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 65534) + tdSql.checkData(6, 0, 0) + + tdSql.query('select abs(c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 4294967294) + tdSql.checkData(6, 0, 0) + + tdSql.query('select abs(c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 3) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 9223372036854775807) + tdSql.checkData(6, 0, 0) + + tdSql.query('select abs(a) from (select abs(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 127) + tdSql.checkData(6, 0, 127) + + tdSql.query('select abs(c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 2.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 8.0) + tdSql.checkData(5, 0, 32894.0) + tdSql.checkData(6, 0, 32894.0) + + tdSql.query('select abs((c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 2.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 8.0) + tdSql.checkData(5, 0, 32894.0) + tdSql.checkData(6, 0, 32894.0) + + tdSql.query('select abs((c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 4.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + 
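# Worked arithmetic (illustrative, using the tb1 values implied by the checks
# above: row 5 = (127, 32767, 2147483647), row 6 the same magnitudes negated)
# for the abs((c2 * c3)+c4-6) rows asserted below:
#   abs(127 * 32767 + 2147483647 - 6)      = abs(2151645050)  = 2151645050.0
#   abs(-127 * -32767 + (-2147483647) - 6) = abs(-2143322244) = 2143322244.0
# The abs(11)+c2 query further below also shows that abs() binds to the
# constant only: row 6 is 11 + (-127) = -116.0, while abs(c2)+11 gives 138.0.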
tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 14.0) + tdSql.checkData(5, 0, 2151645050.0) + tdSql.checkData(6, 0, 2143322244.0) + + tdSql.query('select abs(11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 12.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 13.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 15.0) + tdSql.checkData(5, 0, 138.0) + tdSql.checkData(6, 0, -116.0) + + tdSql.query('select abs(c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 12.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 13.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 15.0) + tdSql.checkData(5, 0, 138.0) + tdSql.checkData(6, 0, 138.0) + + tdSql.query('select abs(c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, 2.0) + tdSql.checkData(1, 0, 8.0) + tdSql.checkData(2, 0, 32894.0) + tdSql.checkData(3, 0, 32894.0) + + tdSql.query('select abs(c2) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 127) + tdSql.checkData(1, 0, 127) + tdSql.checkData(2, 0, 4) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 2) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1) + + tdSql.query('select abs(c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 32894.0) + tdSql.checkData(1, 0, 32894.0) + tdSql.checkData(2, 0, 8.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 2.0) + + tdSql.query('select abs(c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, 8.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select abs(c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 127) + tdSql.checkData(6, 0, 127) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 2) + tdSql.checkData(9, 0, 3) + tdSql.checkData(10, 0, 4) + tdSql.checkData(11, 0, 5) + tdSql.checkData(12, 0, 6) + tdSql.checkData(13, 0, 7) + + tdSql.query('select abs(c2) from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select abs(c4),t1 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select abs(c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, 3) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, 4) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, 32767) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, 32767) + tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, 1) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, 2) + tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, 3) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, 4) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, 5) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, 6) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, 7) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select abs(c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, 2) + tdSql.checkData(1, 1, 'tb2') + tdSql.checkData(2, 0, 3) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, 4) + tdSql.checkData(3, 1, 'tb2') + 
tdSql.checkData(4, 0, 5) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, 6) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, 7) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select abs(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, 2.0) + tdSql.checkData(2, 0, 2.5) + tdSql.checkData(3, 0, 4.0) + tdSql.checkData(4, 0, 4.5) + tdSql.checkData(5, 0, 66.5) + tdSql.checkData(6, 0, 60.0) + + tdSql.query('select abs(c2) from (select * from stb1);') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 127) + tdSql.checkData(6, 0, 127) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 2) + tdSql.checkData(9, 0, 3) + tdSql.checkData(10, 0, 4) + tdSql.checkData(11, 0, 5) + tdSql.checkData(12, 0, 6) + tdSql.checkData(13, 0, 7) + + tdSql.query("select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, 2.0) + tdSql.checkData(2, 0, 2.5) + tdSql.checkData(3, 0, 4.0) + tdSql.checkData(4, 0, 4.5) + tdSql.checkData(5, 0, 66.5) + tdSql.checkData(6, 0, 60.0) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, 2.0) + tdSql.checkData(2, 0, 2.5) + tdSql.checkData(3, 0, 4.0) + tdSql.checkData(4, 0, 4.5) + tdSql.checkData(5, 0, 66.5) + tdSql.checkData(6, 0, 60.0) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 60.0) + tdSql.checkData(4, 0, 66.5) + tdSql.checkData(5, 0, 4.5) + tdSql.checkData(6, 0, 4.0) + tdSql.checkData(7, 0, 2.5) + tdSql.checkData(8, 0, 2.0) + tdSql.checkData(9, 0, 1.0) + + tdSql.query("select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 66.5) + tdSql.checkData(4, 0, 4.5) + tdSql.checkData(5, 0, 4.0) + tdSql.checkData(6, 0, 2.5) + tdSql.checkData(7, 0, 2.0) + tdSql.checkData(8, 0, 1.0) + tdSql.checkData(9, 0, 60.0) + + tdSql.query("select abs(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 60.0) + tdSql.checkData(1, 0, 1.0) + tdSql.checkData(2, 0, 2.0) + tdSql.checkData(3, 0, 2.5) + tdSql.checkData(4, 0, 4.0) + tdSql.checkData(5, 0, 4.5) + tdSql.checkData(6, 0, 66.5) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query('select abs(a) from (select abs(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 
0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 127) + tdSql.checkData(6, 0, 127) + + tdSql.query('select abs(tb1.c3),abs(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 3) + tdSql.checkData(3, 0, 3) + tdSql.checkData(3, 1, 4) + tdSql.checkData(4, 0, 4) + tdSql.checkData(4, 1, 5) + tdSql.checkData(5, 0, 32767) + tdSql.checkData(5, 1, 6) + tdSql.checkData(6, 0, 32767) + tdSql.checkData(6, 1, 7) + + tdSql.query('select abs(c3) from tb1 union all select abs(c3) from tb2;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 3) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 32767) + tdSql.checkData(6, 0, 32767) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 2) + tdSql.checkData(9, 0, 3) + tdSql.checkData(10, 0, 4) + tdSql.checkData(11, 0, 5) + tdSql.checkData(12, 0, 6) + tdSql.checkData(13, 0, 7) + + #=========== end math_abs ========== + + + #=========== begin math_abs2 ========== + tdSql.query('select abs(stb1.c4),abs(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, 2) + tdSql.checkData(2, 1, 3) + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 4) + tdSql.checkData(4, 0, 4) + tdSql.checkData(4, 1, 5) + tdSql.checkData(5, 0, 2147483647) + tdSql.checkData(5, 1, 6) + tdSql.checkData(6, 0, 2147483647) + tdSql.checkData(6, 1, 7) + + tdSql.query('select abs(c4) as a from stb1 union all select abs(c5) as a from stba;') + tdSql.checkRows(44) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4) + tdSql.checkData(5, 0, 2147483647) + tdSql.checkData(6, 0, 2147483647) + tdSql.checkData(7, 0, 1) + tdSql.checkData(8, 0, 2) + tdSql.checkData(9, 0, 2) + tdSql.checkData(10, 0, 4) + tdSql.checkData(11, 0, 5) + tdSql.checkData(12, 0, 6) + tdSql.checkData(13, 0, 7) + tdSql.checkData(14, 0, 1) + tdSql.checkData(15, 0, 2) + tdSql.checkData(16, 0, 3) + tdSql.checkData(17, 0, 4) + tdSql.checkData(18, 0, 5) + tdSql.checkData(19, 0, 6) + tdSql.checkData(20, 0, 7) + tdSql.checkData(21, 0, 8) + tdSql.checkData(22, 0, 9) + tdSql.checkData(23, 0, 0) + tdSql.checkData(24, 0, 1) + tdSql.checkData(25, 0, 2) + tdSql.checkData(26, 0, 3) + tdSql.checkData(27, 0, 4) + tdSql.checkData(28, 0, 5) + tdSql.checkData(29, 0, 6) + tdSql.checkData(30, 0, 7) + tdSql.checkData(31, 0, 8) + tdSql.checkData(32, 0, 9) + tdSql.checkData(33, 0, 0) + tdSql.checkData(34, 0, 1) + tdSql.checkData(35, 0, 2) + tdSql.checkData(36, 0, 3) + tdSql.checkData(37, 0, 4) + tdSql.checkData(38, 0, 5) + tdSql.checkData(39, 0, 6) + tdSql.checkData(40, 0, 7) + tdSql.checkData(41, 0, 8) + tdSql.checkData(42, 0, 9) + tdSql.checkData(43, 0, 0) + + tdSql.query('select abs(c2) from stba;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, 2) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 4) + tdSql.checkData(4, 0, 5) + tdSql.checkData(5, 0, 6) + tdSql.checkData(6, 0, 7) + tdSql.checkData(7, 0, 8) + tdSql.checkData(8, 0, 9) + tdSql.checkData(9, 0, 0) + tdSql.checkData(10, 0, 1) + tdSql.checkData(11, 0, 2) + 
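# Illustrative cross-check of the aggregate expectations a few lines below,
# assuming c2 cycles through 1..9,0 over the 30 rows of tba1:
import math, statistics
cycle = list(range(1, 10)) + [0]
assert sum(cycle * 3) == 135                  # abs(sum(c2))
assert sum(cycle * 3) / 30 == 4.5             # abs(avg(c2))
assert math.isclose(statistics.pstdev(cycle), 2.8722813232690143)  # abs(stddev(c2))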
tdSql.checkData(12, 0, 3) + tdSql.checkData(13, 0, 4) + tdSql.checkData(14, 0, 5) + tdSql.checkData(15, 0, 6) + tdSql.checkData(16, 0, 7) + tdSql.checkData(17, 0, 8) + tdSql.checkData(18, 0, 9) + tdSql.checkData(19, 0, 0) + tdSql.checkData(20, 0, 1) + tdSql.checkData(21, 0, 2) + tdSql.checkData(22, 0, 3) + tdSql.checkData(23, 0, 4) + tdSql.checkData(24, 0, 5) + tdSql.checkData(25, 0, 6) + tdSql.checkData(26, 0, 7) + tdSql.checkData(27, 0, 8) + tdSql.checkData(28, 0, 9) + tdSql.checkData(29, 0, 0) + + tdSql.query('select abs(min(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select abs(max(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 9) + + tdSql.query('select abs(count(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 30) + + tdSql.query('select abs(sum(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 135) + + tdSql.query('select abs(avg(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 4.5) + + tdSql.query('select abs(percentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.8999999999999999) + + tdSql.query('select abs(apercentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select abs(stddev(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 2.8722813232690143) + + tdSql.query('select abs(spread(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 9.0) + + tdSql.query('select abs(twa(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 4.637931034482759) + + tdSql.query('select abs(interp(c2)) from tba1 every(1s)') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, 3) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, 4) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(4, 1, 5) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, 6) + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, 7) + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, 8) + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, 9) + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 0) + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 1) + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, 2) + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, 3) + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, 4) + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, 5) + tdSql.checkData(15, 0, datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, 6) + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, 7) + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, 8) + tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, 9) + tdSql.checkData(19, 0, datetime.datetime(2021, 11, 11, 9, 0, 19)) + tdSql.checkData(19, 1, 0) + 
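# interp() at every(1s) reproduces the raw series in these checks, apparently
# because the fixture stores exactly one row per second from 09:00:00 to
# 09:00:29, so each interpolation point coincides with a stored row and the
# values simply cycle 1..9,0.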
tdSql.checkData(20, 0, datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 1) + tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, 2) + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, 3) + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, 4) + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, 5) + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, 6) + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, 7) + tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, 8) + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, 9) + tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 0) + + tdSql.query('select abs(interp(c2)) from stba every(1s) group by tbname;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 'tba1') + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, 2) + tdSql.checkData(1, 2, 'tba1') + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, 3) + tdSql.checkData(2, 2, 'tba1') + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, 4) + tdSql.checkData(3, 2, 'tba1') + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(4, 1, 5) + tdSql.checkData(4, 2, 'tba1') + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, 6) + tdSql.checkData(5, 2, 'tba1') + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, 7) + tdSql.checkData(6, 2, 'tba1') + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, 8) + tdSql.checkData(7, 2, 'tba1') + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, 9) + tdSql.checkData(8, 2, 'tba1') + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 0) + tdSql.checkData(9, 2, 'tba1') + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 1) + tdSql.checkData(10, 2, 'tba1') + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, 2) + tdSql.checkData(11, 2, 'tba1') + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, 3) + tdSql.checkData(12, 2, 'tba1') + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, 4) + tdSql.checkData(13, 2, 'tba1') + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, 5) + tdSql.checkData(14, 2, 'tba1') + tdSql.checkData(15, 0, datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, 6) + tdSql.checkData(15, 2, 'tba1') + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, 7) + tdSql.checkData(16, 2, 'tba1') + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, 8) + tdSql.checkData(17, 2, 'tba1') + tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, 9) + tdSql.checkData(18, 2, 'tba1') + tdSql.checkData(19, 0, datetime.datetime(2021, 11, 11, 9, 0, 19)) + 
tdSql.checkData(19, 1, 0) + tdSql.checkData(19, 2, 'tba1') + tdSql.checkData(20, 0, datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 1) + tdSql.checkData(20, 2, 'tba1') + tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, 2) + tdSql.checkData(21, 2, 'tba1') + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, 3) + tdSql.checkData(22, 2, 'tba1') + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, 4) + tdSql.checkData(23, 2, 'tba1') + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, 5) + tdSql.checkData(24, 2, 'tba1') + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, 6) + tdSql.checkData(25, 2, 'tba1') + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, 7) + tdSql.checkData(26, 2, 'tba1') + tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, 8) + tdSql.checkData(27, 2, 'tba1') + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, 9) + tdSql.checkData(28, 2, 'tba1') + tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 0) + tdSql.checkData(29, 2, 'tba1') + + tdSql.query('select abs(elapsed(ts)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 29000.0) + + tdSql.query('select abs(rate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.896551724137931) + + tdSql.query('select abs(irate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select abs(first(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query('select abs(last(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + tdSql.query('select abs(last_row(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + + #=========== end math_abs2 ========== + + + #=========== begin math_asin ========== + tdSql.query('select asin(13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c6) from tb1;') + 
tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select asin(c11) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select asin(c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select asin(c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select asin(a) from (select asin(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin((c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin((c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 12.570796326794897) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, 
None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + + tdSql.query('select asin(c2) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select asin(c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select asin(c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 1.5707963267948966) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + tdSql.query('select asin(c2) from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select asin(c4),t1 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select asin(c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, 1.5707963267948966) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, None) + tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, None) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, None) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, None) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, None) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, None) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select asin(c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb2') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 'tb2') + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select asin(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(c2) from (select * from stb1);') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 
0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 1.5707963267948966) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + tdSql.query("select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, 1.5707963267948966) + + tdSql.query("select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, 1.5707963267948966) + tdSql.checkData(9, 0, None) + + tdSql.query("select asin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 1.5707963267948966) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query('select asin(a) from (select asin(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select asin(tb1.c3),asin(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(0, 1, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 0, 
None) + tdSql.checkData(2, 1, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, None) + + tdSql.query('select asin(c3) from tb1 union all select asin(c3) from tb2;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 1.5707963267948966) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + #=========== end math_asin ========== + + + #=========== begin math_asin2 ========== + tdSql.query('select asin(stb1.c4),asin(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(0, 1, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, None) + + tdSql.query('select asin(c4) as a from stb1 union all select asin(c5) as a from stba;') + tdSql.checkRows(44) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 1.5707963267948966) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + tdSql.checkData(14, 0, 1.5707963267948966) + tdSql.checkData(15, 0, None) + tdSql.checkData(16, 0, None) + tdSql.checkData(17, 0, None) + tdSql.checkData(18, 0, None) + tdSql.checkData(19, 0, None) + tdSql.checkData(20, 0, None) + tdSql.checkData(21, 0, None) + tdSql.checkData(22, 0, None) + tdSql.checkData(23, 0, 0.0) + tdSql.checkData(24, 0, 1.5707963267948966) + tdSql.checkData(25, 0, None) + tdSql.checkData(26, 0, None) + tdSql.checkData(27, 0, None) + tdSql.checkData(28, 0, None) + tdSql.checkData(29, 0, None) + tdSql.checkData(30, 0, None) + tdSql.checkData(31, 0, None) + tdSql.checkData(32, 0, None) + tdSql.checkData(33, 0, 0.0) + tdSql.checkData(34, 0, 1.5707963267948966) + tdSql.checkData(35, 0, None) + tdSql.checkData(36, 0, None) + tdSql.checkData(37, 0, None) + tdSql.checkData(38, 0, None) + tdSql.checkData(39, 0, None) + tdSql.checkData(40, 0, None) + tdSql.checkData(41, 0, None) + tdSql.checkData(42, 0, None) + tdSql.checkData(43, 0, 0.0) + + tdSql.query('select asin(c2) from stba;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, 0.0) + tdSql.checkData(10, 0, 1.5707963267948966) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + 
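# The None / pi/2 pattern in the asin() checks here follows from the domain of
# arcsine: asin is defined only on [-1, 1], so column values of 2..9 map to
# NULL, asin(1) = pi/2 = 1.5707963267948966, and asin(0) = 0.0. For example:
import math
assert math.isclose(math.asin(1), 1.5707963267948966)
assert math.asin(0) == 0.0
# math.asin(2) raises ValueError; the SQL asin() returns NULL instead.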
tdSql.checkData(14, 0, None) + tdSql.checkData(15, 0, None) + tdSql.checkData(16, 0, None) + tdSql.checkData(17, 0, None) + tdSql.checkData(18, 0, None) + tdSql.checkData(19, 0, 0.0) + tdSql.checkData(20, 0, 1.5707963267948966) + tdSql.checkData(21, 0, None) + tdSql.checkData(22, 0, None) + tdSql.checkData(23, 0, None) + tdSql.checkData(24, 0, None) + tdSql.checkData(25, 0, None) + tdSql.checkData(26, 0, None) + tdSql.checkData(27, 0, None) + tdSql.checkData(28, 0, None) + tdSql.checkData(29, 0, 0.0) + + tdSql.query('select asin(min(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select asin(max(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select asin(count(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select asin(sum(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select asin(avg(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select asin(percentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.119769514998634) + + tdSql.query('select asin(apercentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select asin(stddev(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select asin(spread(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select asin(twa(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select asin(interp(c2)) from tba1 every(1s)') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 1.5707963267948966) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, None) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, None) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(4, 1, None) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, None) + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, None) + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, None) + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, None) + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 0.0) + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 1.5707963267948966) + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, None) + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, None) + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, None) + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, None) + tdSql.checkData(15, 0, datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, None) + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, None) + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, None) + tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, None) + tdSql.checkData(19, 0, 
datetime.datetime(2021, 11, 11, 9, 0, 19)) + tdSql.checkData(19, 1, 0.0) + tdSql.checkData(20, 0, datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 1.5707963267948966) + tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, None) + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, None) + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, None) + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, None) + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, None) + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, None) + tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, None) + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, None) + tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 0.0) + + tdSql.query('select asin(interp(c2)) from stba every(1s) group by tbname;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 1.5707963267948966) + tdSql.checkData(0, 2, 'tba1') + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, None) + tdSql.checkData(1, 2, 'tba1') + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, None) + tdSql.checkData(2, 2, 'tba1') + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, None) + tdSql.checkData(3, 2, 'tba1') + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(4, 1, None) + tdSql.checkData(4, 2, 'tba1') + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, None) + tdSql.checkData(5, 2, 'tba1') + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, None) + tdSql.checkData(6, 2, 'tba1') + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, None) + tdSql.checkData(7, 2, 'tba1') + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, None) + tdSql.checkData(8, 2, 'tba1') + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 0.0) + tdSql.checkData(9, 2, 'tba1') + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 1.5707963267948966) + tdSql.checkData(10, 2, 'tba1') + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, None) + tdSql.checkData(11, 2, 'tba1') + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, None) + tdSql.checkData(12, 2, 'tba1') + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, None) + tdSql.checkData(13, 2, 'tba1') + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, None) + tdSql.checkData(14, 2, 'tba1') + tdSql.checkData(15, 0, datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, None) + tdSql.checkData(15, 2, 'tba1') + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, None) + tdSql.checkData(16, 2, 'tba1') + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, None) + tdSql.checkData(17, 2, 'tba1') + 
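# Illustrative cross-check for the asin-of-aggregate rows nearby: rate(c2) is
# asserted as 0.896551724137931 in the abs section above, and percentile(c2, 10)
# as ~0.9, both inside asin's domain:
import math
assert math.isclose(math.asin(0.896551724137931), 1.1119221217807869)
assert math.isclose(math.asin(0.9), 1.119769514998634)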
tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, None) + tdSql.checkData(18, 2, 'tba1') + tdSql.checkData(19, 0, datetime.datetime(2021, 11, 11, 9, 0, 19)) + tdSql.checkData(19, 1, 0.0) + tdSql.checkData(19, 2, 'tba1') + tdSql.checkData(20, 0, datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 1.5707963267948966) + tdSql.checkData(20, 2, 'tba1') + tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, None) + tdSql.checkData(21, 2, 'tba1') + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, None) + tdSql.checkData(22, 2, 'tba1') + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, None) + tdSql.checkData(23, 2, 'tba1') + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, None) + tdSql.checkData(24, 2, 'tba1') + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, None) + tdSql.checkData(25, 2, 'tba1') + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, None) + tdSql.checkData(26, 2, 'tba1') + tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, None) + tdSql.checkData(27, 2, 'tba1') + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, None) + tdSql.checkData(28, 2, 'tba1') + tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 0.0) + tdSql.checkData(29, 2, 'tba1') + + tdSql.query('select asin(elapsed(ts)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select asin(rate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.1119221217807869) + + tdSql.query('select asin(irate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select asin(first(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.5707963267948966) + + tdSql.query('select asin(last(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select asin(last_row(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + #=========== end math_asin2 ========== + + + #=========== begin math_acos ========== + tdSql.query('select acos(13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, 
None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select acos(c11) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select acos(c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select acos(c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select acos(a) from (select acos(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos((c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos((c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 11.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 
0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + + tdSql.query('select acos(c2) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select acos(c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select acos(c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + tdSql.query('select acos(c2) from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select acos(c4),t1 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select acos(c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, None) + tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, None) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, None) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, None) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, None) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, None) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select acos(c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb2') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 'tb2') + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select acos(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2) from (select * from stb1);') + tdSql.checkRows(14) 
+ tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, 0.0) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, 0.0) + tdSql.checkData(9, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 0.0) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query('select acos(a) from (select acos(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(tb1.c3),acos(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(0, 1, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, None) + 
tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, None) + + tdSql.query('select acos(c3) from tb1 union all select acos(c3) from tb2;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + #=========== end math_acos ========== + + + #=========== begin math_acos2 ========== + tdSql.query('select acos(stb1.c4),acos(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(0, 1, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, None) + + tdSql.query('select acos(c4) as a from stb1 union all select acos(c5) as a from stba;') + tdSql.checkRows(44) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + tdSql.checkData(14, 0, 0.0) + tdSql.checkData(15, 0, None) + tdSql.checkData(16, 0, None) + tdSql.checkData(17, 0, None) + tdSql.checkData(18, 0, None) + tdSql.checkData(19, 0, None) + tdSql.checkData(20, 0, None) + tdSql.checkData(21, 0, None) + tdSql.checkData(22, 0, None) + tdSql.checkData(23, 0, 1.5707963267948966) + tdSql.checkData(24, 0, 0.0) + tdSql.checkData(25, 0, None) + tdSql.checkData(26, 0, None) + tdSql.checkData(27, 0, None) + tdSql.checkData(28, 0, None) + tdSql.checkData(29, 0, None) + tdSql.checkData(30, 0, None) + tdSql.checkData(31, 0, None) + tdSql.checkData(32, 0, None) + tdSql.checkData(33, 0, 1.5707963267948966) + tdSql.checkData(34, 0, 0.0) + tdSql.checkData(35, 0, None) + tdSql.checkData(36, 0, None) + tdSql.checkData(37, 0, None) + tdSql.checkData(38, 0, None) + tdSql.checkData(39, 0, None) + tdSql.checkData(40, 0, None) + tdSql.checkData(41, 0, None) + tdSql.checkData(42, 0, None) + tdSql.checkData(43, 0, 1.5707963267948966) + + tdSql.query('select acos(c2) from stba;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, 1.5707963267948966) + tdSql.checkData(10, 0, 0.0) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + tdSql.checkData(14, 0, None) + tdSql.checkData(15, 0, None) + tdSql.checkData(16, 0, None) + tdSql.checkData(17, 0, None) + tdSql.checkData(18, 
0, None) + tdSql.checkData(19, 0, 1.5707963267948966) + tdSql.checkData(20, 0, 0.0) + tdSql.checkData(21, 0, None) + tdSql.checkData(22, 0, None) + tdSql.checkData(23, 0, None) + tdSql.checkData(24, 0, None) + tdSql.checkData(25, 0, None) + tdSql.checkData(26, 0, None) + tdSql.checkData(27, 0, None) + tdSql.checkData(28, 0, None) + tdSql.checkData(29, 0, 1.5707963267948966) + + tdSql.query('select acos(min(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.5707963267948966) + + tdSql.query('select acos(max(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(count(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(sum(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(avg(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(percentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.45102681179626264) + + tdSql.query('select acos(apercentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.5707963267948966) + + tdSql.query('select acos(stddev(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(spread(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(twa(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(interp(c2)) from tba1 every(1s)') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 0.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, None) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, None) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(4, 1, None) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, None) + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, None) + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, None) + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, None) + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 1.5707963267948966) + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 0.0) + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, None) + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, None) + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, None) + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, None) + tdSql.checkData(15, 0, datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, None) + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, None) + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, None) + tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, None) + tdSql.checkData(19, 0, datetime.datetime(2021, 11, 11, 9, 0, 19)) + tdSql.checkData(19, 1, 1.5707963267948966) + tdSql.checkData(20, 0, 
datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 0.0) + tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, None) + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, None) + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, None) + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, None) + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, None) + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, None) + tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, None) + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, None) + tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 1.5707963267948966) + + tdSql.query('select acos(interp(c2)) from stba every(1s) group by tbname;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 0.0) + tdSql.checkData(0, 2, 'tba1') + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, None) + tdSql.checkData(1, 2, 'tba1') + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, None) + tdSql.checkData(2, 2, 'tba1') + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, None) + tdSql.checkData(3, 2, 'tba1') + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(4, 1, None) + tdSql.checkData(4, 2, 'tba1') + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, None) + tdSql.checkData(5, 2, 'tba1') + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, None) + tdSql.checkData(6, 2, 'tba1') + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, None) + tdSql.checkData(7, 2, 'tba1') + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, None) + tdSql.checkData(8, 2, 'tba1') + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 1.5707963267948966) + tdSql.checkData(9, 2, 'tba1') + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 0.0) + tdSql.checkData(10, 2, 'tba1') + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, None) + tdSql.checkData(11, 2, 'tba1') + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, None) + tdSql.checkData(12, 2, 'tba1') + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, None) + tdSql.checkData(13, 2, 'tba1') + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, None) + tdSql.checkData(14, 2, 'tba1') + tdSql.checkData(15, 0, datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, None) + tdSql.checkData(15, 2, 'tba1') + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, None) + tdSql.checkData(16, 2, 'tba1') + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, None) + tdSql.checkData(17, 2, 'tba1') + tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, None) + tdSql.checkData(18, 2, 
'tba1') + tdSql.checkData(19, 0, datetime.datetime(2021, 11, 11, 9, 0, 19)) + tdSql.checkData(19, 1, 1.5707963267948966) + tdSql.checkData(19, 2, 'tba1') + tdSql.checkData(20, 0, datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 0.0) + tdSql.checkData(20, 2, 'tba1') + tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, None) + tdSql.checkData(21, 2, 'tba1') + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, None) + tdSql.checkData(22, 2, 'tba1') + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, None) + tdSql.checkData(23, 2, 'tba1') + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, None) + tdSql.checkData(24, 2, 'tba1') + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, None) + tdSql.checkData(25, 2, 'tba1') + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, None) + tdSql.checkData(26, 2, 'tba1') + tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, None) + tdSql.checkData(27, 2, 'tba1') + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, None) + tdSql.checkData(28, 2, 'tba1') + tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 1.5707963267948966) + tdSql.checkData(29, 2, 'tba1') + + tdSql.query('select acos(elapsed(ts)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(rate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.4588742050141097) + + tdSql.query('select acos(irate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.5707963267948966) + + tdSql.query('select acos(first(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select acos(last(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.5707963267948966) + + tdSql.query('select acos(last_row(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.5707963267948966) + + #=========== end math_acos2 ========== + + + #=========== begin math_atan ========== + tdSql.query('select acos(13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + 
tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select acos(c11) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select acos(c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select acos(c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5707963267948966) + + tdSql.query('select acos(a) from (select acos(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos((c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos((c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 11.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select 
acos(c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + + tdSql.query('select acos(c2) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select acos(c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select acos(c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + tdSql.query('select acos(c2) from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select acos(c4),t1 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select acos(c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, None) + tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, None) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, None) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, None) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, None) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, None) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select acos(c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb2') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 'tb2') + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select acos(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(c2) from (select * from stb1);') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + 
tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, 0.0) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, 0.0) + tdSql.checkData(9, 0, None) + + tdSql.query("select acos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 0.0) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query('select acos(a) from (select acos(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5707963267948966) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select acos(tb1.c3),acos(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(0, 1, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, None) + tdSql.checkData(4, 
0, None) + tdSql.checkData(4, 1, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, None) + + tdSql.query('select acos(c3) from tb1 union all select acos(c3) from tb2;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + + #=========== end math_atan ========== + + + #=========== begin math_atan2 ========== + tdSql.query('select acos(stb1.c4),acos(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(0, 1, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(4, 1, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(5, 1, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, None) + + tdSql.query('select acos(c4) as a from stb1 union all select acos(c5) as a from stba;') + tdSql.checkRows(44) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, 0.0) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + tdSql.checkData(14, 0, 0.0) + tdSql.checkData(15, 0, None) + tdSql.checkData(16, 0, None) + tdSql.checkData(17, 0, None) + tdSql.checkData(18, 0, None) + tdSql.checkData(19, 0, None) + tdSql.checkData(20, 0, None) + tdSql.checkData(21, 0, None) + tdSql.checkData(22, 0, None) + tdSql.checkData(23, 0, 1.5707963267948966) + tdSql.checkData(24, 0, 0.0) + tdSql.checkData(25, 0, None) + tdSql.checkData(26, 0, None) + tdSql.checkData(27, 0, None) + tdSql.checkData(28, 0, None) + tdSql.checkData(29, 0, None) + tdSql.checkData(30, 0, None) + tdSql.checkData(31, 0, None) + tdSql.checkData(32, 0, None) + tdSql.checkData(33, 0, 1.5707963267948966) + tdSql.checkData(34, 0, 0.0) + tdSql.checkData(35, 0, None) + tdSql.checkData(36, 0, None) + tdSql.checkData(37, 0, None) + tdSql.checkData(38, 0, None) + tdSql.checkData(39, 0, None) + tdSql.checkData(40, 0, None) + tdSql.checkData(41, 0, None) + tdSql.checkData(42, 0, None) + tdSql.checkData(43, 0, 1.5707963267948966) + + tdSql.query('select acos(c2) from stba;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, 1.5707963267948966) + tdSql.checkData(10, 0, 0.0) + tdSql.checkData(11, 0, None) + tdSql.checkData(12, 0, None) + tdSql.checkData(13, 0, None) + tdSql.checkData(14, 0, None) + tdSql.checkData(15, 0, None) + tdSql.checkData(16, 0, None) + tdSql.checkData(17, 0, None) + tdSql.checkData(18, 0, None) + tdSql.checkData(19, 0, 1.5707963267948966) + tdSql.checkData(20, 0, 
0.0) + tdSql.checkData(21, 0, None) + tdSql.checkData(22, 0, None) + tdSql.checkData(23, 0, None) + tdSql.checkData(24, 0, None) + tdSql.checkData(25, 0, None) + tdSql.checkData(26, 0, None) + tdSql.checkData(27, 0, None) + tdSql.checkData(28, 0, None) + tdSql.checkData(29, 0, 1.5707963267948966) + + tdSql.query('select acos(min(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.5707963267948966) + + tdSql.query('select acos(max(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(count(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(sum(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(avg(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(percentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.45102681179626264) + + tdSql.query('select acos(apercentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.5707963267948966) + + tdSql.query('select acos(stddev(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(spread(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(twa(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(interp(c2)) from tba1 every(1s)') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 0.0) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, None) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, None) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, None) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(4, 1, None) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, None) + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, None) + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, None) + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, None) + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 1.5707963267948966) + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 0.0) + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, None) + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, None) + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, None) + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, None) + tdSql.checkData(15, 0, datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, None) + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, None) + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, None) + tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, None) + tdSql.checkData(19, 0, datetime.datetime(2021, 11, 11, 9, 0, 19)) + tdSql.checkData(19, 1, 1.5707963267948966) + tdSql.checkData(20, 0, datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 0.0) + 
tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, None) + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, None) + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, None) + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, None) + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, None) + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, None) + tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, None) + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, None) + tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 1.5707963267948966) + + tdSql.query('select acos(interp(c2)) from stba every(1s) group by tbname;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 0.0) + tdSql.checkData(0, 2, 'tba1') + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, None) + tdSql.checkData(1, 2, 'tba1') + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, None) + tdSql.checkData(2, 2, 'tba1') + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, None) + tdSql.checkData(3, 2, 'tba1') + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(4, 1, None) + tdSql.checkData(4, 2, 'tba1') + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, None) + tdSql.checkData(5, 2, 'tba1') + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, None) + tdSql.checkData(6, 2, 'tba1') + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, None) + tdSql.checkData(7, 2, 'tba1') + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, None) + tdSql.checkData(8, 2, 'tba1') + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 1.5707963267948966) + tdSql.checkData(9, 2, 'tba1') + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 0.0) + tdSql.checkData(10, 2, 'tba1') + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, None) + tdSql.checkData(11, 2, 'tba1') + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, None) + tdSql.checkData(12, 2, 'tba1') + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, None) + tdSql.checkData(13, 2, 'tba1') + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, None) + tdSql.checkData(14, 2, 'tba1') + tdSql.checkData(15, 0, datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, None) + tdSql.checkData(15, 2, 'tba1') + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, None) + tdSql.checkData(16, 2, 'tba1') + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, None) + tdSql.checkData(17, 2, 'tba1') + tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, None) + tdSql.checkData(18, 2, 'tba1') + tdSql.checkData(19, 0, datetime.datetime(2021, 11, 11, 9, 0, 19)) + 
tdSql.checkData(19, 1, 1.5707963267948966) + tdSql.checkData(19, 2, 'tba1') + tdSql.checkData(20, 0, datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 0.0) + tdSql.checkData(20, 2, 'tba1') + tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, None) + tdSql.checkData(21, 2, 'tba1') + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, None) + tdSql.checkData(22, 2, 'tba1') + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, None) + tdSql.checkData(23, 2, 'tba1') + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, None) + tdSql.checkData(24, 2, 'tba1') + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, None) + tdSql.checkData(25, 2, 'tba1') + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, None) + tdSql.checkData(26, 2, 'tba1') + tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, None) + tdSql.checkData(27, 2, 'tba1') + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, None) + tdSql.checkData(28, 2, 'tba1') + tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 1.5707963267948966) + tdSql.checkData(29, 2, 'tba1') + + tdSql.query('select acos(elapsed(ts)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query('select acos(rate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.4588742050141097) + + tdSql.query('select acos(irate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.5707963267948966) + + tdSql.query('select acos(first(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select acos(last(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.5707963267948966) + + tdSql.query('select acos(last_row(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.5707963267948966) + + #=========== end math_atan2 ========== + + + #=========== begin math_sin ========== + tdSql.query('select sin(13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.4201670368266409) + tdSql.checkData(1, 0, 0.4201670368266409) + tdSql.checkData(2, 0, 0.4201670368266409) + tdSql.checkData(3, 0, 0.4201670368266409) + tdSql.checkData(4, 0, 0.4201670368266409) + tdSql.checkData(5, 0, 0.4201670368266409) + tdSql.checkData(6, 0, 0.4201670368266409) + + tdSql.query('select sin(c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.972630067242408) + tdSql.checkData(6, 0, -0.972630067242408) + + tdSql.query('select sin(c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.18750655394138943) + tdSql.checkData(6, 0, -0.18750655394138943) + + tdSql.query('select sin(c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, -0.7249165551445564) + 
tdSql.checkData(6, 0, 0.7249165551445564) + + tdSql.query('select sin(c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.9999303766734422) + tdSql.checkData(6, 0, -0.9999303766734422) + + tdSql.query('select sin(c6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, -0.5218765233336585) + tdSql.checkData(6, 0, 0.5218765233336585) + + tdSql.query('select sin(c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.8728292970885063) + tdSql.checkData(6, 0, -0.8728292970885063) + + tdSql.query('select sin(c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.45199889806298343) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select sin(c11) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.3683616323063538) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select sin(c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.9986982434666626) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select sin(c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.9999303766734422) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select sin(a) from (select sin(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.7456241416655579) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.7890723435728884) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6866002607386249) + tdSql.checkData(5, 0, 0.8263696344332049) + tdSql.checkData(6, 0, -0.8263696344332049) + + tdSql.query('select sin(c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.9092974268256817) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.9893582466233818) + tdSql.checkData(5, 0, 0.9989477243796069) + tdSql.checkData(6, 0, -0.9989477243796069) + + tdSql.query('select sin((c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.9092974268256817) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.9893582466233818) + tdSql.checkData(5, 0, 0.9989477243796069) + tdSql.checkData(6, 0, -0.9989477243796069) + + tdSql.query('select sin((c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + 
tdSql.checkData(0, 0, 0.7568024953079282) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.9906073556948704) + tdSql.checkData(5, 0, 0.08341720348826624) + tdSql.checkData(6, 0, 0.9909625452221539) + + tdSql.query('select sin(11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 9.793449296524592e-06) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.0000097934492964) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 3.0000097934492964) + tdSql.checkData(5, 0, 126.0000097934493) + tdSql.checkData(6, 0, -127.9999902065507) + + tdSql.query('select sin(c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 11.841470984807897) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 11.909297426825681) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 10.243197504692072) + tdSql.checkData(5, 0, 11.972630067242408) + tdSql.checkData(6, 0, 10.027369932757592) + + tdSql.query('select sin(c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, 0.9092974268256817) + tdSql.checkData(1, 0, 0.9893582466233818) + tdSql.checkData(2, 0, 0.9989477243796069) + tdSql.checkData(3, 0, -0.9989477243796069) + + tdSql.query('select sin(c2) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -0.972630067242408) + tdSql.checkData(1, 0, 0.972630067242408) + tdSql.checkData(2, 0, -0.7568024953079282) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.9092974268256817) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.8414709848078965) + + tdSql.query('select sin(c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -0.9989477243796069) + tdSql.checkData(1, 0, 0.9989477243796069) + tdSql.checkData(2, 0, 0.9893582466233818) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.9092974268256817) + + tdSql.query('select sin(c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, 0.9893582466233818) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select sin(c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.972630067242408) + tdSql.checkData(6, 0, -0.972630067242408) + tdSql.checkData(7, 0, 0.8414709848078965) + tdSql.checkData(8, 0, 0.9092974268256817) + tdSql.checkData(9, 0, 0.1411200080598672) + tdSql.checkData(10, 0, -0.7568024953079282) + tdSql.checkData(11, 0, -0.9589242746631385) + tdSql.checkData(12, 0, -0.27941549819892586) + tdSql.checkData(13, 0, 0.6569865987187891) + + tdSql.query('select sin(c2) from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select sin(c4),t1 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select sin(c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, 0.18750655394138943) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, -0.18750655394138943) + 
tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, 0.8414709848078965) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, 0.9092974268256817) + tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, 0.1411200080598672) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, -0.7568024953079282) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, -0.9589242746631385) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, -0.27941549819892586) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, 0.6569865987187891) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select sin(c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, 0.9092974268256817) + tdSql.checkData(1, 1, 'tb2') + tdSql.checkData(2, 0, 0.1411200080598672) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, -0.7568024953079282) + tdSql.checkData(3, 1, 'tb2') + tdSql.checkData(4, 0, -0.9589242746631385) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, -0.27941549819892586) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, 0.6569865987187891) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select sin(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, 0.9092974268256817) + tdSql.checkData(2, 0, 0.5984721441039565) + tdSql.checkData(3, 0, -0.7568024953079282) + tdSql.checkData(4, 0, -0.977530117665097) + tdSql.checkData(5, 0, -0.5025573497604873) + tdSql.checkData(6, 0, 0.3048106211022167) + + tdSql.query('select sin(c2) from (select * from stb1);') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.972630067242408) + tdSql.checkData(6, 0, -0.972630067242408) + tdSql.checkData(7, 0, 0.8414709848078965) + tdSql.checkData(8, 0, 0.9092974268256817) + tdSql.checkData(9, 0, 0.1411200080598672) + tdSql.checkData(10, 0, -0.7568024953079282) + tdSql.checkData(11, 0, -0.9589242746631385) + tdSql.checkData(12, 0, -0.27941549819892586) + tdSql.checkData(13, 0, 0.6569865987187891) + + tdSql.query("select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, 0.9092974268256817) + tdSql.checkData(2, 0, 0.5984721441039565) + tdSql.checkData(3, 0, -0.7568024953079282) + tdSql.checkData(4, 0, -0.977530117665097) + tdSql.checkData(5, 0, -0.5025573497604873) + tdSql.checkData(6, 0, 0.3048106211022167) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, 0.9092974268256817) + tdSql.checkData(2, 0, 0.5984721441039565) + tdSql.checkData(3, 0, -0.7568024953079282) + tdSql.checkData(4, 0, -0.977530117665097) + tdSql.checkData(5, 0, -0.5025573497604873) + tdSql.checkData(6, 0, 0.3048106211022167) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select sin(a) from (select avg(c2) as a from stb1 where ts >= 
'2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 0.3048106211022167) + tdSql.checkData(4, 0, -0.5025573497604873) + tdSql.checkData(5, 0, -0.977530117665097) + tdSql.checkData(6, 0, -0.7568024953079282) + tdSql.checkData(7, 0, 0.5984721441039565) + tdSql.checkData(8, 0, 0.9092974268256817) + tdSql.checkData(9, 0, 0.8414709848078965) + + tdSql.query("select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.5025573497604873) + tdSql.checkData(4, 0, -0.977530117665097) + tdSql.checkData(5, 0, -0.7568024953079282) + tdSql.checkData(6, 0, 0.5984721441039565) + tdSql.checkData(7, 0, 0.9092974268256817) + tdSql.checkData(8, 0, 0.8414709848078965) + tdSql.checkData(9, 0, 0.3048106211022167) + + tdSql.query("select sin(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.3048106211022167) + tdSql.checkData(1, 0, 0.8414709848078965) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, 0.5984721441039565) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, -0.977530117665097) + tdSql.checkData(6, 0, -0.5025573497604873) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query('select sin(a) from (select sin(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.7456241416655579) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.7890723435728884) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6866002607386249) + tdSql.checkData(5, 0, 0.8263696344332049) + tdSql.checkData(6, 0, -0.8263696344332049) + + tdSql.query('select sin(tb1.c3),sin(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(0, 1, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 0.9092974268256817) + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 0.1411200080598672) + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(3, 1, -0.7568024953079282) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(4, 1, -0.9589242746631385) + tdSql.checkData(5, 0, 0.18750655394138943) + tdSql.checkData(5, 1, -0.27941549819892586) + tdSql.checkData(6, 0, -0.18750655394138943) + tdSql.checkData(6, 1, 0.6569865987187891) + + tdSql.query('select sin(c3) from tb1 union all select sin(c3) from tb2;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 0.1411200080598672) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, 0.18750655394138943) + tdSql.checkData(6, 0, -0.18750655394138943) + tdSql.checkData(7, 0, 0.8414709848078965) + tdSql.checkData(8, 0, 0.9092974268256817) + tdSql.checkData(9, 0, 0.1411200080598672) + tdSql.checkData(10, 0, -0.7568024953079282) + tdSql.checkData(11, 0, -0.9589242746631385) + tdSql.checkData(12, 0, -0.27941549819892586) + tdSql.checkData(13, 0, 0.6569865987187891) + + #=========== end math_sin ========== + 
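Note: the expected values in the sin blocks above are plain IEEE-754 double results, so they can be cross-checked offline with Python's math module. A minimal sketch, assuming tb1.c2 holds 1, NULL, 2, NULL, 4, 127, -127 (values inferred from the checked rows; no schema appears in this diff):

import math

# tb1.c2 as implied by the sin(c2) checks above; None stands for SQL NULL.
c2 = [1, None, 2, None, 4, 127, -127]
expected = [0.8414709848078965, None, 0.9092974268256817, None,
            -0.7568024953079282, 0.972630067242408, -0.972630067242408]

for v, want in zip(c2, expected):
    if v is None:
        assert want is None  # NULL inputs propagate to NULL outputs
    else:
        assert math.isclose(math.sin(v), want, rel_tol=1e-12)

The same recipe regenerates every literal in the cos and tan sections below, which differ from this one only in the function under test.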
+ + #=========== begin math_sin2 ========== + tdSql.query('select sin(stb1.c4),sin(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(0, 1, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 0.9092974268256817) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(2, 1, 0.1411200080598672) + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, -0.7568024953079282) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(4, 1, -0.9589242746631385) + tdSql.checkData(5, 0, -0.7249165551445564) + tdSql.checkData(5, 1, -0.27941549819892586) + tdSql.checkData(6, 0, 0.7249165551445564) + tdSql.checkData(6, 1, 0.6569865987187891) + + tdSql.query('select sin(c4) as a from stb1 union all select sin(c5) as a from stba;') + tdSql.checkRows(44) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9092974268256817) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.7568024953079282) + tdSql.checkData(5, 0, -0.7249165551445564) + tdSql.checkData(6, 0, 0.7249165551445564) + tdSql.checkData(7, 0, 0.8414709848078965) + tdSql.checkData(8, 0, 0.9092974268256817) + tdSql.checkData(9, 0, 0.9092974268256817) + tdSql.checkData(10, 0, -0.7568024953079282) + tdSql.checkData(11, 0, -0.9589242746631385) + tdSql.checkData(12, 0, -0.27941549819892586) + tdSql.checkData(13, 0, 0.6569865987187891) + tdSql.checkData(14, 0, 0.8414709848078965) + tdSql.checkData(15, 0, 0.9092974268256817) + tdSql.checkData(16, 0, 0.1411200080598672) + tdSql.checkData(17, 0, -0.7568024953079282) + tdSql.checkData(18, 0, -0.9589242746631385) + tdSql.checkData(19, 0, -0.27941549819892586) + tdSql.checkData(20, 0, 0.6569865987187891) + tdSql.checkData(21, 0, 0.9893582466233818) + tdSql.checkData(22, 0, 0.4121184852417566) + tdSql.checkData(23, 0, 0.0) + tdSql.checkData(24, 0, 0.8414709848078965) + tdSql.checkData(25, 0, 0.9092974268256817) + tdSql.checkData(26, 0, 0.1411200080598672) + tdSql.checkData(27, 0, -0.7568024953079282) + tdSql.checkData(28, 0, -0.9589242746631385) + tdSql.checkData(29, 0, -0.27941549819892586) + tdSql.checkData(30, 0, 0.6569865987187891) + tdSql.checkData(31, 0, 0.9893582466233818) + tdSql.checkData(32, 0, 0.4121184852417566) + tdSql.checkData(33, 0, 0.0) + tdSql.checkData(34, 0, 0.8414709848078965) + tdSql.checkData(35, 0, 0.9092974268256817) + tdSql.checkData(36, 0, 0.1411200080598672) + tdSql.checkData(37, 0, -0.7568024953079282) + tdSql.checkData(38, 0, -0.9589242746631385) + tdSql.checkData(39, 0, -0.27941549819892586) + tdSql.checkData(40, 0, 0.6569865987187891) + tdSql.checkData(41, 0, 0.9893582466233818) + tdSql.checkData(42, 0, 0.4121184852417566) + tdSql.checkData(43, 0, 0.0) + + tdSql.query('select sin(c2) from stba;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, 0.8414709848078965) + tdSql.checkData(1, 0, 0.9092974268256817) + tdSql.checkData(2, 0, 0.1411200080598672) + tdSql.checkData(3, 0, -0.7568024953079282) + tdSql.checkData(4, 0, -0.9589242746631385) + tdSql.checkData(5, 0, -0.27941549819892586) + tdSql.checkData(6, 0, 0.6569865987187891) + tdSql.checkData(7, 0, 0.9893582466233818) + tdSql.checkData(8, 0, 0.4121184852417566) + tdSql.checkData(9, 0, 0.0) + tdSql.checkData(10, 0, 0.8414709848078965) + tdSql.checkData(11, 0, 0.9092974268256817) + tdSql.checkData(12, 0, 0.1411200080598672) + tdSql.checkData(13, 0, -0.7568024953079282) + tdSql.checkData(14, 0, -0.9589242746631385) + tdSql.checkData(15, 0, 
-0.27941549819892586) + tdSql.checkData(16, 0, 0.6569865987187891) + tdSql.checkData(17, 0, 0.9893582466233818) + tdSql.checkData(18, 0, 0.4121184852417566) + tdSql.checkData(19, 0, 0.0) + tdSql.checkData(20, 0, 0.8414709848078965) + tdSql.checkData(21, 0, 0.9092974268256817) + tdSql.checkData(22, 0, 0.1411200080598672) + tdSql.checkData(23, 0, -0.7568024953079282) + tdSql.checkData(24, 0, -0.9589242746631385) + tdSql.checkData(25, 0, -0.27941549819892586) + tdSql.checkData(26, 0, 0.6569865987187891) + tdSql.checkData(27, 0, 0.9893582466233818) + tdSql.checkData(28, 0, 0.4121184852417566) + tdSql.checkData(29, 0, 0.0) + + tdSql.query('select sin(min(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select sin(max(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.4121184852417566) + + tdSql.query('select sin(count(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.9880316240928618) + + tdSql.query('select sin(sum(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.08836868610400143) + + tdSql.query('select sin(avg(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.977530117665097) + + tdSql.query('select sin(percentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.7833269096274833) + + tdSql.query('select sin(apercentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select sin(stddev(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.266067653696438) + + tdSql.query('select sin(spread(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.4121184852417566) + + tdSql.query('select sin(twa(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.9972292875667882) + + tdSql.query('select sin(interp(c2)) from tba1 every(1s)') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 0.8414709848078965) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, 0.9092974268256817) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, 0.1411200080598672) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, -0.7568024953079282) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(4, 1, -0.9589242746631385) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, -0.27941549819892586) + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, 0.6569865987187891) + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, 0.9893582466233818) + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, 0.4121184852417566) + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 0.0) + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 0.8414709848078965) + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, 0.9092974268256817) + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, 0.1411200080598672) + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, -0.7568024953079282) + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, -0.9589242746631385) + tdSql.checkData(15, 0, 
datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, -0.27941549819892586) + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, 0.6569865987187891) + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, 0.9893582466233818) + tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, 0.4121184852417566) + tdSql.checkData(19, 0, datetime.datetime(2021, 11, 11, 9, 0, 19)) + tdSql.checkData(19, 1, 0.0) + tdSql.checkData(20, 0, datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 0.8414709848078965) + tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, 0.9092974268256817) + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, 0.1411200080598672) + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, -0.7568024953079282) + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, -0.9589242746631385) + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, -0.27941549819892586) + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, 0.6569865987187891) + tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, 0.9893582466233818) + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, 0.4121184852417566) + tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 0.0) + + tdSql.query('select sin(interp(c2)) from stba every(1s) group by tbname;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 0.8414709848078965) + tdSql.checkData(0, 2, 'tba1') + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, 0.9092974268256817) + tdSql.checkData(1, 2, 'tba1') + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, 0.1411200080598672) + tdSql.checkData(2, 2, 'tba1') + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, -0.7568024953079282) + tdSql.checkData(3, 2, 'tba1') + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(4, 1, -0.9589242746631385) + tdSql.checkData(4, 2, 'tba1') + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, -0.27941549819892586) + tdSql.checkData(5, 2, 'tba1') + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, 0.6569865987187891) + tdSql.checkData(6, 2, 'tba1') + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, 0.9893582466233818) + tdSql.checkData(7, 2, 'tba1') + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, 0.4121184852417566) + tdSql.checkData(8, 2, 'tba1') + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 0.0) + tdSql.checkData(9, 2, 'tba1') + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 0.8414709848078965) + tdSql.checkData(10, 2, 'tba1') + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, 0.9092974268256817) + tdSql.checkData(11, 2, 'tba1') + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, 
0.1411200080598672) + tdSql.checkData(12, 2, 'tba1') + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, -0.7568024953079282) + tdSql.checkData(13, 2, 'tba1') + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, -0.9589242746631385) + tdSql.checkData(14, 2, 'tba1') + tdSql.checkData(15, 0, datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, -0.27941549819892586) + tdSql.checkData(15, 2, 'tba1') + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, 0.6569865987187891) + tdSql.checkData(16, 2, 'tba1') + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, 0.9893582466233818) + tdSql.checkData(17, 2, 'tba1') + tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, 0.4121184852417566) + tdSql.checkData(18, 2, 'tba1') + tdSql.checkData(19, 0, datetime.datetime(2021, 11, 11, 9, 0, 19)) + tdSql.checkData(19, 1, 0.0) + tdSql.checkData(19, 2, 'tba1') + tdSql.checkData(20, 0, datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 0.8414709848078965) + tdSql.checkData(20, 2, 'tba1') + tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, 0.9092974268256817) + tdSql.checkData(21, 2, 'tba1') + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, 0.1411200080598672) + tdSql.checkData(22, 2, 'tba1') + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, -0.7568024953079282) + tdSql.checkData(23, 2, 'tba1') + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, -0.9589242746631385) + tdSql.checkData(24, 2, 'tba1') + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, -0.27941549819892586) + tdSql.checkData(25, 2, 'tba1') + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, 0.6569865987187891) + tdSql.checkData(26, 2, 'tba1') + tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, 0.9893582466233818) + tdSql.checkData(27, 2, 'tba1') + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, 0.4121184852417566) + tdSql.checkData(28, 2, 'tba1') + tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 0.0) + tdSql.checkData(29, 2, 'tba1') + + tdSql.query('select sin(elapsed(ts)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.04177312885292625) + + tdSql.query('select sin(rate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.7811787741147804) + + tdSql.query('select sin(irate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select sin(first(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.8414709848078965) + + tdSql.query('select sin(last(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select sin(last_row(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + #=========== end math_sin2 ========== + + + #=========== begin math_cos ========== + tdSql.query('select cos(13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.9074467814501962) + tdSql.checkData(1, 0, 0.9074467814501962) + tdSql.checkData(2, 0, 0.9074467814501962) + tdSql.checkData(3, 0, 0.9074467814501962) + tdSql.checkData(4, 0, 0.9074467814501962) + 
tdSql.checkData(5, 0, 0.9074467814501962) + tdSql.checkData(6, 0, 0.9074467814501962) + + tdSql.query('select cos(c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.23235910202965793) + tdSql.checkData(6, 0, 0.23235910202965793) + + tdSql.query('select cos(c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.9899924966004454) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.9822633517692823) + tdSql.checkData(6, 0, 0.9822633517692823) + + tdSql.query('select cos(c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, -0.6888366918779438) + tdSql.checkData(6, 0, -0.6888366918779438) + + tdSql.query('select cos(c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.9899924966004454) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.011800076512800236) + tdSql.checkData(6, 0, 0.011800076512800236) + + tdSql.query('select cos(c6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.8530210398303042) + tdSql.checkData(6, 0, 0.8530210398303042) + + tdSql.query('select cos(c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.9899924966004454) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.4880256326710555) + tdSql.checkData(6, 0, 0.4880256326710555) + + tdSql.query('select cos(c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, -0.892018495407942) + tdSql.checkData(6, 0, 1.0) + + tdSql.query('select cos(c11) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.9899924966004454) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.9296825844580496) + tdSql.checkData(6, 0, 1.0) + + tdSql.query('select cos(c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, -0.051008023845301335) + tdSql.checkData(6, 0, 1.0) + + tdSql.query('select cos(c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.9899924966004454) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.011800076512800236) + tdSql.checkData(6, 0, 1.0) + + tdSql.query('select cos(a) from 
(select cos(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8575532158463934) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9146533258523714) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.7938734492261525) + tdSql.checkData(5, 0, 0.9731258638638398) + tdSql.checkData(6, 0, 0.9731258638638398) + + tdSql.query('select cos(c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -0.4161468365471424) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.14550003380861354) + tdSql.checkData(5, 0, 0.04586331820534665) + tdSql.checkData(6, 0, 0.04586331820534665) + + tdSql.query('select cos((c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -0.4161468365471424) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.14550003380861354) + tdSql.checkData(5, 0, 0.04586331820534665) + tdSql.checkData(6, 0, 0.04586331820534665) + + tdSql.query('select cos((c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -0.6536436208636119) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.1367372182078336) + tdSql.checkData(5, 0, -0.9965147114630055) + tdSql.checkData(6, 0, 0.13413886076313122) + + tdSql.query('select cos(11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0044256979880508) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2.004425697988051) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4.004425697988051) + tdSql.checkData(5, 0, 127.00442569798805) + tdSql.checkData(6, 0, -126.99557430201195) + + tdSql.query('select cos(c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 11.54030230586814) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 10.583853163452858) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 10.346356379136388) + tdSql.checkData(5, 0, 11.232359102029658) + tdSql.checkData(6, 0, 11.232359102029658) + + tdSql.query('select cos(c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, -0.4161468365471424) + tdSql.checkData(1, 0, -0.14550003380861354) + tdSql.checkData(2, 0, 0.04586331820534665) + tdSql.checkData(3, 0, 0.04586331820534665) + + tdSql.query('select cos(c2) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.23235910202965793) + tdSql.checkData(1, 0, 0.23235910202965793) + tdSql.checkData(2, 0, -0.6536436208636119) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.4161468365471424) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.5403023058681398) + + tdSql.query('select cos(c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.04586331820534665) + tdSql.checkData(1, 0, 0.04586331820534665) + tdSql.checkData(2, 0, -0.14550003380861354) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, -0.4161468365471424) + + tdSql.query('select cos(c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, -0.14550003380861354) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select cos(c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6536436208636119) + 
tdSql.checkData(5, 0, 0.23235910202965793) + tdSql.checkData(6, 0, 0.23235910202965793) + tdSql.checkData(7, 0, 0.5403023058681398) + tdSql.checkData(8, 0, -0.4161468365471424) + tdSql.checkData(9, 0, -0.9899924966004454) + tdSql.checkData(10, 0, -0.6536436208636119) + tdSql.checkData(11, 0, 0.28366218546322625) + tdSql.checkData(12, 0, 0.960170286650366) + tdSql.checkData(13, 0, 0.7539022543433046) + + tdSql.query('select cos(c2) from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select cos(c4),t1 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select cos(c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, -0.9899924966004454) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, 0.9822633517692823) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, 0.9822633517692823) + tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, 0.5403023058681398) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, -0.4161468365471424) + tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, -0.9899924966004454) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, -0.6536436208636119) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, 0.28366218546322625) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, 0.960170286650366) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, 0.7539022543433046) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select cos(c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, -0.4161468365471424) + tdSql.checkData(1, 1, 'tb2') + tdSql.checkData(2, 0, -0.9899924966004454) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, -0.6536436208636119) + tdSql.checkData(3, 1, 'tb2') + tdSql.checkData(4, 0, 0.28366218546322625) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, 0.960170286650366) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, 0.7539022543433046) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select cos(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, -0.4161468365471424) + tdSql.checkData(2, 0, -0.8011436155469337) + tdSql.checkData(3, 0, -0.6536436208636119) + tdSql.checkData(4, 0, -0.2107957994307797) + tdSql.checkData(5, 0, -0.8645438740756395) + tdSql.checkData(6, 0, -0.9524129804151563) + + tdSql.query('select cos(c2) from (select * from stb1);') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.23235910202965793) + tdSql.checkData(6, 0, 0.23235910202965793) + tdSql.checkData(7, 0, 0.5403023058681398) + tdSql.checkData(8, 0, -0.4161468365471424) + tdSql.checkData(9, 0, -0.9899924966004454) + tdSql.checkData(10, 0, -0.6536436208636119) + tdSql.checkData(11, 0, 0.28366218546322625) + tdSql.checkData(12, 0, 0.960170286650366) + tdSql.checkData(13, 0, 0.7539022543433046) + + tdSql.query("select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) 
fill(null));") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, -0.4161468365471424) + tdSql.checkData(2, 0, -0.8011436155469337) + tdSql.checkData(3, 0, -0.6536436208636119) + tdSql.checkData(4, 0, -0.2107957994307797) + tdSql.checkData(5, 0, -0.8645438740756395) + tdSql.checkData(6, 0, -0.9524129804151563) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, -0.4161468365471424) + tdSql.checkData(2, 0, -0.8011436155469337) + tdSql.checkData(3, 0, -0.6536436208636119) + tdSql.checkData(4, 0, -0.2107957994307797) + tdSql.checkData(5, 0, -0.8645438740756395) + tdSql.checkData(6, 0, -0.9524129804151563) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.9524129804151563) + tdSql.checkData(4, 0, -0.8645438740756395) + tdSql.checkData(5, 0, -0.2107957994307797) + tdSql.checkData(6, 0, -0.6536436208636119) + tdSql.checkData(7, 0, -0.8011436155469337) + tdSql.checkData(8, 0, -0.4161468365471424) + tdSql.checkData(9, 0, 0.5403023058681398) + + tdSql.query("select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.8645438740756395) + tdSql.checkData(4, 0, -0.2107957994307797) + tdSql.checkData(5, 0, -0.6536436208636119) + tdSql.checkData(6, 0, -0.8011436155469337) + tdSql.checkData(7, 0, -0.4161468365471424) + tdSql.checkData(8, 0, 0.5403023058681398) + tdSql.checkData(9, 0, -0.9524129804151563) + + tdSql.query("select cos(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, -0.9524129804151563) + tdSql.checkData(1, 0, 0.5403023058681398) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(3, 0, -0.8011436155469337) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, -0.2107957994307797) + tdSql.checkData(6, 0, -0.8645438740756395) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query('select cos(a) from (select cos(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.8575532158463934) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.9146533258523714) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.7938734492261525) + tdSql.checkData(5, 0, 0.9731258638638398) + tdSql.checkData(6, 0, 0.9731258638638398) + + tdSql.query('select cos(tb1.c3),cos(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(0, 1, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, -0.4161468365471424) + tdSql.checkData(2, 0, None) + 
tdSql.checkData(2, 1, -0.9899924966004454) + tdSql.checkData(3, 0, -0.9899924966004454) + tdSql.checkData(3, 1, -0.6536436208636119) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(4, 1, 0.28366218546322625) + tdSql.checkData(5, 0, 0.9822633517692823) + tdSql.checkData(5, 1, 0.960170286650366) + tdSql.checkData(6, 0, 0.9822633517692823) + tdSql.checkData(6, 1, 0.7539022543433046) + + tdSql.query('select cos(c3) from tb1 union all select cos(c3) from tb2;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.9899924966004454) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, 0.9822633517692823) + tdSql.checkData(6, 0, 0.9822633517692823) + tdSql.checkData(7, 0, 0.5403023058681398) + tdSql.checkData(8, 0, -0.4161468365471424) + tdSql.checkData(9, 0, -0.9899924966004454) + tdSql.checkData(10, 0, -0.6536436208636119) + tdSql.checkData(11, 0, 0.28366218546322625) + tdSql.checkData(12, 0, 0.960170286650366) + tdSql.checkData(13, 0, 0.7539022543433046) + + #=========== end math_cos ========== + + + #=========== begin math_cos2 ========== + tdSql.query('select cos(stb1.c4),cos(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(0, 1, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, -0.4161468365471424) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(2, 1, -0.9899924966004454) + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, -0.6536436208636119) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(4, 1, 0.28366218546322625) + tdSql.checkData(5, 0, -0.6888366918779438) + tdSql.checkData(5, 1, 0.960170286650366) + tdSql.checkData(6, 0, -0.6888366918779438) + tdSql.checkData(6, 1, 0.7539022543433046) + + tdSql.query('select cos(c4) as a from stb1 union all select cos(c5) as a from stba;') + tdSql.checkRows(44) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -0.4161468365471424) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -0.6536436208636119) + tdSql.checkData(5, 0, -0.6888366918779438) + tdSql.checkData(6, 0, -0.6888366918779438) + tdSql.checkData(7, 0, 0.5403023058681398) + tdSql.checkData(8, 0, -0.4161468365471424) + tdSql.checkData(9, 0, -0.4161468365471424) + tdSql.checkData(10, 0, -0.6536436208636119) + tdSql.checkData(11, 0, 0.28366218546322625) + tdSql.checkData(12, 0, 0.960170286650366) + tdSql.checkData(13, 0, 0.7539022543433046) + tdSql.checkData(14, 0, 0.5403023058681398) + tdSql.checkData(15, 0, -0.4161468365471424) + tdSql.checkData(16, 0, -0.9899924966004454) + tdSql.checkData(17, 0, -0.6536436208636119) + tdSql.checkData(18, 0, 0.28366218546322625) + tdSql.checkData(19, 0, 0.960170286650366) + tdSql.checkData(20, 0, 0.7539022543433046) + tdSql.checkData(21, 0, -0.14550003380861354) + tdSql.checkData(22, 0, -0.9111302618846769) + tdSql.checkData(23, 0, 1.0) + tdSql.checkData(24, 0, 0.5403023058681398) + tdSql.checkData(25, 0, -0.4161468365471424) + tdSql.checkData(26, 0, -0.9899924966004454) + tdSql.checkData(27, 0, -0.6536436208636119) + tdSql.checkData(28, 0, 0.28366218546322625) + tdSql.checkData(29, 0, 0.960170286650366) + tdSql.checkData(30, 0, 0.7539022543433046) + tdSql.checkData(31, 0, -0.14550003380861354) + tdSql.checkData(32, 0, -0.9111302618846769) + tdSql.checkData(33, 0, 1.0) + tdSql.checkData(34, 0, 0.5403023058681398) + 
tdSql.checkData(35, 0, -0.4161468365471424) + tdSql.checkData(36, 0, -0.9899924966004454) + tdSql.checkData(37, 0, -0.6536436208636119) + tdSql.checkData(38, 0, 0.28366218546322625) + tdSql.checkData(39, 0, 0.960170286650366) + tdSql.checkData(40, 0, 0.7539022543433046) + tdSql.checkData(41, 0, -0.14550003380861354) + tdSql.checkData(42, 0, -0.9111302618846769) + tdSql.checkData(43, 0, 1.0) + + tdSql.query('select cos(c2) from stba;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, 0.5403023058681398) + tdSql.checkData(1, 0, -0.4161468365471424) + tdSql.checkData(2, 0, -0.9899924966004454) + tdSql.checkData(3, 0, -0.6536436208636119) + tdSql.checkData(4, 0, 0.28366218546322625) + tdSql.checkData(5, 0, 0.960170286650366) + tdSql.checkData(6, 0, 0.7539022543433046) + tdSql.checkData(7, 0, -0.14550003380861354) + tdSql.checkData(8, 0, -0.9111302618846769) + tdSql.checkData(9, 0, 1.0) + tdSql.checkData(10, 0, 0.5403023058681398) + tdSql.checkData(11, 0, -0.4161468365471424) + tdSql.checkData(12, 0, -0.9899924966004454) + tdSql.checkData(13, 0, -0.6536436208636119) + tdSql.checkData(14, 0, 0.28366218546322625) + tdSql.checkData(15, 0, 0.960170286650366) + tdSql.checkData(16, 0, 0.7539022543433046) + tdSql.checkData(17, 0, -0.14550003380861354) + tdSql.checkData(18, 0, -0.9111302618846769) + tdSql.checkData(19, 0, 1.0) + tdSql.checkData(20, 0, 0.5403023058681398) + tdSql.checkData(21, 0, -0.4161468365471424) + tdSql.checkData(22, 0, -0.9899924966004454) + tdSql.checkData(23, 0, -0.6536436208636119) + tdSql.checkData(24, 0, 0.28366218546322625) + tdSql.checkData(25, 0, 0.960170286650366) + tdSql.checkData(26, 0, 0.7539022543433046) + tdSql.checkData(27, 0, -0.14550003380861354) + tdSql.checkData(28, 0, -0.9111302618846769) + tdSql.checkData(29, 0, 1.0) + + tdSql.query('select cos(min(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.0) + + tdSql.query('select cos(max(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.9111302618846769) + + tdSql.query('select cos(count(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.15425144988758405) + + tdSql.query('select cos(sum(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.9960878351411849) + + tdSql.query('select cos(avg(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.2107957994307797) + + tdSql.query('select cos(percentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.6216099682706645) + + tdSql.query('select cos(apercentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.0) + + tdSql.query('select cos(stddev(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.9639543576624737) + + tdSql.query('select cos(spread(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.9111302618846769) + + tdSql.query('select cos(twa(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.07438916600578388) + + tdSql.query('select cos(interp(c2)) from tba1 every(1s)') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 0.5403023058681398) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, -0.4161468365471424) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, -0.9899924966004454) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, -0.6536436208636119) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(4, 1, 
0.28366218546322625) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, 0.960170286650366) + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, 0.7539022543433046) + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, -0.14550003380861354) + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, -0.9111302618846769) + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 1.0) + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 0.5403023058681398) + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, -0.4161468365471424) + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, -0.9899924966004454) + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, -0.6536436208636119) + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, 0.28366218546322625) + tdSql.checkData(15, 0, datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, 0.960170286650366) + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, 0.7539022543433046) + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, -0.14550003380861354) + tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, -0.9111302618846769) + tdSql.checkData(19, 0, datetime.datetime(2021, 11, 11, 9, 0, 19)) + tdSql.checkData(19, 1, 1.0) + tdSql.checkData(20, 0, datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 0.5403023058681398) + tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, -0.4161468365471424) + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, -0.9899924966004454) + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, -0.6536436208636119) + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, 0.28366218546322625) + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, 0.960170286650366) + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, 0.7539022543433046) + tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, -0.14550003380861354) + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, -0.9111302618846769) + tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 1.0) + + tdSql.query('select cos(interp(c2)) from stba every(1s) group by tbname;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 0.5403023058681398) + tdSql.checkData(0, 2, 'tba1') + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, -0.4161468365471424) + tdSql.checkData(1, 2, 'tba1') + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, -0.9899924966004454) + tdSql.checkData(2, 2, 'tba1') + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, -0.6536436208636119) + tdSql.checkData(3, 2, 'tba1') + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + 
tdSql.checkData(4, 1, 0.28366218546322625) + tdSql.checkData(4, 2, 'tba1') + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, 0.960170286650366) + tdSql.checkData(5, 2, 'tba1') + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, 0.7539022543433046) + tdSql.checkData(6, 2, 'tba1') + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, -0.14550003380861354) + tdSql.checkData(7, 2, 'tba1') + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, -0.9111302618846769) + tdSql.checkData(8, 2, 'tba1') + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 1.0) + tdSql.checkData(9, 2, 'tba1') + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 0.5403023058681398) + tdSql.checkData(10, 2, 'tba1') + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, -0.4161468365471424) + tdSql.checkData(11, 2, 'tba1') + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, -0.9899924966004454) + tdSql.checkData(12, 2, 'tba1') + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, -0.6536436208636119) + tdSql.checkData(13, 2, 'tba1') + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, 0.28366218546322625) + tdSql.checkData(14, 2, 'tba1') + tdSql.checkData(15, 0, datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, 0.960170286650366) + tdSql.checkData(15, 2, 'tba1') + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, 0.7539022543433046) + tdSql.checkData(16, 2, 'tba1') + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, -0.14550003380861354) + tdSql.checkData(17, 2, 'tba1') + tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, -0.9111302618846769) + tdSql.checkData(18, 2, 'tba1') + tdSql.checkData(19, 0, datetime.datetime(2021, 11, 11, 9, 0, 19)) + tdSql.checkData(19, 1, 1.0) + tdSql.checkData(19, 2, 'tba1') + tdSql.checkData(20, 0, datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 0.5403023058681398) + tdSql.checkData(20, 2, 'tba1') + tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, -0.4161468365471424) + tdSql.checkData(21, 2, 'tba1') + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, -0.9899924966004454) + tdSql.checkData(22, 2, 'tba1') + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, -0.6536436208636119) + tdSql.checkData(23, 2, 'tba1') + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, 0.28366218546322625) + tdSql.checkData(24, 2, 'tba1') + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, 0.960170286650366) + tdSql.checkData(25, 2, 'tba1') + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, 0.7539022543433046) + tdSql.checkData(26, 2, 'tba1') + tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, -0.14550003380861354) + tdSql.checkData(27, 2, 'tba1') + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, -0.9111302618846769) + tdSql.checkData(28, 2, 'tba1') + 
tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 1.0) + tdSql.checkData(29, 2, 'tba1') + + tdSql.query('select cos(elapsed(ts)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.9991271218948251) + + tdSql.query('select cos(rate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.6243073945361602) + + tdSql.query('select cos(irate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.0) + + tdSql.query('select cos(first(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.5403023058681398) + + tdSql.query('select cos(last(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.0) + + tdSql.query('select cos(last_row(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.0) + + #=========== end math_cos2 ========== + + + #=========== begin math_tan ========== + tdSql.query('select tan(13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.4630211329364896) + tdSql.checkData(1, 0, 0.4630211329364896) + tdSql.checkData(2, 0, 0.4630211329364896) + tdSql.checkData(3, 0, 0.4630211329364896) + tdSql.checkData(4, 0, 0.4630211329364896) + tdSql.checkData(5, 0, 0.4630211329364896) + tdSql.checkData(6, 0, 0.4630211329364896) + + tdSql.query('select tan(c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -2.185039863261519) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(5, 0, 4.185891831851989) + tdSql.checkData(6, 0, -4.185891831851989) + + tdSql.query('select tan(c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.1425465430742778) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(5, 0, 0.19089234430221486) + tdSql.checkData(6, 0, -0.19089234430221486) + + tdSql.query('select tan(c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -2.185039863261519) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(5, 0, 1.0523779637351338) + tdSql.checkData(6, 0, -1.0523779637351338) + + tdSql.query('select tan(c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.1425465430742778) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(5, 0, 84.73931296875567) + tdSql.checkData(6, 0, -84.73931296875567) + + tdSql.query('select tan(c6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -2.185039863261519) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(5, 0, -0.6117979498342481) + tdSql.checkData(6, 0, 0.6117979498342481) + + tdSql.query('select tan(c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.1425465430742778) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(5, 0, 1.7884906829818519) + tdSql.checkData(6, 0, -1.7884906829818519) + + tdSql.query('select tan(c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -2.185039863261519) + tdSql.checkData(3, 0, None) + 
tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(5, 0, -0.5067147154345417) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select tan(c11) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.1425465430742778) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(5, 0, 0.39622301037411284) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select tan(c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -2.185039863261519) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(5, 0, -19.579238091943036) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select tan(c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.1425465430742778) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(5, 0, 84.73931296875567) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select tan(a) from (select tan(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 74.68593339876537) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.4179285755053868) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 2.282204450191367) + tdSql.checkData(5, 0, 1.7205151938006633) + tdSql.checkData(6, 0, -1.7205151938006633) + + tdSql.query('select tan(c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -2.185039863261519) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -6.799711455220379) + tdSql.checkData(5, 0, 21.780973629229287) + tdSql.checkData(6, 0, -21.780973629229287) + + tdSql.query('select tan((c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -2.185039863261519) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -6.799711455220379) + tdSql.checkData(5, 0, 21.780973629229287) + tdSql.checkData(6, 0, -21.780973629229287) + + tdSql.query('select tan((c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -1.1578212823495775) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 7.2446066160948055) + tdSql.checkData(5, 0, -0.08370895334379919) + tdSql.checkData(6, 0, 7.387587307544252) + + tdSql.query('select tan(11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -224.95084645419513) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -223.95084645419513) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -221.95084645419513) + tdSql.checkData(5, 0, -98.95084645419513) + tdSql.checkData(6, 0, -352.95084645419513) + + tdSql.query('select tan(c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 12.557407724654903) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 8.814960136738481) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 12.157821282349577) + tdSql.checkData(5, 0, 15.18589183185199) + tdSql.checkData(6, 0, 6.814108168148011) + + tdSql.query('select tan(c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, -2.185039863261519) + tdSql.checkData(1, 0, -6.799711455220379) + tdSql.checkData(2, 0, 21.780973629229287) + tdSql.checkData(3, 0, -21.780973629229287) + + tdSql.query('select tan(c2) from tb1 order by ts desc;') + 
tdSql.checkRows(7) + tdSql.checkData(0, 0, -4.185891831851989) + tdSql.checkData(1, 0, 4.185891831851989) + tdSql.checkData(2, 0, 1.1578212823495775) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, -2.185039863261519) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.5574077246549023) + + tdSql.query('select tan(c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -21.780973629229287) + tdSql.checkData(1, 0, 21.780973629229287) + tdSql.checkData(2, 0, -6.799711455220379) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, -2.185039863261519) + + tdSql.query('select tan(c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, -6.799711455220379) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select tan(c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -2.185039863261519) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(5, 0, 4.185891831851989) + tdSql.checkData(6, 0, -4.185891831851989) + tdSql.checkData(7, 0, 1.5574077246549023) + tdSql.checkData(8, 0, -2.185039863261519) + tdSql.checkData(9, 0, -0.1425465430742778) + tdSql.checkData(10, 0, 1.1578212823495775) + tdSql.checkData(11, 0, -3.380515006246586) + tdSql.checkData(12, 0, -0.29100619138474915) + tdSql.checkData(13, 0, 0.8714479827243188) + + tdSql.query('select tan(c2) from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select tan(c4),t1 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select tan(c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, -0.1425465430742778) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, 0.19089234430221486) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, -0.19089234430221486) + tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, 1.5574077246549023) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, -2.185039863261519) + tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, -0.1425465430742778) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, 1.1578212823495775) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, -3.380515006246586) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, -0.29100619138474915) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, 0.8714479827243188) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select tan(c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, -2.185039863261519) + tdSql.checkData(1, 1, 'tb2') + tdSql.checkData(2, 0, -0.1425465430742778) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, 1.1578212823495775) + tdSql.checkData(3, 1, 'tb2') + tdSql.checkData(4, 0, -3.380515006246586) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, -0.29100619138474915) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, 0.8714479827243188) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select tan(a) from (select avg(c2) as a from stb1 interval(1s));') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 
1.5574077246549023) + tdSql.checkData(1, 0, -2.185039863261519) + tdSql.checkData(2, 0, -0.7470222972386603) + tdSql.checkData(3, 0, 1.1578212823495775) + tdSql.checkData(4, 0, 4.637332054551185) + tdSql.checkData(5, 0, 0.5812976817374548) + tdSql.checkData(6, 0, -0.320040389379563) + + tdSql.query('select tan(c2) from (select * from stb1);') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -2.185039863261519) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(5, 0, 4.185891831851989) + tdSql.checkData(6, 0, -4.185891831851989) + tdSql.checkData(7, 0, 1.5574077246549023) + tdSql.checkData(8, 0, -2.185039863261519) + tdSql.checkData(9, 0, -0.1425465430742778) + tdSql.checkData(10, 0, 1.1578212823495775) + tdSql.checkData(11, 0, -3.380515006246586) + tdSql.checkData(12, 0, -0.29100619138474915) + tdSql.checkData(13, 0, 0.8714479827243188) + + tdSql.query("select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null));") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, -2.185039863261519) + tdSql.checkData(2, 0, -0.7470222972386603) + tdSql.checkData(3, 0, 1.1578212823495775) + tdSql.checkData(4, 0, 4.637332054551185) + tdSql.checkData(5, 0, 0.5812976817374548) + tdSql.checkData(6, 0, -0.320040389379563) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, -2.185039863261519) + tdSql.checkData(2, 0, -0.7470222972386603) + tdSql.checkData(3, 0, 1.1578212823495775) + tdSql.checkData(4, 0, 4.637332054551185) + tdSql.checkData(5, 0, 0.5812976817374548) + tdSql.checkData(6, 0, -0.320040389379563) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query("select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by ts desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.320040389379563) + tdSql.checkData(4, 0, 0.5812976817374548) + tdSql.checkData(5, 0, 4.637332054551185) + tdSql.checkData(6, 0, 1.1578212823495775) + tdSql.checkData(7, 0, -0.7470222972386603) + tdSql.checkData(8, 0, -2.185039863261519) + tdSql.checkData(9, 0, 1.5574077246549023) + + tdSql.query("select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a desc;") + tdSql.checkRows(10) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, 0.5812976817374548) + tdSql.checkData(4, 0, 4.637332054551185) + tdSql.checkData(5, 0, 1.1578212823495775) + tdSql.checkData(6, 0, -0.7470222972386603) + tdSql.checkData(7, 0, -2.185039863261519) + tdSql.checkData(8, 0, 1.5574077246549023) + tdSql.checkData(9, 0, -0.320040389379563) + + tdSql.query("select tan(a) from (select avg(c2) as a from stb1 where ts >= '2021-11-11 09:00:00.000' and ts <= '2021-11-11 09:00:09.000' interval(1s) fill(null)) order by a;") + 
tdSql.checkRows(10) + tdSql.checkData(0, 0, -0.320040389379563) + tdSql.checkData(1, 0, 1.5574077246549023) + tdSql.checkData(2, 0, -2.185039863261519) + tdSql.checkData(3, 0, -0.7470222972386603) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(5, 0, 4.637332054551185) + tdSql.checkData(6, 0, 0.5812976817374548) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, None) + tdSql.checkData(9, 0, None) + + tdSql.query('select tan(a) from (select tan(c2) as a from tb1);') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 74.68593339876537) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.4179285755053868) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 2.282204450191367) + tdSql.checkData(5, 0, 1.7205151938006633) + tdSql.checkData(6, 0, -1.7205151938006633) + + tdSql.query('select tan(tb1.c3),tan(tb2.c3) from tb1,tb2 where tb1.ts=tb2.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(0, 1, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, -2.185039863261519) + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, -0.1425465430742778) + tdSql.checkData(3, 0, -0.1425465430742778) + tdSql.checkData(3, 1, 1.1578212823495775) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(4, 1, -3.380515006246586) + tdSql.checkData(5, 0, 0.19089234430221486) + tdSql.checkData(5, 1, -0.29100619138474915) + tdSql.checkData(6, 0, -0.19089234430221486) + tdSql.checkData(6, 1, 0.8714479827243188) + + tdSql.query('select tan(c3) from tb1 union all select tan(c3) from tb2;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, -0.1425465430742778) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(5, 0, 0.19089234430221486) + tdSql.checkData(6, 0, -0.19089234430221486) + tdSql.checkData(7, 0, 1.5574077246549023) + tdSql.checkData(8, 0, -2.185039863261519) + tdSql.checkData(9, 0, -0.1425465430742778) + tdSql.checkData(10, 0, 1.1578212823495775) + tdSql.checkData(11, 0, -3.380515006246586) + tdSql.checkData(12, 0, -0.29100619138474915) + tdSql.checkData(13, 0, 0.8714479827243188) + + #=========== end math_tan ========== + + + #=========== begin math_tan2 ========== + tdSql.query('select tan(stb1.c4),tan(stba.c5) from stb1,stba where stb1.t1=stba.t1 and stb1.ts=stba.ts;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(0, 1, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, -2.185039863261519) + tdSql.checkData(2, 0, -2.185039863261519) + tdSql.checkData(2, 1, -0.1425465430742778) + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 1.1578212823495775) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(4, 1, -3.380515006246586) + tdSql.checkData(5, 0, 1.0523779637351338) + tdSql.checkData(5, 1, -0.29100619138474915) + tdSql.checkData(6, 0, -1.0523779637351338) + tdSql.checkData(6, 1, 0.8714479827243188) + + tdSql.query('select tan(c4) as a from stb1 union all select tan(c5) as a from stba;') + tdSql.checkRows(44) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, -2.185039863261519) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.1578212823495775) + tdSql.checkData(5, 0, 1.0523779637351338) + tdSql.checkData(6, 0, -1.0523779637351338) + tdSql.checkData(7, 0, 1.5574077246549023) + tdSql.checkData(8, 0, -2.185039863261519) + tdSql.checkData(9, 0, -2.185039863261519) + tdSql.checkData(10, 0, 
1.1578212823495775) + tdSql.checkData(11, 0, -3.380515006246586) + tdSql.checkData(12, 0, -0.29100619138474915) + tdSql.checkData(13, 0, 0.8714479827243188) + tdSql.checkData(14, 0, 1.5574077246549023) + tdSql.checkData(15, 0, -2.185039863261519) + tdSql.checkData(16, 0, -0.1425465430742778) + tdSql.checkData(17, 0, 1.1578212823495775) + tdSql.checkData(18, 0, -3.380515006246586) + tdSql.checkData(19, 0, -0.29100619138474915) + tdSql.checkData(20, 0, 0.8714479827243188) + tdSql.checkData(21, 0, -6.799711455220379) + tdSql.checkData(22, 0, -0.45231565944180985) + tdSql.checkData(23, 0, 0.0) + tdSql.checkData(24, 0, 1.5574077246549023) + tdSql.checkData(25, 0, -2.185039863261519) + tdSql.checkData(26, 0, -0.1425465430742778) + tdSql.checkData(27, 0, 1.1578212823495775) + tdSql.checkData(28, 0, -3.380515006246586) + tdSql.checkData(29, 0, -0.29100619138474915) + tdSql.checkData(30, 0, 0.8714479827243188) + tdSql.checkData(31, 0, -6.799711455220379) + tdSql.checkData(32, 0, -0.45231565944180985) + tdSql.checkData(33, 0, 0.0) + tdSql.checkData(34, 0, 1.5574077246549023) + tdSql.checkData(35, 0, -2.185039863261519) + tdSql.checkData(36, 0, -0.1425465430742778) + tdSql.checkData(37, 0, 1.1578212823495775) + tdSql.checkData(38, 0, -3.380515006246586) + tdSql.checkData(39, 0, -0.29100619138474915) + tdSql.checkData(40, 0, 0.8714479827243188) + tdSql.checkData(41, 0, -6.799711455220379) + tdSql.checkData(42, 0, -0.45231565944180985) + tdSql.checkData(43, 0, 0.0) + + tdSql.query('select tan(c2) from stba;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, 1.5574077246549023) + tdSql.checkData(1, 0, -2.185039863261519) + tdSql.checkData(2, 0, -0.1425465430742778) + tdSql.checkData(3, 0, 1.1578212823495775) + tdSql.checkData(4, 0, -3.380515006246586) + tdSql.checkData(5, 0, -0.29100619138474915) + tdSql.checkData(6, 0, 0.8714479827243188) + tdSql.checkData(7, 0, -6.799711455220379) + tdSql.checkData(8, 0, -0.45231565944180985) + tdSql.checkData(9, 0, 0.0) + tdSql.checkData(10, 0, 1.5574077246549023) + tdSql.checkData(11, 0, -2.185039863261519) + tdSql.checkData(12, 0, -0.1425465430742778) + tdSql.checkData(13, 0, 1.1578212823495775) + tdSql.checkData(14, 0, -3.380515006246586) + tdSql.checkData(15, 0, -0.29100619138474915) + tdSql.checkData(16, 0, 0.8714479827243188) + tdSql.checkData(17, 0, -6.799711455220379) + tdSql.checkData(18, 0, -0.45231565944180985) + tdSql.checkData(19, 0, 0.0) + tdSql.checkData(20, 0, 1.5574077246549023) + tdSql.checkData(21, 0, -2.185039863261519) + tdSql.checkData(22, 0, -0.1425465430742778) + tdSql.checkData(23, 0, 1.1578212823495775) + tdSql.checkData(24, 0, -3.380515006246586) + tdSql.checkData(25, 0, -0.29100619138474915) + tdSql.checkData(26, 0, 0.8714479827243188) + tdSql.checkData(27, 0, -6.799711455220379) + tdSql.checkData(28, 0, -0.45231565944180985) + tdSql.checkData(29, 0, 0.0) + + tdSql.query('select tan(min(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select tan(max(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.45231565944180985) + + tdSql.query('select tan(count(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -6.405331196646276) + + tdSql.query('select tan(sum(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.08871575677006045) + + tdSql.query('select tan(avg(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 4.637332054551185) + + tdSql.query('select tan(percentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.260158217550339) + + 
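# tan() composed with further aggregates: apercentile, stddev, spread, twa and interp +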
tdSql.query('select tan(apercentile(c2, 10)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select tan(stddev(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.27601685866292947) + + tdSql.query('select tan(spread(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.45231565944180985) + + tdSql.query('select tan(twa(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 13.40557155176672) + + tdSql.query('select tan(interp(c2)) from tba1 every(1s)') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 1.5574077246549023) + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, -2.185039863261519) + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, -0.1425465430742778) + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, 1.1578212823495775) + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(4, 1, -3.380515006246586) + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, -0.29100619138474915) + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, 0.8714479827243188) + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, -6.799711455220379) + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, -0.45231565944180985) + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 0.0) + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 1.5574077246549023) + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, -2.185039863261519) + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, -0.1425465430742778) + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, 1.1578212823495775) + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, -3.380515006246586) + tdSql.checkData(15, 0, datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, -0.29100619138474915) + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, 0.8714479827243188) + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, -6.799711455220379) + tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, -0.45231565944180985) + tdSql.checkData(19, 0, datetime.datetime(2021, 11, 11, 9, 0, 19)) + tdSql.checkData(19, 1, 0.0) + tdSql.checkData(20, 0, datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 1.5574077246549023) + tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, -2.185039863261519) + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, -0.1425465430742778) + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, 1.1578212823495775) + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, -3.380515006246586) + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, -0.29100619138474915) + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, 0.8714479827243188) + 
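# interpolated timestamps keep advancing in 1s steps through row 29 +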
tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, -6.799711455220379) + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, -0.45231565944180985) + tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 0.0) + + tdSql.query('select tan(interp(c2)) from stba every(1s) group by tbname;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, datetime.datetime(2021, 11, 11, 9, 0)) + tdSql.checkData(0, 1, 1.5574077246549023) + tdSql.checkData(0, 2, 'tba1') + tdSql.checkData(1, 0, datetime.datetime(2021, 11, 11, 9, 0, 1)) + tdSql.checkData(1, 1, -2.185039863261519) + tdSql.checkData(1, 2, 'tba1') + tdSql.checkData(2, 0, datetime.datetime(2021, 11, 11, 9, 0, 2)) + tdSql.checkData(2, 1, -0.1425465430742778) + tdSql.checkData(2, 2, 'tba1') + tdSql.checkData(3, 0, datetime.datetime(2021, 11, 11, 9, 0, 3)) + tdSql.checkData(3, 1, 1.1578212823495775) + tdSql.checkData(3, 2, 'tba1') + tdSql.checkData(4, 0, datetime.datetime(2021, 11, 11, 9, 0, 4)) + tdSql.checkData(4, 1, -3.380515006246586) + tdSql.checkData(4, 2, 'tba1') + tdSql.checkData(5, 0, datetime.datetime(2021, 11, 11, 9, 0, 5)) + tdSql.checkData(5, 1, -0.29100619138474915) + tdSql.checkData(5, 2, 'tba1') + tdSql.checkData(6, 0, datetime.datetime(2021, 11, 11, 9, 0, 6)) + tdSql.checkData(6, 1, 0.8714479827243188) + tdSql.checkData(6, 2, 'tba1') + tdSql.checkData(7, 0, datetime.datetime(2021, 11, 11, 9, 0, 7)) + tdSql.checkData(7, 1, -6.799711455220379) + tdSql.checkData(7, 2, 'tba1') + tdSql.checkData(8, 0, datetime.datetime(2021, 11, 11, 9, 0, 8)) + tdSql.checkData(8, 1, -0.45231565944180985) + tdSql.checkData(8, 2, 'tba1') + tdSql.checkData(9, 0, datetime.datetime(2021, 11, 11, 9, 0, 9)) + tdSql.checkData(9, 1, 0.0) + tdSql.checkData(9, 2, 'tba1') + tdSql.checkData(10, 0, datetime.datetime(2021, 11, 11, 9, 0, 10)) + tdSql.checkData(10, 1, 1.5574077246549023) + tdSql.checkData(10, 2, 'tba1') + tdSql.checkData(11, 0, datetime.datetime(2021, 11, 11, 9, 0, 11)) + tdSql.checkData(11, 1, -2.185039863261519) + tdSql.checkData(11, 2, 'tba1') + tdSql.checkData(12, 0, datetime.datetime(2021, 11, 11, 9, 0, 12)) + tdSql.checkData(12, 1, -0.1425465430742778) + tdSql.checkData(12, 2, 'tba1') + tdSql.checkData(13, 0, datetime.datetime(2021, 11, 11, 9, 0, 13)) + tdSql.checkData(13, 1, 1.1578212823495775) + tdSql.checkData(13, 2, 'tba1') + tdSql.checkData(14, 0, datetime.datetime(2021, 11, 11, 9, 0, 14)) + tdSql.checkData(14, 1, -3.380515006246586) + tdSql.checkData(14, 2, 'tba1') + tdSql.checkData(15, 0, datetime.datetime(2021, 11, 11, 9, 0, 15)) + tdSql.checkData(15, 1, -0.29100619138474915) + tdSql.checkData(15, 2, 'tba1') + tdSql.checkData(16, 0, datetime.datetime(2021, 11, 11, 9, 0, 16)) + tdSql.checkData(16, 1, 0.8714479827243188) + tdSql.checkData(16, 2, 'tba1') + tdSql.checkData(17, 0, datetime.datetime(2021, 11, 11, 9, 0, 17)) + tdSql.checkData(17, 1, -6.799711455220379) + tdSql.checkData(17, 2, 'tba1') + tdSql.checkData(18, 0, datetime.datetime(2021, 11, 11, 9, 0, 18)) + tdSql.checkData(18, 1, -0.45231565944180985) + tdSql.checkData(18, 2, 'tba1') + tdSql.checkData(19, 0, datetime.datetime(2021, 11, 11, 9, 0, 19)) + tdSql.checkData(19, 1, 0.0) + tdSql.checkData(19, 2, 'tba1') + tdSql.checkData(20, 0, datetime.datetime(2021, 11, 11, 9, 0, 20)) + tdSql.checkData(20, 1, 1.5574077246549023) + tdSql.checkData(20, 2, 'tba1') + tdSql.checkData(21, 0, datetime.datetime(2021, 11, 11, 9, 0, 21)) + tdSql.checkData(21, 1, -2.185039863261519) + 
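# the tbname column stays 'tba1' for every interpolated row +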
tdSql.checkData(21, 2, 'tba1') + tdSql.checkData(22, 0, datetime.datetime(2021, 11, 11, 9, 0, 22)) + tdSql.checkData(22, 1, -0.1425465430742778) + tdSql.checkData(22, 2, 'tba1') + tdSql.checkData(23, 0, datetime.datetime(2021, 11, 11, 9, 0, 23)) + tdSql.checkData(23, 1, 1.1578212823495775) + tdSql.checkData(23, 2, 'tba1') + tdSql.checkData(24, 0, datetime.datetime(2021, 11, 11, 9, 0, 24)) + tdSql.checkData(24, 1, -3.380515006246586) + tdSql.checkData(24, 2, 'tba1') + tdSql.checkData(25, 0, datetime.datetime(2021, 11, 11, 9, 0, 25)) + tdSql.checkData(25, 1, -0.29100619138474915) + tdSql.checkData(25, 2, 'tba1') + tdSql.checkData(26, 0, datetime.datetime(2021, 11, 11, 9, 0, 26)) + tdSql.checkData(26, 1, 0.8714479827243188) + tdSql.checkData(26, 2, 'tba1') + tdSql.checkData(27, 0, datetime.datetime(2021, 11, 11, 9, 0, 27)) + tdSql.checkData(27, 1, -6.799711455220379) + tdSql.checkData(27, 2, 'tba1') + tdSql.checkData(28, 0, datetime.datetime(2021, 11, 11, 9, 0, 28)) + tdSql.checkData(28, 1, -0.45231565944180985) + tdSql.checkData(28, 2, 'tba1') + tdSql.checkData(29, 0, datetime.datetime(2021, 11, 11, 9, 0, 29)) + tdSql.checkData(29, 1, 0.0) + tdSql.checkData(29, 2, 'tba1') + + tdSql.query('select tan(elapsed(ts)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.04180962355791556) + + tdSql.query('select tan(rate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.2512726598331747) + + tdSql.query('select tan(irate(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select tan(first(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1.5574077246549023) + + tdSql.query('select tan(last(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + tdSql.query('select tan(last_row(c2)) from tba1;') + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0.0) + + #=========== end math_tan2 ========== + + + #=========== begin math_pow ========== + tdSql.query('select pow(c2,13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 8192.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 67108864.0) + tdSql.checkData(5, 0, 2.235879388560037e+27) + tdSql.checkData(6, 0, -2.235879388560037e+27) + + tdSql.query('select pow(c2,c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 4.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 256.0) + tdSql.checkData(5, 0, 1.5243074119957227e+267) + tdSql.checkData(6, 0, -6.560356474884124e-268) + + tdSql.query('select pow(c2,c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 256.0) + tdSql.checkData(5, 0, inf) + tdSql.checkData(6, 0, -0.0) + + tdSql.query('select pow(c2,c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 4.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 256.0) + tdSql.checkData(5, 0, inf) + tdSql.checkData(6, 0, -0.0) + + tdSql.query('select pow(c2,c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 256.0) + tdSql.checkData(5, 0, inf) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select pow(c2,c6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 
4.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 256.0) + tdSql.checkData(5, 0, inf) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select pow(c2,c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 256.0) + tdSql.checkData(5, 0, inf) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select pow(c2,c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 4.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 256.0) + tdSql.checkData(5, 0, inf) + tdSql.checkData(6, 0, 1.0) + + tdSql.query('select pow(c2,c11) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 256.0) + tdSql.checkData(5, 0, inf) + tdSql.checkData(6, 0, 1.0) + + tdSql.query('select pow(c2,c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 4.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 256.0) + tdSql.checkData(5, 0, inf) + tdSql.checkData(6, 0, 1.0) + + tdSql.query('select pow(c2,c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 256.0) + tdSql.checkData(5, 0, inf) + tdSql.checkData(6, 0, 1.0) + + tdSql.query('select pow(c2,c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 65536.0) + tdSql.checkData(5, 0, inf) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select pow(c2,(c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 65536.0) + tdSql.checkData(5, 0, inf) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select pow(c2,(c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 268435456.0) + tdSql.checkData(5, 0, inf) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select pow(c2,11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 2.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2050.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4194308.0) + tdSql.checkData(5, 0, 1.3862479934032099e+23) + tdSql.checkData(6, 0, -1.3862479934032099e+23) + + tdSql.query('select pow(c2,c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 12.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 15.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 267.0) + tdSql.checkData(5, 0, 1.5243074119957227e+267) + tdSql.checkData(6, 0, 11.0) + + tdSql.query('select pow(c2,c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, 65536.0) + tdSql.checkData(2, 0, inf) + tdSql.checkData(3, 0, 0.0) + + tdSql.query('select pow(c2,c2) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, -6.560356474884124e-268) + tdSql.checkData(1, 0, 1.5243074119957227e+267) + tdSql.checkData(2, 0, 256.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4.0) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 
1.0) + + tdSql.query('select pow(c2,c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, inf) + tdSql.checkData(2, 0, 65536.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 1.0) + + tdSql.query('select pow(c2,c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, 65536.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select pow(c2,c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 4.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 256.0) + tdSql.checkData(5, 0, 1.5243074119957227e+267) + tdSql.checkData(6, 0, -6.560356474884124e-268) + tdSql.checkData(7, 0, 1.0) + tdSql.checkData(8, 0, 4.0) + tdSql.checkData(9, 0, 27.0) + tdSql.checkData(10, 0, 256.0) + tdSql.checkData(11, 0, 3125.0) + tdSql.checkData(12, 0, 46656.0) + tdSql.checkData(13, 0, 823543.0) + + tdSql.query('select pow(c2,c2) from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select pow(c2,c4),t1 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select pow(c2,c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, 256.0) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, inf) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, -0.0) + tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, 1.0) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, 4.0) + tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, 27.0) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, 256.0) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, 3125.0) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, 46656.0) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, 823543.0) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select pow(c2,c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, 4.0) + tdSql.checkData(1, 1, 'tb2') + tdSql.checkData(2, 0, 27.0) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, 256.0) + tdSql.checkData(3, 1, 'tb2') + tdSql.checkData(4, 0, 3125.0) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, 46656.0) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, 823543.0) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select pow(c2,c2) from (select * from stb1);') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 4.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 256.0) + tdSql.checkData(5, 0, 1.5243074119957227e+267) + tdSql.checkData(6, 0, -6.560356474884124e-268) + tdSql.checkData(7, 0, 1.0) + tdSql.checkData(8, 0, 4.0) + tdSql.checkData(9, 0, 27.0) + tdSql.checkData(10, 0, 256.0) + tdSql.checkData(11, 0, 3125.0) + tdSql.checkData(12, 0, 46656.0) + tdSql.checkData(13, 0, 823543.0) + + tdSql.query('select pow(c2,c3) from tb1 union all select pow(c2,c3) from tb2;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 256.0) + tdSql.checkData(5, 0, inf) + tdSql.checkData(6, 0, -0.0) + 
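# rows 7-13 of the union come from tb2 +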
tdSql.checkData(7, 0, 1.0) + tdSql.checkData(8, 0, 4.0) + tdSql.checkData(9, 0, 27.0) + tdSql.checkData(10, 0, 256.0) + tdSql.checkData(11, 0, 3125.0) + tdSql.checkData(12, 0, 46656.0) + tdSql.checkData(13, 0, 823543.0) + + #=========== end math_pow ========== + + + #=========== begin math_pow2 ========== + tdSql.query('select pow(c2,c4) as a from stb1 union all select pow(c2,c5) as a from stba;') + tdSql.checkRows(44) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 4.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 256.0) + tdSql.checkData(5, 0, inf) + tdSql.checkData(6, 0, -0.0) + tdSql.checkData(7, 0, 1.0) + tdSql.checkData(8, 0, 4.0) + tdSql.checkData(9, 0, 9.0) + tdSql.checkData(10, 0, 256.0) + tdSql.checkData(11, 0, 3125.0) + tdSql.checkData(12, 0, 46656.0) + tdSql.checkData(13, 0, 823543.0) + tdSql.checkData(14, 0, 1.0) + tdSql.checkData(15, 0, 4.0) + tdSql.checkData(16, 0, 27.0) + tdSql.checkData(17, 0, 256.0) + tdSql.checkData(18, 0, 3125.0) + tdSql.checkData(19, 0, 46656.0) + tdSql.checkData(20, 0, 823543.0) + tdSql.checkData(21, 0, 16777216.0) + tdSql.checkData(22, 0, 387420489.0) + tdSql.checkData(23, 0, 1.0) + tdSql.checkData(24, 0, 1.0) + tdSql.checkData(25, 0, 4.0) + tdSql.checkData(26, 0, 27.0) + tdSql.checkData(27, 0, 256.0) + tdSql.checkData(28, 0, 3125.0) + tdSql.checkData(29, 0, 46656.0) + tdSql.checkData(30, 0, 823543.0) + tdSql.checkData(31, 0, 16777216.0) + tdSql.checkData(32, 0, 387420489.0) + tdSql.checkData(33, 0, 1.0) + tdSql.checkData(34, 0, 1.0) + tdSql.checkData(35, 0, 4.0) + tdSql.checkData(36, 0, 27.0) + tdSql.checkData(37, 0, 256.0) + tdSql.checkData(38, 0, 3125.0) + tdSql.checkData(39, 0, 46656.0) + tdSql.checkData(40, 0, 823543.0) + tdSql.checkData(41, 0, 16777216.0) + tdSql.checkData(42, 0, 387420489.0) + tdSql.checkData(43, 0, 1.0) + + tdSql.query('select pow(c2,c2) from stba;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, 4.0) + tdSql.checkData(2, 0, 27.0) + tdSql.checkData(3, 0, 256.0) + tdSql.checkData(4, 0, 3125.0) + tdSql.checkData(5, 0, 46656.0) + tdSql.checkData(6, 0, 823543.0) + tdSql.checkData(7, 0, 16777216.0) + tdSql.checkData(8, 0, 387420489.0) + tdSql.checkData(9, 0, 1.0) + tdSql.checkData(10, 0, 1.0) + tdSql.checkData(11, 0, 4.0) + tdSql.checkData(12, 0, 27.0) + tdSql.checkData(13, 0, 256.0) + tdSql.checkData(14, 0, 3125.0) + tdSql.checkData(15, 0, 46656.0) + tdSql.checkData(16, 0, 823543.0) + tdSql.checkData(17, 0, 16777216.0) + tdSql.checkData(18, 0, 387420489.0) + tdSql.checkData(19, 0, 1.0) + tdSql.checkData(20, 0, 1.0) + tdSql.checkData(21, 0, 4.0) + tdSql.checkData(22, 0, 27.0) + tdSql.checkData(23, 0, 256.0) + tdSql.checkData(24, 0, 3125.0) + tdSql.checkData(25, 0, 46656.0) + tdSql.checkData(26, 0, 823543.0) + tdSql.checkData(27, 0, 16777216.0) + tdSql.checkData(28, 0, 387420489.0) + tdSql.checkData(29, 0, 1.0) + + #=========== end math_pow2 ========== + + + #=========== begin math_log ========== + tdSql.query('select log(c2,13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 0.27023815442731974) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.5404763088546395) + tdSql.checkData(5, 0, 1.8886092516277813) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,c2) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 1.0) + 
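# log(x,x) is 1.0 only for valid bases; NULL, 1 and negative inputs yield None +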
tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 0.465913680008469) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,c4) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 0.22544144151366513) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,c5) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 0.11093150296463757) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,c6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 0.05459909915208762) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,c7) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 0.006824887406193638) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,c10) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 0.8748229478056855) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,c11) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 0.4367939948774267) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,c12) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 0.2183963964662152) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,c13) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 0.11093150296463757) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,c2 + c3) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.6666666666666667) + tdSql.checkData(5, 0, 0.4657403972991969) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,(c2 + c3)) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.6666666666666667) + tdSql.checkData(5, 0, 0.4657403972991969) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,(c2 * c3)+c4-6) from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 0.5252990700743871) + tdSql.checkData(5, 0, 
0.22542113212116985) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,11)+c2 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, 1.0) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 2.2890648263178877) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 4.578129652635775) + tdSql.checkData(5, 0, 129.02018292517226) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,c2)+11 from tb1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 12.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 12.0) + tdSql.checkData(5, 0, 12.0) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,c2+c3) from tb1 where c2 is not null and c3 is not null;') + tdSql.checkRows(4) + tdSql.checkData(0, 0, 0.0) + tdSql.checkData(1, 0, 0.6666666666666667) + tdSql.checkData(2, 0, 0.4657403972991969) + tdSql.checkData(3, 0, None) + + tdSql.query('select log(c2,c2) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 1.0) + tdSql.checkData(2, 0, 1.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, None) + + tdSql.query('select log(c2,c2+c3) from tb1 order by ts desc;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 0.4657403972991969) + tdSql.checkData(2, 0, 0.6666666666666667) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, None) + tdSql.checkData(5, 0, None) + tdSql.checkData(6, 0, 0.0) + + tdSql.query('select log(c2,c2+c3) from tb1 order by ts desc limit 3 offset 2;') + tdSql.checkRows(3) + tdSql.checkData(0, 0, 0.6666666666666667) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + tdSql.query('select log(c2,c2) from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 1.0) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, 1.0) + tdSql.checkData(9, 0, 1.0) + tdSql.checkData(10, 0, 1.0) + tdSql.checkData(11, 0, 1.0) + tdSql.checkData(12, 0, 1.0) + tdSql.checkData(13, 0, 1.0) + + tdSql.query('select log(c2,c2) from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select log(c2,c4),t1 from stb1 order by ts desc;') + tdSql.checkRows(14) + tdSql.query('select log(c2,c3),tbname from stb1;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, None) + tdSql.checkData(0, 1, 'tb1') + tdSql.checkData(1, 0, None) + tdSql.checkData(1, 1, 'tb1') + tdSql.checkData(2, 0, None) + tdSql.checkData(2, 1, 'tb1') + tdSql.checkData(3, 0, None) + tdSql.checkData(3, 1, 'tb1') + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(4, 1, 'tb1') + tdSql.checkData(5, 0, 0.465913680008469) + tdSql.checkData(5, 1, 'tb1') + tdSql.checkData(6, 0, None) + tdSql.checkData(6, 1, 'tb1') + tdSql.checkData(7, 0, None) + tdSql.checkData(7, 1, 'tb2') + tdSql.checkData(8, 0, 1.0) + tdSql.checkData(8, 1, 'tb2') + tdSql.checkData(9, 0, 1.0) + tdSql.checkData(9, 1, 'tb2') + tdSql.checkData(10, 0, 1.0) + tdSql.checkData(10, 1, 'tb2') + tdSql.checkData(11, 0, 1.0) + tdSql.checkData(11, 1, 'tb2') + tdSql.checkData(12, 0, 1.0) + tdSql.checkData(12, 1, 'tb2') + tdSql.checkData(13, 0, 1.0) + tdSql.checkData(13, 1, 'tb2') + + tdSql.query('select log(c2,c3),tbname from stb1 where t1 > 1;') + tdSql.checkRows(7) + tdSql.checkData(0, 0, None) + tdSql.checkData(0, 1, 'tb2') + tdSql.checkData(1, 0, 1.0) + tdSql.checkData(1, 1, 'tb2') + 
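# every remaining tb2 row checks out as 1.0, suggesting c2 equals c3 for these rows +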
tdSql.checkData(2, 0, 1.0) + tdSql.checkData(2, 1, 'tb2') + tdSql.checkData(3, 0, 1.0) + tdSql.checkData(3, 1, 'tb2') + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(4, 1, 'tb2') + tdSql.checkData(5, 0, 1.0) + tdSql.checkData(5, 1, 'tb2') + tdSql.checkData(6, 0, 1.0) + tdSql.checkData(6, 1, 'tb2') + + tdSql.query('select log(c2,c2) from (select * from stb1);') + tdSql.checkRows(14) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 1.0) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, 1.0) + tdSql.checkData(9, 0, 1.0) + tdSql.checkData(10, 0, 1.0) + tdSql.checkData(11, 0, 1.0) + tdSql.checkData(12, 0, 1.0) + tdSql.checkData(13, 0, 1.0) + + tdSql.query('select log(c2,c3) from tb1 union all select log(c2,c3) from tb2;') + tdSql.checkRows(14) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 0.465913680008469) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, 1.0) + tdSql.checkData(9, 0, 1.0) + tdSql.checkData(10, 0, 1.0) + tdSql.checkData(11, 0, 1.0) + tdSql.checkData(12, 0, 1.0) + tdSql.checkData(13, 0, 1.0) + + #=========== end math_log ========== + + + #=========== begin math_log2 ========== + tdSql.query('select log(c2,c4) as a from stb1 union all select log(c2,c5) as a from stba;') + tdSql.checkRows(44) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, 1.0) + tdSql.checkData(3, 0, None) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 0.22544144151366513) + tdSql.checkData(6, 0, None) + tdSql.checkData(7, 0, None) + tdSql.checkData(8, 0, 1.0) + tdSql.checkData(9, 0, 1.5849625007211563) + tdSql.checkData(10, 0, 1.0) + tdSql.checkData(11, 0, 1.0) + tdSql.checkData(12, 0, 1.0) + tdSql.checkData(13, 0, 1.0) + tdSql.checkData(14, 0, None) + tdSql.checkData(15, 0, 1.0) + tdSql.checkData(16, 0, 1.0) + tdSql.checkData(17, 0, 1.0) + tdSql.checkData(18, 0, 1.0) + tdSql.checkData(19, 0, 1.0) + tdSql.checkData(20, 0, 1.0) + tdSql.checkData(21, 0, 1.0) + tdSql.checkData(22, 0, 1.0) + tdSql.checkData(23, 0, None) + tdSql.checkData(24, 0, None) + tdSql.checkData(25, 0, 1.0) + tdSql.checkData(26, 0, 1.0) + tdSql.checkData(27, 0, 1.0) + tdSql.checkData(28, 0, 1.0) + tdSql.checkData(29, 0, 1.0) + tdSql.checkData(30, 0, 1.0) + tdSql.checkData(31, 0, 1.0) + tdSql.checkData(32, 0, 1.0) + tdSql.checkData(33, 0, None) + tdSql.checkData(34, 0, None) + tdSql.checkData(35, 0, 1.0) + tdSql.checkData(36, 0, 1.0) + tdSql.checkData(37, 0, 1.0) + tdSql.checkData(38, 0, 1.0) + tdSql.checkData(39, 0, 1.0) + tdSql.checkData(40, 0, 1.0) + tdSql.checkData(41, 0, 1.0) + tdSql.checkData(42, 0, 1.0) + tdSql.checkData(43, 0, None) + + tdSql.query('select log(c2,c2) from stba;') + tdSql.checkRows(30) + tdSql.checkData(0, 0, None) + tdSql.checkData(1, 0, 1.0) + tdSql.checkData(2, 0, 1.0) + tdSql.checkData(3, 0, 1.0) + tdSql.checkData(4, 0, 1.0) + tdSql.checkData(5, 0, 1.0) + tdSql.checkData(6, 0, 1.0) + tdSql.checkData(7, 0, 1.0) + tdSql.checkData(8, 0, 1.0) + tdSql.checkData(9, 0, None) + tdSql.checkData(10, 0, None) + tdSql.checkData(11, 0, 1.0) + tdSql.checkData(12, 0, 1.0) + tdSql.checkData(13, 0, 1.0) + tdSql.checkData(14, 0, 1.0) + tdSql.checkData(15, 0, 1.0) + tdSql.checkData(16, 0, 1.0) + tdSql.checkData(17, 0, 1.0) + tdSql.checkData(18, 0, 1.0) + tdSql.checkData(19, 0, None) 
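+ # the same ten-row pattern repeats for rows 20-29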
+ tdSql.checkData(20, 0, None) + tdSql.checkData(21, 0, 1.0) + tdSql.checkData(22, 0, 1.0) + tdSql.checkData(23, 0, 1.0) + tdSql.checkData(24, 0, 1.0) + tdSql.checkData(25, 0, 1.0) + tdSql.checkData(26, 0, 1.0) + tdSql.checkData(27, 0, 1.0) + tdSql.checkData(28, 0, 1.0) + tdSql.checkData(29, 0, None) + + #=========== end math_log2 ========== + + + + tdSql.execute('drop database math_funcs') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py index 404f922dc7a6fa07acf3fb74c93e66f9d052c6fe..a0567f3510a95708fb7469ec4549b81bda3aade2 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py @@ -37,12 +37,44 @@ class TDTestCase: tdSql.checkData(0, 0, 8) tdSql.query("select count(*) from db.stb1") tdSql.checkData(0, 0, 160) + tdSql.query("select distinct(c5) from db.stb1") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c6) from db.stb1") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c7) from db.stb1") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c8) from db.stb1") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c9) from db.stb1") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c10) from db.stb1") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c11) from db.stb1") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c12) from db.stb1") + tdSql.checkData(0, 0, None) tdSql.execute("reset query cache") tdSql.query("select count(tbname) from db.`stb1-2`") tdSql.checkData(0, 0, 8) tdSql.query("select count(*) from db.`stb1-2`") tdSql.checkData(0, 0, 160) + tdSql.query("select distinct(c5) from db.`stb1-2`") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c6) from db.`stb1-2`") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c7) from db.`stb1-2`") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c8) from db.`stb1-2`") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c9) from db.`stb1-2`") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c10) from db.`stb1-2`") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c11) from db.`stb1-2`") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c12) from db.`stb1-2`") + tdSql.checkData(0, 0, None) cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json" tdLog.info("%s" % cmd) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py b/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py index b859bef981c5cb454625901eb7427b65b0395d38..672da9a56e9985785b966559911d3e84b428518a 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py @@ -287,4 +287,4 @@ class TDTestCase: tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_auto_create_table.json index e52fadc8576c76e28079eb935f1c95d0302f6b41..8dc121e5a89d6567c5fbe79aab15c717f5cb2881 
100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_auto_create_table.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_auto_create_table.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_pool_size": 20, + "thread_pool_size": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "prepared_rand": 100, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query.json index 9ef1b933d8ea019004bc373529c26f4ba5c58018..12d6c383d3af8b66f4a120b885173aaed67d5d84 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query.json @@ -4,7 +4,7 @@ "confirm_parameter_prompt": "no", "databases": "db", "query_mode": "rest", - "thread_pool_size": 20, + "thread_pool_size": 10, "response_buffer": 10000, "specified_table_query": { diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_auto_create_table.json index 9bb5c4292cf9c1fb6628517dfc044fe2065e2c2e..3cdd2b911eaee7887312b659781d18f68caa1221 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_auto_create_table.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_auto_create_table.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_pool_size": 20, + "thread_pool_size": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "prepared_rand": 100, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json index 45cf05d3e620f0dfed070d01150ad4961087efaf..7326cbcba2e2eefffe49d34771cb301bcee7a16f 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_pool_size": 20, + "thread_pool_size": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "prepared_rand": 10, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/json_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_json_alltypes.json similarity index 99% rename from tests/develop-test/5-taos-tools/taosbenchmark/json/json_alltypes.json rename to tests/develop-test/5-taos-tools/taosbenchmark/json/sml_json_alltypes.json index f0c0f9649385006b6859c0247e86d9f0ed3cfb31..d23d12e800362668c325ff5d252ff4ff9c411656 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/json_alltypes.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_json_alltypes.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_pool_size": 20, + "thread_pool_size": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "prepared_rand": 10, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_json.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_json.json new file mode 100644 index 0000000000000000000000000000000000000000..e037fff0aaa23fbb91cdc66e663aa1fe79d03864 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_json.json @@ -0,0 +1,107 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 
6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [ + { + "dbinfo": { + "name": "db3", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp": 2, + "walLevel": 1, + "cachelast": 0, + "quorum": 1, + "fsync": 3000, + "update": 0 + }, + "super_tables": [ + { + "name": "stb1", + "child_table_exists": "no", + "childtable_count": 8, + "childtable_prefix": "stb1_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml-rest", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + } + ], + "tags": [ + { + "type": "INT" + } + ] + },{ + "name": "stb2", + "child_table_exists": "no", + "childtable_count": 8, + "childtable_prefix": "stb2_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml-rest", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 3, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + } + ], + "tags": [ + { + "type": "INT" + } + ] + } + ] + } + ] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_line.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_line.json new file mode 100644 index 0000000000000000000000000000000000000000..5000da489fc4c97f1ee3515c095655d6b183bfff --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_line.json @@ -0,0 +1,267 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [ + { + "dbinfo": { + "name": "db2", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp": 2, + "walLevel": 1, + "cachelast": 0, + "quorum": 1, + "fsync": 3000, + "update": 0 + }, + "super_tables": [ + { + "name": "stb1", + "child_table_exists": "no", + "childtable_count": 8, + "childtable_prefix": "stb1_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml-rest", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + }, + 
{ + "type": "BIGINT" + }, + { + "type": "FLOAT" + }, + { + "type": "DOUBLE" + }, + { + "type": "SMALLINT" + }, + { + "type": "TINYINT" + }, + { + "type": "BOOL" + }, + { + "type": "NCHAR", + "len": 17, + "count": 1 + }, + { + "type": "UINT" + }, + { + "type": "UBIGINT" + }, + { + "type": "UTINYINT" + }, + { + "type": "USMALLINT" + }, + { + "type": "BINARY", + "len": 19, + "count": 1 + } + ], + "tags": [ + { + "type": "INT" + }, + { + "type": "BIGINT" + }, + { + "type": "FLOAT" + }, + { + "type": "DOUBLE" + }, + { + "type": "SMALLINT" + }, + { + "type": "TINYINT" + }, + { + "type": "BOOL" + }, + { + "type": "NCHAR", + "len": 17, + "count": 1 + }, + { + "type": "UINT" + }, + { + "type": "UBIGINT" + }, + { + "type": "UTINYINT" + }, + { + "type": "USMALLINT" + }, + { + "type": "BINARY", + "len": 19, + "count": 1 + } + ] + },{ + "name": "stb2", + "child_table_exists": "no", + "childtable_count": 8, + "childtable_prefix": "stb2_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml-rest", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 6, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + }, + { + "type": "BIGINT" + }, + { + "type": "FLOAT" + }, + { + "type": "DOUBLE" + }, + { + "type": "SMALLINT" + }, + { + "type": "TINYINT" + }, + { + "type": "BOOL" + }, + { + "type": "NCHAR", + "len": 17, + "count": 1 + }, + { + "type": "UINT" + }, + { + "type": "UBIGINT" + }, + { + "type": "UTINYINT" + }, + { + "type": "USMALLINT" + }, + { + "type": "BINARY", + "len": 19, + "count": 1 + } + ], + "tags": [ + { + "type": "INT" + }, + { + "type": "BIGINT" + }, + { + "type": "FLOAT" + }, + { + "type": "DOUBLE" + }, + { + "type": "SMALLINT" + }, + { + "type": "TINYINT" + }, + { + "type": "BOOL" + }, + { + "type": "NCHAR", + "len": 17, + "count": 1 + }, + { + "type": "UINT" + }, + { + "type": "UBIGINT" + }, + { + "type": "UTINYINT" + }, + { + "type": "USMALLINT" + }, + { + "type": "BINARY", + "len": 19, + "count": 1 + } + ] + } + ] + } + ] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_telnet.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_telnet.json new file mode 100644 index 0000000000000000000000000000000000000000..c9c4554e015a3927fe0b7fa97e7b7dc8826a95d1 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_telnet.json @@ -0,0 +1,187 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [ + { + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp": 2, + "walLevel": 1, + "cachelast": 0, + "quorum": 1, + "fsync": 3000, + "update": 0 + }, + "super_tables": [ + { + "name": "stb1", + "child_table_exists": "no", + "childtable_count": 8, + "childtable_prefix": "stb1_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", 
+ "insert_mode": "sml-rest", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + } + ], + "tags": [ + { + "type": "INT" + }, + { + "type": "BIGINT" + }, + { + "type": "FLOAT" + }, + { + "type": "DOUBLE" + }, + { + "type": "SMALLINT" + }, + { + "type": "TINYINT" + }, + { + "type": "BOOL" + }, + { + "type": "NCHAR", + "len": 17, + "count": 1 + }, + { + "type": "UINT" + }, + { + "type": "UBIGINT" + }, + { + "type": "UTINYINT" + }, + { + "type": "USMALLINT" + }, + { + "type": "BINARY", + "len": 19, + "count": 1 + } + ] + },{ + "name": "stb2", + "child_table_exists": "no", + "childtable_count": 8, + "childtable_prefix": "stb2_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml-rest", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + } + ], + "tags": [ + { + "type": "INT" + }, + { + "type": "BIGINT" + }, + { + "type": "FLOAT" + }, + { + "type": "DOUBLE" + }, + { + "type": "SMALLINT" + }, + { + "type": "TINYINT" + }, + { + "type": "BOOL" + }, + { + "type": "NCHAR", + "len": 17, + "count": 1 + }, + { + "type": "UINT" + }, + { + "type": "UBIGINT" + }, + { + "type": "UTINYINT" + }, + { + "type": "USMALLINT" + }, + { + "type": "BINARY", + "len": 19, + "count": 1 + } + ] + } + ] + } + ] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/telnet_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_alltypes.json similarity index 99% rename from tests/develop-test/5-taos-tools/taosbenchmark/json/telnet_alltypes.json rename to tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_alltypes.json index 40f58d4f7ef75f0cb5c30abd45c8ec86409763da..739e0eaca4fed0985aa9ae06fea91e4a5f337373 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/telnet_alltypes.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_alltypes.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_pool_size": 20, + "thread_pool_size": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "prepared_rand": 10, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json new file mode 100644 index 0000000000000000000000000000000000000000..b76d3da3bd6d1ef90d2c51ebb5cccd3df4ec5b87 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json @@ -0,0 +1,90 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "telnet_tcp_port": 6046, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 
10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb1_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml-rest", + "line_protocol": "telnet", + "tcp_transfer": "yes", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb2_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml-rest", + "line_protocol": "telnet", + "tcp_transfer": "yes", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json index ebe5e3f043eac127acd4069a3088e5b49a782824..d917bec0c94fec4ee66e2cfe33e7e91aac9e8a3e 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_pool_size": 20, + "thread_pool_size": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "prepared_rand": 100, diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/super_subscribe.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/super_subscribe.json new file mode 100644 index 0000000000000000000000000000000000000000..6284caf8b26b85bc379df16a7f3914fa9a8a9297 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/super_subscribe.json @@ -0,0 +1,24 @@ +{ + "filetype": "subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "super_table_query": { + "stblname": "stb", + "threads": 1, + "mode": "sync", + "interval": 1000, + "restart": "yes", + "keepProgress": "yes", + "endAfterConsume": 1, + "sqls": [ + { + "sql": "select * from xxxx;" + } + ] 
+ } +} \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json index fea72a34fb74c52f06e7549008333d33ce537d08..0895f1b45cad6b6978e7820db0bf8936f772ffcc 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json @@ -6,7 +6,7 @@ "user": "root", "password": "taosdata", "thread_count": 4, - "thread_pool_size": 20, + "thread_pool_size": 10, "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "prepared_rand": 100, @@ -55,6 +55,7 @@ "sample_file": "./sample.csv", "use_sample_ts": "no", "tags_file": "", + "partial_col_num": 5, "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] },{ @@ -80,6 +81,7 @@ "sample_file": "./sample.csv", "use_sample_ts": "no", "tags_file": "", + "partial_col_num": 5, "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] }] diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json index 698fb599f595fbbc4a1fd130696e41059362ca50..6816ef5c3dd7dd9c59835a72bd10637814b4537d 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json @@ -55,6 +55,7 @@ "sample_file": "./sample.csv", "use_sample_ts": "no", "tags_file": "", + "partial_col_num": 999, "columns": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] }] diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_limit_offset.json 
b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_limit_offset.json index c78317aade33cd3fea4a400511dee5b1431bc473..346e3cd8680cf3836523daf693f46bb2a0e1cffd 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_limit_offset.json +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_limit_offset.json @@ -55,7 +55,8 @@ "sample_file": "./sample.csv", "use_sample_ts": "no", "tags_file": "", - "columns": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "partial_col_num": 3, + "columns": [{"type": "TIMESTAMP"},{"type": "INT", "len": 0}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] }] }] diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py index 20e64fa7458fecb87771bd98eec59a886e3663b3..803e57ecc873b89fdc941ee87f27ce38dde18a1a 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py @@ -56,6 +56,30 @@ class TDTestCase: tdSql.checkData(0, 0, 8) tdSql.query("select count(*) from db.stb") tdSql.checkData(0, 0, 40) + tdSql.query("select distinct(c1) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c3) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c4) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c5) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c6) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c7) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c8) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c9) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c10) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c11) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c12) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c13) from db.stb") + tdSql.checkData(0, 0, None) def stop(self): tdSql.close() diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py index f704d684fbb7a3d1f9778bccfac0a95ddbc34e4b..30ee6e87bdaaa26fad7550b0075939f8ee2f5cb9 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py @@ -38,9 +38,11 @@ class TDTestCase: tdSql.query("select count(tbname) from db.stb2") tdSql.checkData(0, 0, 8) tdSql.query("select count(*) from db.stb1") - tdSql.checkData(0, 0, 160) + result = 
tdSql.getData(0, 0) + assert result <= 160, "result is %s > expect: 160" % result tdSql.query("select count(*) from db.stb2") - tdSql.checkData(0, 0, 160) + result = tdSql.getData(0, 0) + assert result <= 160, "result is %s > expect: 160" % result def stop(self): tdSql.close() diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py index dc18bda7ecbfbc2207d5919bc663d1bd82c7ae3e..6816be6156e39faacbda4c470d99dc347875494e 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py @@ -29,7 +29,7 @@ class TDTestCase: tdSql.init(conn.cursor(), logSql) def run(self): - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/json_alltypes.json" + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_json_alltypes.json" tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py index 9285de99848acdd1674f6242d0865189d2e17920..0b96fd37389d61bf370ec54d85a160ad940970ae 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py @@ -29,7 +29,7 @@ class TDTestCase: tdSql.init(conn.cursor(), logSql) def run(self): - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/telnet_alltypes.json" + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_telnet_alltypes.json" tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/subscripe_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/subscripe_json.py index 726b4188e0824530cb78330f07a822e93e8ecc51..13ff130a742c94f4a2ba176fcaff796fe0f9f605 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/subscripe_json.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/subscripe_json.py @@ -38,8 +38,12 @@ class TDTestCase: tdSql.execute("insert into stb_2 using stb tags (2) values (now, 2)") cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/specified_subscribe.json -g" tdLog.info("%s" % cmd) - os.system("%s" % cmd) - tdSql.execute("reset query cache") + assert os.system("%s" % cmd) == 0 + + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/super_subscribe.json -g" + tdLog.info("%s" % cmd) + assert os.system("%s" % cmd) == 0 + def stop(self): tdSql.close() diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/taosadapter_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/taosadapter_json.py new file mode 100644 index 0000000000000000000000000000000000000000..4f60979e2a31148d6a193819bebfd531421b7b5f --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/taosadapter_json.py @@ -0,0 +1,77 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_rest_telnet.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db.stb1") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb1") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(tbname) from db.stb2") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb2") + tdSql.checkData(0, 0, 160) + + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_rest_line.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db2.stb1") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db2.stb1") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(tbname) from db2.stb2") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db2.stb2") + tdSql.checkData(0, 0, 160) + + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_rest_json.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from db3.stb1") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db3.stb1") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(tbname) from db3.stb2") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db3.stb2") + tdSql.checkData(0, 0, 160) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/telnet_tcp.py b/tests/develop-test/5-taos-tools/taosbenchmark/telnet_tcp.py new file mode 100644 index 0000000000000000000000000000000000000000..7603bcf40902d9b057774f812553b20961de093d --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/telnet_tcp.py @@ -0,0 +1,53 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +import time +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json" + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + time.sleep(5) + tdSql.execute("reset query cache") + tdSql.query("select count(tbname) from opentsdb_telnet.stb1") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from opentsdb_telnet.stb1") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(tbname) from opentsdb_telnet.stb2") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from opentsdb_telnet.stb2") + tdSql.checkData(0, 0, 160) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/develop-test/fulltest-insert.sh b/tests/develop-test/fulltest-insert.sh index 532f7e6fc0446f6a68ca0a5e80be070684a71c23..e538abf8e2f9f22e9acbce017a3f42b9a6804818 100755 --- a/tests/develop-test/fulltest-insert.sh +++ b/tests/develop-test/fulltest-insert.sh @@ -1 +1,2 @@ -python3 ./test.py -f 1-insert/batchInsert.py \ No newline at end of file +python3 ./test.py -f 1-insert/batchInsert.py +python3 ./test.py -f 1-insert/uppercase_in_stmt.py diff --git a/tests/develop-test/fulltest-query.sh b/tests/develop-test/fulltest-query.sh index c4745fea1f48f7bd7d863c16e40e45fea210daf8..dc327bd9b582a3bbe74660ebd4abdd503602bcf8 100755 --- a/tests/develop-test/fulltest-query.sh +++ b/tests/develop-test/fulltest-query.sh @@ -2,7 +2,14 @@ python3 ./test.py -f 2-query/ts_hidden_column.py python3 ./test.py -f 2-query/union-order.py python3 ./test.py -f 2-query/session_two_stage.py python3 ./test.py -f 2-query/timeline_agg_func_groupby.py +python3 ./test.py -f 2-query/constant_compare.py python3 ./test.py -f 2-query/ts_2016.py python3 ./test.py -f 2-query/function_mavg.py python3 ./test.py -f 2-query/escape.py python3 ./test.py -f 2-query/function_histogram.py +python3 ./test.py -f 2-query/func_compare.py +python3 ./test.py -f 2-query/diff_ignore_negative.py +python3 ./test.py -f 2-query/diff_funcs.py +python3 ./test.py -f 2-query/TD-13246.py +python3 ./test.py -f 2-query/TD-6347.py +python3 ./test.py -f 2-query/math_funcs.py diff --git a/tests/examples/c/CMakeLists.txt b/tests/examples/c/CMakeLists.txt index 62a82f20156fccd314aab13d0a52f805a1a9a7af..3c0a4d32db154a942fbc05183ed7379b3e2fe5ad 100644 --- a/tests/examples/c/CMakeLists.txt +++ b/tests/examples/c/CMakeLists.txt @@ -21,4 +21,7 @@ IF (TD_DARWIN) TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread ${LINK_LUA}) ADD_EXECUTABLE(epoll epoll.c) TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread ${LINK_LUA}) + + ADD_EXECUTABLE(parameter-binding parameter-binding.c) + TARGET_LINK_LIBRARIES(parameter-binding taos) ENDIF () diff --git a/tests/examples/c/parameter-binding.c b/tests/examples/c/parameter-binding.c new file mode 100644 index 
0000000000000000000000000000000000000000..6034c66cbf21b1e3ae819d3f40ec032118a5b01c --- /dev/null +++ b/tests/examples/c/parameter-binding.c @@ -0,0 +1,607 @@ +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <sys/time.h> +#include <taos.h> +#include <time.h> + +bool isPrint = true; + +void one_batch_one_table_1(TAOS *conn, long totalRows, long batchRows); +void one_batch_one_table_2(TAOS *conn, long totalRows, long batchRows); +void one_batch_one_table_3(TAOS *conn, long totalRows, long batchRows); +void one_batch_one_table_4(TAOS *conn, long totalRows, long batchRows); +void one_batch_one_table_5(TAOS *conn, long totalRows, long batchRows); +void one_batch_one_table_6(TAOS *conn, long totalRows, long batchRows); +void one_batch_one_table_7(TAOS *conn, long totalRows, long batchRows); + +void one_batch_multi_table_1(TAOS *conn, long totalRows, long batchRows, int tables); +void one_batch_multi_table_2(TAOS *conn, long totalRows, long batchRows, int tables); +void one_batch_multi_table_3(TAOS *conn, long totalRows, long batchRows, int tables); + +void execute(TAOS *conn, char *sql); +void prepare_normal_table(TAOS *conn); +void prepare_super_and_sub_table(TAOS *conn, int subTables); +void prepare_super_table(TAOS *conn, int subTables); +int64_t getCurrentTimeMill(); + +TAOS_STMT *A(TAOS *); +void B(TAOS_STMT *stmt, char sql[]); +void C(TAOS_STMT *stmt, char sql[]); +void D(TAOS_STMT *stmt, char sql[], int tag); +void E(TAOS_STMT *stmt); +void F(TAOS_STMT *stmt, int64_t ts_start); +void G1(TAOS_STMT *stmt, int64_t ts_start, int rows); +void G2(TAOS_STMT *stmt, int rows); +void H(TAOS_STMT *stmt, int64_t ts_start, int rows); +void I(TAOS_STMT *stmt); +void J(TAOS_STMT *stmt); +void L(TAOS_STMT *stmt); + +int main() { + char host[] = "192.168.56.105"; + srand(time(NULL)); + + // connect + TAOS *conn = taos_connect(host, "root", "taosdata", NULL, 0); + if (conn == NULL) { + printf("failed to connect to:%s, reason:%s\n", host, "null taos"); + exit(-1); + } + execute(conn, "drop database if exists test"); + execute(conn, "create database if not exists test"); + execute(conn, "use test"); + + long totalRows = 1000000; + long batchRows = 32767; + int tables = 10; + + prepare_super_table(conn, 1); + // A -> B -> D -> [F -> I]... -> J -> L + // one_batch_one_table_1(conn, totalRows, batchRows); + // A -> B -> [D -> [F -> I]... -> J]... -> L + // one_batch_one_table_2(conn, totalRows, batchRows); + // A -> B -> D -> [F... -> I -> J]... -> L + // one_batch_one_table_3(conn, totalRows, batchRows); + // A -> B -> D -> [H -> I -> J]... -> L + // one_batch_one_table_4(conn, totalRows, batchRows); + // A -> B -> [D -> H -> I -> J]... -> L + // one_batch_one_table_5(conn, totalRows, batchRows); + // A -> B -> D -> [G1 -> G2 -> I -> J]... -> L + // one_batch_one_table_6(conn, totalRows, batchRows); + // A -> B -> [D -> G1 -> G2 -> I -> J]... -> L + // one_batch_one_table_7(conn, totalRows, batchRows); + + // A -> B -> [D -> [F -> I]... -> J]... -> L + // one_batch_multi_table_1(conn, totalRows, batchRows, tables); + // A -> B -> [D -> H -> I -> J]... -> L + // one_batch_multi_table_2(conn, totalRows, batchRows, tables); + // A -> B -> [D -> G1 -> G2 -> I -> J]... -> L + one_batch_multi_table_3(conn, totalRows, batchRows, tables); + + // close + taos_close(conn); + taos_cleanup(); + exit(0); +} +
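+// Call-sequence legend (each single-letter helper defined below wraps exactly one taos_stmt call):
+// A = taos_stmt_init, B = taos_stmt_prepare, C = taos_stmt_set_tbname,
+// D = taos_stmt_set_tbname_tags, E = reserved (TODO), F = taos_stmt_bind_param,
+// G1/G2 = taos_stmt_bind_single_param_batch, H = taos_stmt_bind_param_batch,
+// I = taos_stmt_add_batch, J = taos_stmt_execute, L = taos_stmt_close.
+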
+// A -> B -> D -> [F -> I]... -> J -> L +void one_batch_one_table_1(TAOS *conn, long totalRows, long batchRows) { + // given + time_t current; + time(&current); + current -= totalRows; + + int64_t start = getCurrentTimeMill(); + + // when + TAOS_STMT *stmt = A(conn); + B(stmt, "insert into ? using weather tags(?) (ts, f1) values(?, ?)"); + D(stmt, "t1", 1); + for (int i = 1; i <= totalRows; ++i) { + F(stmt, (current + i - 1) * 1000); + I(stmt); + if (i % batchRows == 0 || i == totalRows) { + J(stmt); + } + } + L(stmt); + + int64_t end = getCurrentTimeMill(); + printf("totalRows: %ld, batchRows: %ld, time cost: %lld ms\n", totalRows, batchRows, (end - start)); +} + +// A -> B -> [D -> [F -> I]... -> J]... -> L +void one_batch_one_table_2(TAOS *conn, long totalRows, long batchRows) { + // given + time_t current; + time(&current); + current -= totalRows; + + int64_t start = getCurrentTimeMill(); + + // when + TAOS_STMT *stmt = A(conn); + B(stmt, "insert into ? using weather tags(?) (ts, f1) values(?, ?)"); + for (int i = 1; i <= totalRows; ++i) { + if (i % batchRows == 1) { + D(stmt, "t1", 1); + } + + F(stmt, (current + i - 1) * 1000); + I(stmt); + if (i % batchRows == 0 || i == totalRows) { + J(stmt); + } + } + L(stmt); + + int64_t end = getCurrentTimeMill(); + printf("totalRows: %ld, batchRows: %ld, time cost: %lld ms\n", totalRows, batchRows, (end - start)); +} + +void one_batch_one_table_3(TAOS *conn, long totalRows, long batchRows) { + // given + time_t current; + time(&current); + current -= totalRows; + + int64_t start = getCurrentTimeMill(); + + // when + TAOS_STMT *stmt = A(conn); + B(stmt, "insert into ? using weather tags(?) (ts, f1) values(?, ?)"); + D(stmt, "t1", 1); + for (int i = 1; i <= totalRows; ++i) { + F(stmt, (current + i - 1) * 1000); + if (i % batchRows == 0 || i == totalRows) { + I(stmt); + J(stmt); + } + } + L(stmt); + + int64_t end = getCurrentTimeMill(); + printf("totalRows: %ld, batchRows: %ld, time cost: %lld ms\n", totalRows, batchRows, (end - start)); +} + +void one_batch_one_table_4(TAOS *conn, long totalRows, long batchRows) { + // given + time_t current; + time(&current); + current -= totalRows; + + int64_t start = getCurrentTimeMill(); + // when + TAOS_STMT *stmt = A(conn); + B(stmt, "insert into ? using weather tags(?) values(?,?)"); + D(stmt, "t1", 1); + for (int i = 1; i <= totalRows; i += batchRows) { + int rows = (i + batchRows) > totalRows ? (totalRows + 1 - i) : batchRows; + H(stmt, (current + i) * 1000, rows); + I(stmt); + J(stmt); + } + L(stmt); + + int64_t end = getCurrentTimeMill(); + printf("totalRows: %ld, batchRows: %ld, time cost: %lld ms\n", totalRows, batchRows, (end - start)); +} + +void one_batch_one_table_5(TAOS *conn, long totalRows, long batchRows) { + // given + time_t current; + time(&current); + current -= totalRows; + + int64_t start = getCurrentTimeMill(); + // when + TAOS_STMT *stmt = A(conn); + B(stmt, "insert into ? using weather tags(?) values(?,?)"); + for (int i = 1; i <= totalRows; i += batchRows) { + D(stmt, "t1", 1); + int rows = (i + batchRows) > totalRows ? (totalRows + 1 - i) : batchRows; + H(stmt, (current + i) * 1000, rows); + I(stmt); + J(stmt); + } + L(stmt); + + int64_t end = getCurrentTimeMill(); + printf("totalRows: %ld, batchRows: %ld, time cost: %lld ms\n", totalRows, batchRows, (end - start)); +} + +void one_batch_one_table_6(TAOS *conn, long totalRows, long batchRows) { + // given + time_t current; + time(&current); + current -= totalRows; + + int64_t start = getCurrentTimeMill(); + // when + TAOS_STMT *stmt = A(conn); + B(stmt, "insert into ? using weather tags(?) values(?,?)"); + D(stmt, "t1", 1); + for (int i = 1; i <= totalRows; i += batchRows) { + int rows = (i + batchRows) > totalRows ? (totalRows + 1 - i) : batchRows; + G1(stmt, (current + i) * 1000, rows); + G2(stmt, rows); + I(stmt); + J(stmt); + } + L(stmt); + + int64_t end = getCurrentTimeMill(); + printf("totalRows: %ld, batchRows: %ld, time cost: %lld ms\n", totalRows, batchRows, (end - start)); +} + +void one_batch_one_table_7(TAOS *conn, long totalRows, long batchRows) { + // given + time_t current; + time(&current); + current -= totalRows; + + int64_t start = getCurrentTimeMill(); + // when + TAOS_STMT *stmt = A(conn); + B(stmt, "insert into ? using weather tags(?) values(?,?)"); + for (int i = 1; i <= totalRows; i += batchRows) { + if (i % batchRows == 1) { + D(stmt, "t1", 1); + } + int rows = (i + batchRows) > totalRows ? (totalRows + 1 - i) : batchRows; + G1(stmt, (current + i) * 1000, rows); + G2(stmt, rows); + I(stmt); + J(stmt); + } + L(stmt); + + int64_t end = getCurrentTimeMill(); + printf("totalRows: %ld, batchRows: %ld, time cost: %lld ms\n", totalRows, batchRows, (end - start)); +}
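+
+// The single-table variants above differ only in how rows are bound: F binds one row per call
+// (taos_stmt_bind_param), H binds a whole batch of rows per call (taos_stmt_bind_param_batch),
+// and G1/G2 bind a batch one column at a time (taos_stmt_bind_single_param_batch).
+// The multi-table variants below repeat the same patterns, retargeting the child table with D.
+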
+void one_batch_multi_table_1(TAOS *conn, long totalRows, long batchRows, int tables) { + // given + time_t current; + time(&current); + long eachTable = (totalRows - 1) / tables + 1; + current -= eachTable; + + int64_t start = getCurrentTimeMill(); + // when + TAOS_STMT *stmt = A(conn); + B(stmt, "insert into ? using weather tags(?) values(?, ?)"); + + for (int tbIndex = 0; tbIndex < tables; ++tbIndex) { + char tbname[10]; + sprintf(tbname, "t%d", tbIndex); + + eachTable = ((tbIndex + 1) * eachTable > totalRows) ? (totalRows - tbIndex * eachTable) : eachTable; + for (int rowIndex = 1; rowIndex <= eachTable; ++rowIndex) { + if (rowIndex % batchRows == 1) { + D(stmt, tbname, tbIndex); + if (isPrint) + printf("\ntbIndex: %d, table_rows: %ld, rowIndex: %d, batch_rows: %ld\n", tbIndex, eachTable, rowIndex, + batchRows); + } + F(stmt, (current + rowIndex - 1) * 1000); + I(stmt); + if (rowIndex % batchRows == 0 || rowIndex == eachTable) { + J(stmt); + } + } + } + L(stmt); + + int64_t end = getCurrentTimeMill(); + printf("totalRows: %ld, batchRows: %ld, table: %d, eachTableRows: %ld, time cost: %lld ms\n", totalRows, batchRows, + tables, eachTable, (end - start)); +}
+ +void one_batch_multi_table_2(TAOS *conn, long totalRows, long batchRows, int tables) { + // given + time_t current; + time(&current); + long eachTable = (totalRows - 1) / tables + 1; + current -= eachTable; + + int64_t start = getCurrentTimeMill(); + // when + TAOS_STMT *stmt = A(conn); + B(stmt, "insert into ? using weather tags(?) values(?,?)"); + for (int tbIndex = 0; tbIndex < tables; ++tbIndex) { + char tbname[10]; + sprintf(tbname, "t%d", tbIndex); + + eachTable = ((tbIndex + 1) * eachTable > totalRows) ? (totalRows - tbIndex * eachTable) : eachTable; + for (int rowIndex = 1; rowIndex <= eachTable; rowIndex += batchRows) { + int rows = (rowIndex + batchRows) > eachTable ? (eachTable + 1 - rowIndex) : batchRows; + + if (rowIndex % batchRows == 1) { + D(stmt, tbname, tbIndex); + if (isPrint) + printf("\ntbIndex: %d, table_rows: %ld, rowIndex: %d, batch_rows: %d\n", tbIndex, eachTable, rowIndex, rows); + } + + H(stmt, (current + rowIndex) * 1000, rows); + I(stmt); + J(stmt); + } + } + L(stmt); + + int64_t end = getCurrentTimeMill(); + printf("totalRows: %ld, batchRows: %ld, table: %d, eachTableRows: %ld, time cost: %lld ms\n", totalRows, batchRows, + tables, eachTable, (end - start)); +} + +void one_batch_multi_table_3(TAOS *conn, long totalRows, long batchRows, int tables) { + // given + time_t current; + time(&current); + long eachTable = (totalRows - 1) / tables + 1; + current -= eachTable; + + int64_t start = getCurrentTimeMill(); + // when + TAOS_STMT *stmt = A(conn); + B(stmt, "insert into ? using weather tags(?) values(?, ?)"); + for (int tbIndex = 0; tbIndex < tables; ++tbIndex) { + char tbname[10]; + sprintf(tbname, "t%d", tbIndex); + + eachTable = ((tbIndex + 1) * eachTable > totalRows) ? (totalRows - tbIndex * eachTable) : eachTable; + for (int rowIndex = 1; rowIndex <= eachTable; rowIndex += batchRows) { + int rows = (rowIndex + batchRows) > eachTable ? (eachTable + 1 - rowIndex) : batchRows; + + if (rowIndex % batchRows == 1) { + D(stmt, tbname, tbIndex); + if (isPrint) + printf("\ntbIndex: %d, table_rows: %ld, rowIndex: %d, batch_rows: %d\n", tbIndex, eachTable, rowIndex, rows); + } + G1(stmt, (current + rowIndex) * 1000, rows); + G2(stmt, rows); + I(stmt); + J(stmt); + } + } + L(stmt); + + int64_t end = getCurrentTimeMill(); + printf("totalRows: %ld, batchRows: %ld, table: %d, eachTableRows: %ld, time cost: %lld ms\n", totalRows, batchRows, + tables, eachTable, (end - start)); +} + +void execute(TAOS *conn, char *sql) { + TAOS_RES *psql = taos_query(conn, sql); + if (psql == NULL) { + printf("failed to execute: %s, reason: %s\n", sql, taos_errstr(psql)); + taos_free_result(psql); + taos_close(conn); + exit(-1); + } + taos_free_result(psql); +} + +TAOS_STMT *A(TAOS *conn) { + if (isPrint) printf("A -> "); + return taos_stmt_init(conn); +} + +void B(TAOS_STMT *stmt, char sql[]) { + if (isPrint) printf("B -> "); + + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0) { + printf("failed to prepare stmt: %s, reason: %s\n", sql, taos_stmt_errstr(stmt)); + return; + } +} + +void C(TAOS_STMT *stmt, char tbname[]) { + if (isPrint) printf("C -> "); + + int code = taos_stmt_set_tbname(stmt, tbname); + if (code != 0) printf("failed to set_tbname, reason: %s\n", taos_stmt_errstr(stmt)); +} +
+void D(TAOS_STMT *stmt, char tbname[], int tag) { + if (isPrint) printf("D -> "); + + TAOS_BIND tags[1]; + tags[0].buffer_type = TSDB_DATA_TYPE_INT; + int tag_value = tag >= 0 ? tag : rand() % 100; + tags[0].buffer = &tag_value; + tags[0].buffer_length = sizeof(tag_value); + tags[0].length = &tags[0].buffer_length; + tags[0].is_null = NULL; + // set_tbname_tags + int code = taos_stmt_set_tbname_tags(stmt, tbname, tags); + if (code != 0) printf("failed to set_tbname_tags, reason: %s\n", taos_stmt_errstr(stmt)); +} + +void E(TAOS_STMT *stmt) { + // TODO +} + +void F(TAOS_STMT *stmt, int64_t ts) { + if (isPrint) printf("F -> "); + + TAOS_BIND params[2]; + // timestamp + params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[0].buffer = &ts; + params[0].buffer_length = sizeof(ts); + params[0].length = &params[0].buffer_length; + params[0].is_null = NULL; + // int + int value = rand() % 100; + params[1].buffer_type = TSDB_DATA_TYPE_INT; + params[1].buffer = &value; + params[1].buffer_length = sizeof(value); + params[1].length = &params[1].buffer_length; + params[1].is_null = NULL; + + // bind + int code = taos_stmt_bind_param(stmt, params); + if (0 != code) printf("failed to bind_param, reason: %s\n", taos_stmt_errstr(stmt)); +} + +void H(TAOS_STMT *stmt, int64_t ts_start, int rows) { + if (isPrint) printf("H -> "); + + TAOS_MULTI_BIND params[2]; + // timestamp + int64_t ts[rows]; + for (int i = 0; i < rows; ++i) { + ts[i] = ts_start + i * 1000; + } + params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[0].buffer = ts; + params[0].buffer_length = sizeof(ts[0]); + params[0].length = malloc(sizeof(int64_t) * rows); + char is_null[rows]; + for (int i = 0; i < rows; i++) { + is_null[i] = 0; + } + params[0].is_null = is_null; + params[0].num = rows; + // f1 + int32_t values[rows]; + for (int i = 0; i < rows; ++i) { + values[i] = rand() % 100; + } + params[1].buffer_type = TSDB_DATA_TYPE_INT; + params[1].buffer = values; + params[1].buffer_length = sizeof(int32_t); + params[1].length = malloc(sizeof(int32_t) * rows); + params[1].is_null = is_null; + params[1].num = rows; + + int code = taos_stmt_bind_param_batch(stmt, params); + if (code != 0) { + printf("failed to bind_param_batch, reason: %s\n", taos_stmt_errstr(stmt)); + return; + } +} + +void G1(TAOS_STMT *stmt, int64_t ts_start, int rows) { + if (isPrint) printf("G1 -> "); + + // timestamp + TAOS_MULTI_BIND param0[1]; + int64_t ts[rows]; + for (int i = 0; i < rows; ++i) { + ts[i] = ts_start + i * 1000; + } + param0[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + param0[0].buffer = ts; + param0[0].buffer_length = sizeof(ts[0]); + param0[0].length = malloc(sizeof(int64_t) * rows); + char is_null[rows]; + for (int i = 0; i < rows; i++) { + is_null[i] = 0; + } + param0[0].is_null = is_null; + param0[0].num = rows; + int code = taos_stmt_bind_single_param_batch(stmt, param0, 0); + if (code != 0) { + printf("failed to bind_single_param_batch, reason: %s\n", taos_stmt_errstr(stmt)); + return; + } +} + +void G2(TAOS_STMT *stmt, int rows) { + if (isPrint) printf("G2 -> "); + + // f1 + TAOS_MULTI_BIND param1[1]; + int32_t values[rows]; + for (int i = 0; i < rows; ++i) { + values[i] = rand() % 100; + } + param1[0].buffer_type = TSDB_DATA_TYPE_INT; + param1[0].buffer = values; + param1[0].buffer_length = sizeof(int32_t); + param1[0].length = malloc(sizeof(int32_t) * rows); + char is_null[rows]; + for (int i = 0; i < rows; i++) { + is_null[i] = 0; + } + param1[0].is_null = is_null; + param1[0].num = rows; + + int code = taos_stmt_bind_single_param_batch(stmt, param1, 1); + if (code != 0) { + printf("failed to bind_single_param_batch, reason: %s\n", taos_stmt_errstr(stmt)); + return; + } +} + +void I(TAOS_STMT *stmt) { + if (isPrint)
printf("I -> "); + + int code = taos_stmt_add_batch(stmt); + if (code != 0) { + printf("failed to add_batch, reason: %s\n", taos_stmt_errstr(stmt)); + return; + } +} + +void J(TAOS_STMT *stmt) { + if (isPrint) printf("J -> "); + + int code = taos_stmt_execute(stmt); + if (code != 0) { + printf("failed to execute, reason: %s\n", taos_stmt_errstr(stmt)); + return; + } +} + +void L(TAOS_STMT *stmt) { + if (isPrint) printf("L\n"); + + taos_stmt_close(stmt); +} + +void prepare_super_table(TAOS *conn, int subTables) { + char sql[100] = "drop table weather"; + execute(conn, sql); + if (isPrint) printf("sql>>> %s\n", sql); + + sprintf(sql, "create table weather(ts timestamp, f1 int) tags(t1 int)"); + execute(conn, sql); + if (isPrint) printf("sql>>> %s\n", sql); + + for (int i = 0; i < subTables; i++) { + sprintf(sql, "drop table t%d", i); + if (isPrint) printf("sql>>> %s\n", sql); + execute(conn, sql); + } +} + +void prepare_normal_table(TAOS *conn) { + execute(conn, "drop table weather"); + execute(conn, "create table weather(ts timestamp, f1 int) tags(t1 int)"); +} + +void prepare_super_and_sub_table(TAOS *conn, int subTables) { + execute(conn, "drop table weather"); + execute(conn, "create table weather(ts timestamp, f1 int) tags(t1 int)"); + for (int i = 0; i < subTables; i++) { + char sql[100]; + sprintf(sql, "drop table t%d", i); + if (isPrint) printf("sql>>> %s\n", sql); + execute(conn, sql); + + sprintf(sql, "create table t%d using weather tags(%d)", i, i); + if (isPrint) printf("sql>>> %s\n", sql); + execute(conn, sql); + } +} + +int64_t getCurrentTimeMill() { + struct timeval tv; + gettimeofday(&tv, NULL); + return ((unsigned long long)tv.tv_sec * 1000 + (unsigned long long)tv.tv_usec / 1000); +} \ No newline at end of file diff --git a/tests/examples/lua/OpenResty/rest/test.lua b/tests/examples/lua/OpenResty/rest/test.lua index 2dc0cf10f22b90c8bcb925700b1d7ebd00ff153a..9b2169a2da656ea34f60e99c8904b5f224da3dd4 100644 --- a/tests/examples/lua/OpenResty/rest/test.lua +++ b/tests/examples/lua/OpenResty/rest/test.lua @@ -28,7 +28,7 @@ else ngx.say("select db--- pass.") end -res = driver.query(conn,"create table m1 (ts timestamp, speed int,owner binary(20))") +res = driver.query(conn,"create table m1 (ts timestamp, speed int, owner binary(20), mark nchar(30))") if res.code ~=0 then ngx.say("create table---failed: "..res.error) @@ -36,7 +36,7 @@ else ngx.say("create table--- pass.") end -res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001', 0, 'robotspace'), ('2019-09-01 00:00:00.002',1,'Hilink'),('2019-09-01 00:00:00.003',2,'Harmony')") +res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001', 0, 'robotspace', '世界人民大团结万岁'), ('2019-09-01 00:00:00.002',1,'Hilink','⾾⾿⿀⿁⿂⿃⿄⿅⿆⿇⿈⿉⿊⿋⿌⿍⿎⿏⿐⿑⿒⿓⿔⿕'),('2019-09-01 00:00:00.003',2,'Harmony', '₠₡₢₣₤₥₦₧₨₩₪₫€₭₮₯₰₱₲₳₴₵')") if res.code ~=0 then ngx.say("insert records failed: "..res.error) return @@ -56,13 +56,12 @@ if res.code ~=0 then else ngx.say(cjson.encode(res)) if (#(res.item) == 3) then - ngx.say("select--- pass") + ngx.say("select--- pass") else ngx.say("select--- failed: expect 3 affected records, actually received "..#(res.item)) end end - --[[ local flag = false function query_callback(res) diff --git a/tests/examples/lua/OpenResty/so/luaconnector51.so b/tests/examples/lua/OpenResty/so/luaconnector51.so index d8e4f00fec321ce5f48d4241176a59ee8df5d50c..a17c0318bd1e9196311ca97b20a8dde4fc1835a6 100755 Binary files a/tests/examples/lua/OpenResty/so/luaconnector51.so and 
b/tests/examples/lua/OpenResty/so/luaconnector51.so differ diff --git a/tests/examples/lua/lua51/lua_connector51.c b/tests/examples/lua/lua51/lua_connector51.c index b6e0b6d1de200b09750ffba6845ae9bf0606f4d8..7aad42f29343306516c6d8973b76060f3e1e6dfe 100644 --- a/tests/examples/lua/lua51/lua_connector51.c +++ b/tests/examples/lua/lua51/lua_connector51.c @@ -35,7 +35,7 @@ static int l_connect(lua_State *L){ } lua_getfield(L, 1, "port"); - if (lua_isnumber(L,-1)){ + if (lua_isnumber(L, -1)){ port = lua_tonumber(L, -1); //printf("port = %d\n", port); } @@ -113,7 +113,6 @@ static int l_query(lua_State *L){ int rows = 0; int num_fields = taos_field_count(result); const TAOS_FIELD *fields = taos_fetch_fields(result); - //char temp[256]; const int affectRows = taos_affected_rows(result); // printf(" affect rows:%d\r\n", affectRows); @@ -122,7 +121,7 @@ static int l_query(lua_State *L){ lua_pushinteger(L, affectRows); lua_setfield(L, table_index, "affected"); lua_newtable(L); - + while ((row = taos_fetch_row(result))) { //printf("row index:%d\n",rows); rows++; @@ -136,17 +135,21 @@ static int l_query(lua_State *L){ } lua_pushstring(L,fields[i].name); - + int32_t* length = taos_fetch_lengths(result); switch (fields[i].type) { + case TSDB_DATA_TYPE_UTINYINT: case TSDB_DATA_TYPE_TINYINT: lua_pushinteger(L,*((char *)row[i])); break; + case TSDB_DATA_TYPE_USMALLINT: case TSDB_DATA_TYPE_SMALLINT: lua_pushinteger(L,*((short *)row[i])); break; + case TSDB_DATA_TYPE_UINT: case TSDB_DATA_TYPE_INT: lua_pushinteger(L,*((int *)row[i])); break; + case TSDB_DATA_TYPE_UBIGINT: case TSDB_DATA_TYPE_BIGINT: lua_pushinteger(L,*((int64_t *)row[i])); break; @@ -156,9 +159,11 @@ static int l_query(lua_State *L){ case TSDB_DATA_TYPE_DOUBLE: lua_pushnumber(L,*((double *)row[i])); break; + case TSDB_DATA_TYPE_JSON: case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: - lua_pushstring(L,(char *)row[i]); + //printf("type:%d, max len:%d, current len:%d\n",fields[i].type, fields[i].bytes, length[i]); + lua_pushlstring(L,(char *)row[i], length[i]); break; case TSDB_DATA_TYPE_TIMESTAMP: lua_pushinteger(L,*((int64_t *)row[i])); @@ -166,6 +171,7 @@ static int l_query(lua_State *L){ case TSDB_DATA_TYPE_BOOL: lua_pushinteger(L,*((char *)row[i])); break; + case TSDB_DATA_TYPE_NULL: default: lua_pushnil(L); break; diff --git a/tests/examples/lua/lua_connector.c b/tests/examples/lua/lua_connector.c index 06568f35d656d5d9af1ae2e88eeaeba92f0ede91..035b17eb2a729c5267996fe7e3b7d3e1cf122d3e 100644 --- a/tests/examples/lua/lua_connector.c +++ b/tests/examples/lua/lua_connector.c @@ -28,14 +28,14 @@ static int l_connect(lua_State *L){ luaL_checktype(L, 1, LUA_TTABLE); - lua_getfield(L,1,"host"); + lua_getfield(L, 1,"host"); if (lua_isstring(L,-1)){ host = lua_tostring(L, -1); // printf("host = %s\n", host); } lua_getfield(L, 1, "port"); - if (lua_isinteger(L,-1)){ + if (lua_isinteger(L, -1)){ port = lua_tointeger(L, -1); //printf("port = %d\n", port); } @@ -113,7 +113,6 @@ static int l_query(lua_State *L){ int rows = 0; int num_fields = taos_field_count(result); const TAOS_FIELD *fields = taos_fetch_fields(result); - //char temp[256]; const int affectRows = taos_affected_rows(result); // printf(" affect rows:%d\r\n", affectRows); @@ -122,7 +121,7 @@ static int l_query(lua_State *L){ lua_pushinteger(L, affectRows); lua_setfield(L, table_index, "affected"); lua_newtable(L); - + while ((row = taos_fetch_row(result))) { //printf("row index:%d\n",rows); rows++; @@ -136,17 +135,21 @@ static int l_query(lua_State *L){ } 
lua_pushstring(L,fields[i].name); - + int32_t* length = taos_fetch_lengths(result); switch (fields[i].type) { + case TSDB_DATA_TYPE_UTINYINT: case TSDB_DATA_TYPE_TINYINT: lua_pushinteger(L,*((char *)row[i])); break; + case TSDB_DATA_TYPE_USMALLINT: case TSDB_DATA_TYPE_SMALLINT: lua_pushinteger(L,*((short *)row[i])); break; + case TSDB_DATA_TYPE_UINT: case TSDB_DATA_TYPE_INT: lua_pushinteger(L,*((int *)row[i])); break; + case TSDB_DATA_TYPE_UBIGINT: case TSDB_DATA_TYPE_BIGINT: lua_pushinteger(L,*((int64_t *)row[i])); break; @@ -156,9 +159,11 @@ static int l_query(lua_State *L){ case TSDB_DATA_TYPE_DOUBLE: lua_pushnumber(L,*((double *)row[i])); break; + case TSDB_DATA_TYPE_JSON: case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: - lua_pushstring(L,(char *)row[i]); + //printf("type:%d, max len:%d, current len:%d\n",fields[i].type, fields[i].bytes, length[i]); + lua_pushlstring(L,(char *)row[i], length[i]); break; case TSDB_DATA_TYPE_TIMESTAMP: lua_pushinteger(L,*((int64_t *)row[i])); @@ -166,6 +171,7 @@ static int l_query(lua_State *L){ case TSDB_DATA_TYPE_BOOL: lua_pushinteger(L,*((char *)row[i])); break; + case TSDB_DATA_TYPE_NULL: default: lua_pushnil(L); break; diff --git a/tests/examples/lua/test.lua b/tests/examples/lua/test.lua index 89c0904c6a04ecec79a95cb1f710136e93a4a00b..c124b50a4dbd954ab47098e527bd2d35ee44384e 100644 --- a/tests/examples/lua/test.lua +++ b/tests/examples/lua/test.lua @@ -37,7 +37,7 @@ else print("select db--- pass.") end -res = driver.query(conn,"create table m1 (ts timestamp, speed int,owner binary(20))") +res = driver.query(conn,"create table m1 (ts timestamp, speed int, owner binary(20), mark nchar(30))") if res.code ~=0 then print("create table---failed: "..res.error) return @@ -45,7 +45,7 @@ else print("create table--- pass.") end -res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001',0,'robotspace'), ('2019-09-01 00:00:00.002',1,'Hilink'),('2019-09-01 00:00:00.003',2,'Harmony')") +res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001', 0, 'robotspace', '世界人民大团结万岁'), ('2019-09-01 00:00:00.002', 1, 'Hilink', '⾾⾿⿀⿁⿂⿃⿄⿅⿆⿇⿈⿉⿊⿋⿌⿍⿎⿏⿐⿑⿒⿓⿔⿕'),('2019-09-01 00:00:00.003', 2, 'Harmony', '₠₡₢₣₤₥₦₧₨₩₪₫€₭₮₯₰₱₲₳₴₵')") if res.code ~=0 then print("insert records failed: "..res.error) return @@ -64,7 +64,11 @@ if res.code ~=0 then return else if (#(res.item) == 3) then - print("select--- pass") + print("select--- pass") + print(res.item[1].mark) + print(res.item[2].mark) + print(res.item[3].mark) + else print("select--- failed: expect 3 affected records, actually received "..#(res.item)) end diff --git a/tests/parallel_test/Jenkinsfile b/tests/parallel_test/Jenkinsfile new file mode 100644 index 0000000000000000000000000000000000000000..95c8cf83d9b6b2514588234cbce27c06343e4c17 --- /dev/null +++ b/tests/parallel_test/Jenkinsfile @@ -0,0 +1,366 @@ +import hudson.model.Result +import hudson.model.*; +import jenkins.model.CauseOfInterruption +node { +} +def sync_source() { + sh ''' + hostname + date + ''' + sh ''' + cd ${WKC} + [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" + git reset --hard HEAD~10 >/dev/null + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh ''' + cd ${WKC} + git checkout master + ''' + } else if (env.CHANGE_TARGET == '2.0') { + sh ''' + cd ${WKC} + git checkout 2.0 + ''' + } else if (env.CHANGE_TARGET == '2.4') { + sh ''' + cd ${WKC} + git checkout 2.4 + ''' + } else { + sh ''' + cd ${WKC} + git 
checkout develop + ''' + } + } + sh''' + cd ${WKC} + git remote prune origin + [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" + git pull >/dev/null + git fetch origin +refs/pull/${CHANGE_ID}/merge + git checkout -qf FETCH_HEAD + git clean -dfx + git submodule update --init --recursive + cd ${WK} + git reset --hard HEAD~10 + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh ''' + cd ${WK} + git checkout master + ''' + } else if (env.CHANGE_TARGET == '2.0') { + sh ''' + cd ${WK} + git checkout 2.0 + ''' + } else if (env.CHANGE_TARGET == '2.4') { + sh ''' + cd ${WK} + git checkout 2.4 + ''' + } else { + sh ''' + cd ${WK} + git checkout develop + ''' + } + } + sh ''' + cd ${WK} + git pull >/dev/null + export TZ=Asia/Harbin + date + git clean -dfx + ''' +} +def pre_test() { + sync_source() + sh ''' + cd ${WK} + mkdir -p debug + cd debug + go env -w GOPROXY=https://goproxy.cn,direct + go env -w GO111MODULE=on + cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true > /dev/null + make -j8 >/dev/null + ''' + return 1 +} +def pre_test_mac() { + sync_source() + sh ''' + cd ${WK} + mkdir -p debug + cd debug + go env -w GOPROXY=https://goproxy.cn,direct + go env -w GO111MODULE=on + cmake .. -DBUILD_TOOLS=false > /dev/null + make -j8 >/dev/null + ''' + return 1 +} +pipeline { + agent {label " dispatcher "} + options { skipDefaultCheckout() } + environment{ + WK = '/var/data/jenkins/workspace/TDinternal' + WKC = '/var/data/jenkins/workspace/TDinternal/community' + LOGDIR = '/var/data/jenkins/workspace/log' + } + stages { + stage ('pre_build') { + steps { + sh ''' + date + pwd + env + hostname + ''' + } + } + stage ('Parallel build stage') { + //only build pr + options { skipDefaultCheckout() } + when { + allOf { + changeRequest() + not { expression { env.CHANGE_BRANCH =~ /docs\// }} + } + } + parallel { + stage ('dispatcher sync source') { + steps { + timeout(time: 20, unit: 'MINUTES') { + sync_source() + script { + sh ''' + echo "dispatcher ready" + date + ''' + } + } + } + } + stage ('build worker01') { + agent {label " worker01 "} + steps { + timeout(time: 20, unit: 'MINUTES') { + pre_test() + script { + sh ''' + echo "worker01 build done" + date + ''' + } + } + } + } + stage ('build worker02') { + agent {label " worker02 "} + steps { + timeout(time: 20, unit: 'MINUTES') { + pre_test() + script { + sh ''' + echo "worker02 build done" + date + ''' + } + } + } + } + stage ('build worker03') { + agent {label " worker03 "} + steps { + timeout(time: 20, unit: 'MINUTES') { + pre_test() + script { + sh ''' + echo "worker03 build done" + date + ''' + } + } + } + } + stage ('build worker04') { + agent {label " worker04 "} + steps { + timeout(time: 20, unit: 'MINUTES') { + pre_test() + script { + sh ''' + echo "worker04 build done" + date + ''' + } + } + } + } + stage ('build worker05') { + agent {label " worker05 "} + steps { + timeout(time: 20, unit: 'MINUTES') { + pre_test() + script { + sh ''' + echo "worker05 build done" + date + ''' + } + } + } + } + } + } + stage('run test') { + parallel { + stage ('build worker06_arm64') { + agent {label " worker06_arm64 "} + steps { + timeout(time: 20, unit: 'MINUTES') { + pre_test() + script { + sh ''' + echo "worker06_arm64 build done" + date + ''' + } + } + } + } + stage ('build worker07_arm64') { + agent {label " worker07_arm64 "} + steps { + timeout(time: 20, unit: 'MINUTES') { + pre_test() + script { + sh ''' + echo "worker07_arm64 build done" + date + 
''' + } + } + } + stage ('build Mac_catalina ') { + agent {label " Mac_catalina "} + steps { + timeout(time: 20, unit: 'MINUTES') { + pre_test_mac() + script { + sh ''' + echo "Mac_catalina build done" + date + ''' + } + } + } + } + stage('run cases') { + steps { + sh ''' + date + hostname + ''' + timeout(time: 60, unit: 'MINUTES') { + sh ''' + date + cd ${WKC}/tests/parallel_test + time ./run.sh -m m.json -t cases.task -l ${LOGDIR} -b ${BRANCH_NAME} + date + hostname + ''' + } + } + } + } + } + } + post { + success { + emailext ( + subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS", + body: """
+ <html>
+ <body>
+ <h3>构建信息</h3>
+ <ul>
+ <li>构建名称>>分支:${env.BRANCH_NAME}</li>
+ <li>构建结果: Successful</li>
+ <li>构建编号:${BUILD_NUMBER}</li>
+ <li>触发用户:${env.CHANGE_AUTHOR}</li>
+ <li>提交信息:${env.CHANGE_TITLE}</li>
+ <li>构建地址:${BUILD_URL}</li>
+ <li>构建日志:${BUILD_URL}console</li>
+ </ul>
+ </body>
+ </html>
+ """,
+ to: "${env.CHANGE_AUTHOR_EMAIL}",
+ from: "support@taosdata.com"
+ )
+ }
+ failure {
+ emailext (
+ subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' FAIL",
+ body: """
+ <html>
+ <body>
+ <h3>构建信息</h3>
+ <ul>
+ <li>构建名称>>分支:${env.BRANCH_NAME}</li>
+ <li>构建结果: Failure</li>
+ <li>构建编号:${BUILD_NUMBER}</li>
+ <li>触发用户:${env.CHANGE_AUTHOR}</li>
+ <li>提交信息:${env.CHANGE_TITLE}</li>
+ <li>构建地址:${BUILD_URL}</li>
+ <li>构建日志:${BUILD_URL}console</li>
+ </ul>
+ </body>
+ </html>
+ + """, + to: "${env.CHANGE_AUTHOR_EMAIL}", + from: "support@taosdata.com" + ) + } + } +} diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task new file mode 100644 index 0000000000000000000000000000000000000000..788031b1067c01720726cf3513e84d5faa63fed1 --- /dev/null +++ b/tests/parallel_test/cases.task @@ -0,0 +1,768 @@ +113,,pytest,python3 test.py -f update/append_commit_data.py +# 20,,pytest,python3 insert/retentionpolicy.py change date time +368,,develop-test/3-connectors/java,bash test.sh +299,,pytest,python3 test.py -f update/merge_commit_data-0.py +290,,pytest,python3 test.py -f update/merge_commit_data.py +263,,script,./test.sh -f general/stream/stream_3.sim +259,,script,./test.sh -f general/stream/restart_stream.sim +241,,pytest,python3 test.py -f update/merge_commit_data2.py +224,,pytest,python3 test.py -f query/queryNullValueTest.py +221,,pytest,python3 test.py -f query/nestedQuery/nestedQuery.py +220,,pytest,python3 test.py -f update/merge_commit_data2_update0.py +208,,pytest,python3 test.py -f update/merge_commit_last.py +203,,pytest,python3 test.py -f update/merge_commit_last-0.py +191,,pytest,python3 test.py -f stream/stream1.py +188,,pytest,python3 test.py -f stream/stream2.py +151,,script,./test.sh -f unique/vnode/replica3_repeat.sim +143,,script,./test.sh -f general/stream/stream_restart.sim +138,,pytest,python3 test.py -f insert/randomNullCommit.py +132,,pytest,python3 test.py -f functions/function_interp.py +131,,system-test,bash 3-connectors/python/test.sh +128,,develop-test,bash 3-connectors/python/test.sh +127,,script,./test.sh -f general/stream/table_replica1_vnoden.sim +127,,script,./test.sh -f general/stream/table_del.sim +127,,script,./test.sh -f general/stream/metrics_replica1_vnoden.sim +127,,script,./test.sh -f general/stream/metrics_del.sim +127,,pytest,python3 test.py -f query/queryConnection.py +121,2,script,./test.sh -f unique/cluster/balance3.sim +114,,script,./test.sh -f general/db/alter_tables_d2.sim +113,,pytest,python3 test.py -f update/append_commit_last.py +111,,script,./test.sh -f unique/vnode/replica2_repeat.sim +111,,script,./test.sh -f unique/vnode/many.sim +110,,pytest,python3 test.py -f update/append_commit_last-0.py +109,,script,./test.sh -f unique/cluster/vgroup100.sim +102,,script,./test.sh -f general/parser/selectResNum.sim +99,,script,./test.sh -f general/parser/repeatAlter.sim +89,,script,./test.sh -f unique/big/balance.sim +86,,script,./test.sh -f unique/dnode/balance1.sim +86,,script,./test.sh -f unique/cluster/balance2.sim +85,,pytest,python3 test.py -f insert/boundary2.py +83,,script,./test.sh -f general/parser/limit2.sim +83,,script,./test.sh -f general/parser/limit1_tblocks100.sim +82,,script,./test.sh -f general/parser/limit1.sim +82,,pytest,python3 test.py -f query/last_row_cache.py +# 81,,develop-test,bash 3-connectors/c#/test.sh +80,,system-test,bash 3-connectors/c#/test.sh +80,,develop-test,bash 3-connectors/nodejs/test.sh +79,,script,./test.sh -f general/db/alter_tables_v4.sim +79,,pytest,python3 test.py -f insert/verifyMemToDiskCrash.py +78,2,script,./test.sh -f unique/dnode/balance3.sim +78,,script,./test.sh -f unique/cluster/balance1.sim +# 77,,system-test,bash 3-connectors/nodejs/test.sh +76,,script,./test.sh -f unique/import/replica3.sim +76,,script,./test.sh -f unique/db/replica_add13.sim +75,,script,./test.sh -f unique/db/replica_reduce32.sim +75,,script,./test.sh -f unique/db/replica_add23.sim +75,,script,./test.sh -f unique/account/usage.sim +74,,script,./test.sh -f 
unique/arbitrator/dn3_mn1_replica_change.sim +74,,script,./test.sh -f unique/arbitrator/dn3_mn1_r3_vnode_delDir.sim +71,,script,./test.sh -f unique/mnode/mgmtr2.sim +69,,script,./test.sh -f unique/import/replica3.sim +69,,script,./test.sh -f unique/arbitrator/check_cluster_cfg_para.sim +69,,pytest,python3 test.py -f functions/function_sample.py -r 1 +68,,script,./test.sh -f unique/mnode/mgmt20.sim +68,,script,./test.sh -f general/parser/groupby.sim +67,,script,./test.sh -f unique/dnode/data1.sim +67,,pytest,python3 test.py -f client/taoshellCheckCase.py +66,,script,./test.sh -f unique/db/delete.sim +66,,script,./test.sh -f unique/account/authority.sim +66,,script,./test.sh -f general/db/alter_tables_v1.sim +65,,script,./test.sh -f unique/vnode/replica3_basic.sim +65,,script,./test.sh -f unique/import/replica2.sim +65,2,script,./test.sh -f unique/dnode/m2.sim +65,,script,./test.sh -f unique/db/delete_part.sim +65,,script,./test.sh -f issue/TD-2713.sim +64,,script,./test.sh -f unique/arbitrator/dn3_mn1_vnode_delDir.sim +63,2,script,./test.sh -f unique/dnode/m3.sim +63,,script,./test.sh -f unique/db/replica_reduce31.sim +63,,script,./test.sh -f unique/arbitrator/dn3_mn1_vnode_noCorruptFile_offline.sim +63,,script,./test.sh -f general/parser/where.sim +63,,script,./test.sh -f general/parser/union.sim +63,,pytest,python3 test.py -f stream/new.py +62,,script,./test.sh -f unique/dnode/vnode_clean.sim +61,,script,./test.sh -f unique/db/replica_part.sim +61,,pytest,python3 test.py -f stream/cqSupportBefore1970.py +60,,script,./test.sh -f unique/vnode/replica2_basic2.sim +60,,script,./test.sh -f general/parser/first_last.sim +60,,script,./test.sh -f general/db/delete_reuse1.sim +60,,pytest,python3 test.py -f tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py +60,,pytest,python3 test.py -f stream/sys.py +59,,script,./test.sh -f unique/import/replica2.sim +59,,script,./test.sh -f unique/dnode/balance2.sim +59,,script,./test.sh -f general/parser/projection_limit_offset.sim +58,,script,./test.sh -f unique/dnode/balancex.sim +58,,pytest,python3 test.py -f stream/table_1.py +57,,script,./test.sh -f unique/dnode/offline2.sim +57,,script,./test.sh -f unique/arbitrator/replica_changeWithArbitrator.sim +57,,script,./test.sh -f unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim +57,,script,./test.sh -f general/db/delete_writing1.sim +56,,script,./test.sh -f general/parser/commit.sim +55,,script,./test.sh -f unique/dnode/remove2.sim +55,,script,./test.sh -f unique/dnode/remove1.sim +55,,script,./test.sh -f general/db/delete_reusevnode.sim +55,,pytest,python3 test.py -f stream/metric_n.py +55,,develop-test,python3 ./test.py -f 1-insert/batchInsert.py +,,pytest,python3 ./test.py -f query/queryNcharNull.py +,,pytest,python3 ./test.py -f alter/alterBackQuoteCol.py +,,pytest,python3 ./test.py -f tsdb/tsdbComp.py +# ,,system-test,python3 ./test.py -f 0-management/1-stable/create_col_tag.py no such file +# ,,develop-test,python3 ./test.py -f 0-management/3-tag/json_tag.py no such file +,,system-test,python3 ./test.py -f 2-query/TD-11389.py +,,system-test,python3 ./test.py -f 2-query/TD-12909.py +,,system-test,python3 ./test.py -f 2-query/TD-12427.py +,,develop-test,python3 ./test.py -f 1-insert/uppercase_in_stmt.py +,,develop-test,python3 ./test.py -f 2-query/constant_compare.py +,,develop-test,python3 ./test.py -f 2-query/function_mavg.py +,,develop-test,python3 ./test.py -f 2-query/func_compare.py +,,develop-test,python3 ./test.py -f 2-query/diff_ignore_negative.py +,,develop-test,python3 
./test.py -f 2-query/diff_funcs.py +,,develop-test,python3 ./test.py -f 2-query/TD-13246.py +,,develop-test,python3 ./test.py -f 2-query/TD-6347.py +,,develop-test,python3 ./test.py -f 2-query/math_funcs.py +,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/limit_offset_json.py +,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_json_alltypes.py +,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/auto_create_table_json.py +,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/commandline.py +,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/json_tag.py +,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/insert_alltypes_json.py +,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/query_json.py +,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_interlace.py +,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_telnet_alltypes.py +,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/subscripe_json.py +,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/default_json.py +,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/invalid_commandline.py +54,,script,./test.sh -f unique/arbitrator/dn3_mn1_vnode_nomaster.sim +54,,script,./test.sh -f unique/arbitrator/dn3_mn1_r2_vnode_delDir.sim +54,,script,./test.sh -f unique/arbitrator/dn3_mn1_multiCreateDropTable.sim +54,,script,./test.sh -f general/parser/sliding.sim +54,,script,./test.sh -f general/parser/interp_full.sim +54,,pytest,python3 test.py -f stream/table_n.py +54,,pytest,python3 test.py -f stream/metric_1.py +53,,script,./test.sh -f unique/mnode/mgmt26.sim +53,,script,./test.sh -f unique/mnode/mgmt23.sim +53,,script,./test.sh -f unique/mnode/mgmt22.sim +53,,script,./test.sh -f unique/db/replica_add12.sim +# 53,,pytest,python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJsonSml.py +52,,script,./test.sh -f unique/account/pass_alter.sim +52,,script,./test.sh -f general/parser/topbot.sim +52,,script,./test.sh -f general/parser/join_manyblocks.sim +51,,script,./test.sh -f unique/dnode/reason.sim +51,,script,./test.sh -f unique/cluster/alter.sim +50,,script,./test.sh -f unique/mnode/mgmt34.sim +50,,script,./test.sh -f unique/dnode/datatrans_3node_2.sim +49,,script,./test.sh -f unique/db/commit.sim +48,,script,./test.sh -f unique/dnode/datatrans_3node.sim +48,,script,./test.sh -f unique/big/tcp.sim +48,,script,./test.sh -f general/parser/nestquery.sim +# 48,,script,./test.sh -f general/parser/col_arithmetic_operation.sim +48,,pytest,python3 test.py -f query/queryStateWindow.py +47,,script,./test.sh -f unique/stable/balance_replica1.sim +47,,script,./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim +47,,script,./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim +47,,script,./test.sh -f unique/dnode/monitor.sim +47,,script,./test.sh -f unique/big/maxvnodes.sim +47,,pytest,python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py +45,,script,./test.sh -f unique/mnode/mgmt24.sim +45,,pytest,python3 test.py -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py +44,,script,./test.sh -f unique/vnode/replica3_vgroup.sim +44,,script,./test.sh -f unique/mnode/mgmt33.sim +44,,pytest,python3 test.py -f wal/sdbComp.py +43,,script,./test.sh -f unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim +43,,script,./test.sh -f unique/dnode/offline1.sim +43,,script,./test.sh -f general/import/replica1.sim +43,,pytest,python3 test.py -f query/select_last_crash.py 
+43,,pytest,python3 test.py -f import_merge/import_update_2.py +42,,script,./test.sh -f unique/dnode/monitor_bug.sim +42,,script,./test.sh -f unique/arbitrator/sync_replica3_alterTable_drop.sim +42,,script,./test.sh -f general/wal/kill.sim +41,,script,./test.sh -f unique/dnode/lossdata.sim +41,,script,./test.sh -f unique/arbitrator/sync_replica3_dropDb.sim +41,,script,./test.sh -f unique/arbitrator/sync_replica2_alterTable_drop.sim +41,,script,./test.sh -f general/alter/count.sim +# 41,,pytest,python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py +41,,pytest,python3 test.py -f import_merge/import_update_0.py +40,,script,./test.sh -f unique/arbitrator/sync_replica3_dropTable.sim +40,,script,./test.sh -f unique/arbitrator/sync_replica3_alterTable_add.sim +40,,script,./test.sh -f unique/arbitrator/sync_replica2_dropTable.sim +39,,script,./test.sh -f unique/mnode/mgmt25.sim +39,,script,./test.sh -f unique/arbitrator/offline_replica3_alterTable_online.sim +39,,script,./test.sh -f general/wal/kill.sim +39,,script,./test.sh -f general/db/alter_vgroups.sim +39,,pytest,python3 test.py -f update/allow_update.py +# 39,,pytest,python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py +39,,pytest,python3 test.py -f query/last_cache.py +38,,script,./test.sh -f unique/dnode/offline3.sim +38,,script,./test.sh -f unique/arbitrator/sync_replica2_dropDb.sim +38,,script,./test.sh -f general/table/delete_reuse2.sim +38,,script,./test.sh -f general/insert/insert_drop.sim +37,,script,./test.sh -f unique/mnode/mgmt30.sim +37,,script,./test.sh -f unique/column/replica3.sim +37,,script,./test.sh -f unique/arbitrator/offline_replica2_createTable_online.sim +37,,script,./test.sh -f unique/arbitrator/offline_replica2_alterTable_online.sim +37,,script,./test.sh -f general/table/delete_reuse1.sim +37,,script,./test.sh -f general/db/delete_reuse2.sim +36,,script,./test.sh -f unique/stable/replica3_vnode3.sim +36,,script,./test.sh -f unique/stable/dnode2_stop.sim +36,,script,./test.sh -f unique/arbitrator/sync_replica2_alterTable_add.sim +36,,script,./test.sh -f unique/arbitrator/offline_replica3_createTable_online.sim +36,,script,./test.sh -f unique/arbitrator/offline_replica2_dropTable_online.sim +36,,script,./test.sh -f general/parser/tbnameIn.sim +36,,pytest,python3 test.py -f tools/taosdumpTestNanoSupport.py +35,,script,./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim +35,,script,./test.sh -f unique/arbitrator/offline_replica3_alterTag_online.sim +35,,script,./test.sh -f unique/arbitrator/offline_replica2_alterTag_online.sim +35,,script,./test.sh -f issue/TD-2680.sim +35,,pytest,python3 test.py -f update/update_options.py +35,,pytest,python3 test.py -f insert/flushwhiledrop.py +35,,pytest,python3 test.py -f import_merge/import_update_1.py +34,,script,./test.sh -f unique/dnode/alternativeRole.sim +34,,script,./test.sh -f unique/arbitrator/offline_replica3_dropTable_online.sim +34,,script,./test.sh -f unique/arbitrator/offline_replica2_dropDb_online.sim +34,,script,./test.sh -f issue/TD-2677.sim +33,,script,./test.sh -f general/table/delete_writing.sim +33,,script,./test.sh -f general/parser/slimit.sim +33,,script,./test.sh -f general/db/topic1.sim +32,,system-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestColTag.py +32,,script,./test.sh -f unique/mnode/mgmt21.sim +32,,script,./test.sh -f unique/arbitrator/offline_replica3_dropDb_online.sim +32,,script,./test.sh -f unique/arbitrator/dn3_mn2_killDnode.sim +32,,script,./test.sh -f 
general/db/delete_reusevnode2.sim +32,,script,./test.sh -f general/compress/compress.sim +32,,script,./test.sh -f general/compress/commitlog.sim +32,,pytest,python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertAllType.py +31,,script,./test.sh -f general/user/pass_alter.sim +31,,script,./test.sh -f general/stable/disk.sim +31,,script,./test.sh -f general/parser/lastrow.sim +31,,script,./test.sh -f general/db/delete_writing2.sim +31,,script,./test.sh -f general/alter/cached_schema_after_alter.sim +30,2,script,./test.sh -f unique/dnode/simple.sim +30,,script,./test.sh -f unique/account/account_delete.sim +30,,script,./test.sh -f general/import/commit.sim +30,,script,./test.sh -f general/compute/diff2.sim +29,,system-test,python3 ./test.py -f 0-others/create_col_tag.py +29,,script,./test.sh -f unique/arbitrator/dn3_mn1_full_createTableFail.sim +29,,script,./test.sh -f general/wal/maxtables.sim +29,,script,./test.sh -f general/compress/compress2.sim +29,,script,./test.sh -f general/alter/insert1.sim +29,,pytest,python3 test.py -f functions/function_mavg.py +28,,script,./test.sh -f general/user/authority.sim +28,,script,./test.sh -f general/parser/select_with_tags.sim +28,,script,./test.sh -f general/insert/query_multi_file.sim +28,,script,./test.sh -f general/compress/uncompress.sim +28,,script,./test.sh -f general/column/commit.sim +28,,pytest,python3 test.py -f queryCount.py +28,,pytest,python3 test.py -f alter/alter_table.py +27,,script,./test.sh -f unique/cluster/cache.sim +27,,script,./test.sh -f general/user/monitor.sim +27,,script,./test.sh -f general/parser/slimit_alter_tags.sim +27,,script,./test.sh -f general/column/table.sim +27,,script,./test.sh -f general/column/metrics.sim +27,,script,./test.sh -f general/alter/cached_schema_after_alter.sim +27,,pytest,python3 test.py -f functions/function_csum.py +26,,script,./test.sh -f general/parser/set_tag_vals.sim +26,,script,./test.sh -f general/db/nosuchfile.sim +26,,script,./test.sh -f general/alter/table.sim +26,,pytest,python3 test.py -f stable/query_after_reset.py +25,,script,./test.sh -f unique/stable/dnode3.sim +25,,script,./test.sh -f general/parser/auto_create_tb.sim +25,,script,./test.sh -f general/alter/metrics.sim +25,,pytest,python3 test.py -f tools/taosdemoTestInterlace.py +25,,pytest,python3 test.py -f alter/alter_cacheLastRow.py +25,,develop-test,bash 3-connectors/go/test.sh +24,,script,./test.sh -f general/wal/maxtables.sim +24,,script,./test.sh -f general/connection/test_old_data.sim +24,,script,./test.sh -f general/cache/restart_metrics.sim +24,,script,./test.sh -f general/alter/insert2.sim +24,,pytest,python3 test.py -f tag_lite/datatype-without-alter.py +23,,script,./test.sh -f general/parser/select_from_cache_disk.sim +23,,script,./test.sh -f general/parser/mixed_blocks.sim +23,,script,./test.sh -f general/import/large.sim +22,,script,./test.sh -f general/stable/metrics.sim +22,,script,./test.sh -f general/parser/slimit1.sim +22,,script,./test.sh -f general/parser/limit.sim +22,,script,./test.sh -f general/insert/tcp.sim +22,,script,./test.sh -f general/cache/restart_table.sim +22,,pytest,python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertShell.py +22,,pytest,python3 test.py -f tag_lite/datatype.py +22,,pytest,python3 test.py -f import_merge/importDataLastH.py +22,,pytest,python3 test.py -f import_merge/importDataHO2.py +22,,pytest,python3 test.py -f import_merge/importCacheFileH.py +21,,system-test,python3 ./test.py -f 2-query/TD-12204.py +21,,script,./test.sh -f general/parser/single_row_in_tb.sim 
+21,,script,./test.sh -f general/parser/last_cache.sim +21,,script,./test.sh -f general/parser/join_multivnode.sim +21,,script,./test.sh -f general/db/repeat.sim +21,,pytest,python3 test.py -f import_merge/importLastT.py +21,,pytest,python3 test.py -f import_merge/importDataLastS.py +21,,pytest,python3 test.py -f import_merge/importDataLastHPO.py +21,,pytest,python3 test.py -f import_merge/importDataHO.py +21,,pytest,python3 test.py -f import_merge/importDataH2.py +21,,pytest,python3 test.py -f import_merge/importCacheFileT.py +21,,pytest,python3 test.py -f import_merge/importCacheFileTPO.py +21,,pytest,python3 test.py -f import_merge/importCacheFileTO.py +21,,pytest,python3 test.py -f import_merge/importCacheFileSub.py +21,,pytest,python3 test.py -f import_merge/importCacheFileS.py +21,,pytest,python3 test.py -f import_merge/importCacheFileHPO.py +21,,pytest,python3 test.py -f import_merge/importCacheFileHO.py +20,,script,./test.sh -f unique/account/user_create.sim +20,,script,./test.sh -f general/parser/auto_create_tb_drop_tb.sim +20,,script,./test.sh -f general/import/basic.sim +20,2,script,./test.sh -f general/alter/dnode.sim +20,,pytest,python3 test.py -f query/query.py +20,,pytest,python3 test.py -f import_merge/importLastTO.py +20,,pytest,python3 test.py -f import_merge/importDataSub.py +20,,pytest,python3 test.py -f import_merge/importDataLastSub.py +19,,script,./test.sh -f unique/stable/dnode2.sim +19,,script,./test.sh -f general/db/vnodes.sim +19,,pytest,python3 test.py -f tools/taosdumpTest3.py +19,,pytest,python3 test.py -f query/udf.py +19,,pytest,python3 test.py -f import_merge/importLastTPO.py +19,,pytest,python3 test.py -f import_merge/importDataLastHO.py +19,,pytest,python3 test.py -f import_merge/importDataHPO.py +19,,pytest,python3 test.py -f import_merge/importCSV.py +19,,pytest,python3 test.py -f functions/function_operations.py -r 1 +18,,script,./test.sh -f unique/stable/replica3_dnode6.sim +18,,script,./test.sh -f general/vector/table_field.sim +18,,script,./test.sh -f general/vector/single.sim +18,,script,./test.sh -f general/parser/join.sim +18,,script,./test.sh -f general/insert/query_block2_file.sim +18,,script,./test.sh -f general/db/tables.sim +18,,pytest,python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py +18,,pytest,python3 test.py -f stream/history.py +18,,pytest,python3 test.py -f query/queryTscomputWithNow.py +18,,pytest,python3 test.py -f query/queryInterval.py +17,,script,./test.sh -f unique/account/user_len.sim +17,,script,./test.sh -f general/vector/metrics_mix.sim +17,,script,./test.sh -f general/user/user_create.sim +17,,script,./test.sh -f general/parser/import_commit3.sim +17,,script,./test.sh -f general/parser/function.sim +17,,script,./test.sh -f general/insert/query_file_memory.sim +17,,script,./test.sh -f general/field/binary.sim +17,,script,./test.sh -f general/field/4.sim +17,,pytest,python3 test.py -f subscribe/supertable.py +17,,pytest,python3 test.py -f query/query1970YearsAf.py +16,,system-test,python3 ./test.py -f 2-query/TD-12228.py +16,,script,./test.sh -f unique/account/account_len.sim +16,,script,./test.sh -f general/vector/table_time.sim +16,,script,./test.sh -f general/vector/multi.sim +16,,script,./test.sh -f general/vector/metrics_time.sim +16,,script,./test.sh -f general/vector/metrics_tag.sim +16,,script,./test.sh -f general/vector/metrics_query.sim +16,,script,./test.sh -f general/vector/metrics_field.sim +16,,script,./test.sh -f general/user/pass_len.sim +16,,script,./test.sh -f general/stable/show.sim 
+16,,script,./test.sh -f general/stable/dnode3.sim +16,,script,./test.sh -f general/parser/import_commit2.sim +16,,script,./test.sh -f general/parser/import_commit1.sim +16,,script,./test.sh -f general/parser/fill_stb.sim +16,,script,./test.sh -f general/parser/create_mt.sim +16,,script,./test.sh -f general/insert/query_block1_file.sim +16,,script,./test.sh -f general/field/single.sim +16,,script,./test.sh -f general/field/6.sim +16,2,script,./test.sh -f general/db/topic2.sim +16,,script,./test.sh -f general/connection/connection.sim +16,,pytest,python3 test.py -f query/bug1471.py +16,,pytest,python3 test.py -f import_merge/importSRestart.py +16,,pytest,python3 test.py -f functions/function_last_row.py -r 1 +16,,pytest,python3 test.py -f functions/function_first.py -r 1 +15,,system-test,python3 ./test.py -f 4-taosAdapter/taosAdapter_query.py +15,,script,./test.sh -f unique/db/replica_reduce21.sim +15,,script,./test.sh -f unique/account/paras.sim +15,,script,./test.sh -f unique/account/account_create.sim +15,,script,./test.sh -f general/vector/table_query.sim +15,,script,./test.sh -f general/vector/table_mix.sim +15,,script,./test.sh -f general/user/user_len.sim +15,,script,./test.sh -f general/parser/timestamp.sim +15,,script,./test.sh -f general/parser/tags_filter.sim +15,,script,./test.sh -f general/parser/select_across_vnodes.sim +15,,script,./test.sh -f general/parser/nchar.sim +15,,script,./test.sh -f general/parser/dbtbnameValidate.sim +15,,script,./test.sh -f general/parser/binary_escapeCharacter.sim +15,,script,./test.sh -f general/parser/alter_stable.sim +15,,script,./test.sh -f general/insert/query_block2_memory.sim +15,,script,./test.sh -f general/insert/query_block1_memory.sim +15,,script,./test.sh -f general/insert/basic.sim +15,,script,./test.sh -f general/field/tinyint.sim +15,,script,./test.sh -f general/field/bool.sim +15,,script,./test.sh -f general/field/5.sim +15,,script,./test.sh -f general/cache/new_metrics.sim +15,,script,./test.sh -f general/alter/import.sim +15,,pytest,python3 test.py -f tools/taosdemoTestTblAlt.py +15,,pytest,python3 test.py -f query/queryNormal.py +15,,pytest,python3 test.py -f query/queryLimit.py +15,,pytest,python3 test.py -f query/distinctOneColTb.py +15,,pytest,python3 test.py -f functions/function_sum.py -r 1 +15,,pytest,python3 test.py -f functions/function_spread.py -r 1 +15,,pytest,python3 test.py -f functions/function_min.py -r 1 +15,,pytest,python3 test.py -f functions/function_max.py -r 1 +15,,pytest,python3 test.py -f functions/function_last.py -r 1 +15,,pytest,python3 test.py -f functions/function_avg.py -r 1 +14,,system-test,python3 ./test.py -f 5-taos-tools/TD-12478.py +14,,script,./test.sh -f unique/account/basic.sim +14,,script,./test.sh -f general/table/limit.sim +14,,script,./test.sh -f general/table/createmulti.sim +14,,script,./test.sh -f general/parser/null_char.sim +14,,script,./test.sh -f general/parser/insert_tb.sim +14,,script,./test.sh -f general/parser/fill.sim +14,,script,./test.sh -f general/parser/create_tb.sim +14,,script,./test.sh -f general/parser/create_db.sim +14,,script,./test.sh -f general/parser/alter.sim +14,,script,./test.sh -f general/field/smallint.sim +14,,script,./test.sh -f general/field/bigint.sim +14,,script,./test.sh -f general/field/3.sim +14,,script,./test.sh -f general/field/2.sim +14,,script,./test.sh -f general/compute/avg.sim +# n14,,pytest,python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +14,,pytest,python3 test.py -f query/queryWithTaosdKilled.py +14,,pytest,python3 
test.py -f query/queryStddevWithGroupby.py +14,,pytest,python3 test.py -f query/queryJoin10tables.py +14,,pytest,python3 test.py -f insert/insertFromCSV.py +14,,pytest,python3 test.py -f import_merge/importSubRestart.py +14,,pytest,python3 test.py -f import_merge/importBlock1T.py +14,,pytest,python3 test.py -f functions/function_twa.py -r 1 +14,,pytest,python3 test.py -f functions/function_top.py -r 1 +14,,pytest,python3 test.py -f functions/function_stddev.py -r 1 +14,,pytest,python3 test.py -f functions/function_percentile.py -r 1 +14,,pytest,python3 test.py -f functions/function_leastsquares.py -r 1 +14,,pytest,python3 test.py -f functions/function_diff.py -r 1 +14,,pytest,python3 test.py -f functions/function_count.py -r 1 +14,,pytest,python3 test.py -f client/noConnectionErrorTest.py +14,,develop-test,bash 3-connectors/rust/test.sh +13,,system-test,python3 ./test.py -f 5-taos-tools/basic.py +13,,system-test,bash 3-connectors/go/test.sh +13,,script,./test.sh -f unique/account/pass_len.sim +13,,script,./test.sh -f general/table/vgroup.sim +13,,script,./test.sh -f general/table/tinyint.sim +13,,script,./test.sh -f general/table/float.sim +13,,script,./test.sh -f general/stable/vnode3.sim +13,,script,./test.sh -f general/stable/values.sim +13,,script,./test.sh -f general/stable/refcount.sim +13,,script,./test.sh -f general/parser/tags_dynamically_specifiy.sim +13,,script,./test.sh -f general/parser/select_distinct_tag.sim +13,,script,./test.sh -f general/compute/sum.sim +13,,script,./test.sh -f general/compute/percentile.sim +13,,script,./test.sh -f general/compute/min.sim +13,,script,./test.sh -f general/compute/last.sim +13,,script,./test.sh -f general/compute/bottom.sim +13,,pytest,python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py +13,,pytest,python3 test.py -f query/queryInsertValue.py +13,,pytest,python3 test.py -f query/queryGroupTbname.py +13,,pytest,python3 test.py -f insert/openTsdbTelnetLinesInsert.py +13,,pytest,python3 test.py -f insert/metadataUpdate.py +13,,pytest,python3 test.py -f insert/before_1970.py +13,,pytest,python3 test.py -f import_merge/importTRestart.py +13,,pytest,python3 test.py -f import_merge/importTPORestart.py +13,,pytest,python3 test.py -f import_merge/importTORestart.py +13,,pytest,python3 test.py -f import_merge/importTailPartOverlap.py +13,,pytest,python3 test.py -f import_merge/importLastSub.py +13,,pytest,python3 test.py -f import_merge/importHead.py +13,,pytest,python3 test.py -f import_merge/importDataLastT.py +13,,pytest,python3 test.py -f import_merge/importBlockbetween.py +13,,pytest,python3 test.py -f import_merge/importBlock2T.py +13,,pytest,python3 test.py -f import_merge/importBlock2TO.py +13,,pytest,python3 test.py -f import_merge/importBlock2Sub.py +13,,pytest,python3 test.py -f import_merge/importBlock1TO.py +13,,pytest,python3 test.py -f import_merge/importBlock1S.py +13,,pytest,python3 test.py -f import_merge/importBlock1HO.py +13,,pytest,python3 test.py -f functions/function_bottom.py -r 1 +13,,develop-test,bash 3-connectors/restful/test.sh +12,,system-test,bash 3-connectors/rust/test.sh +12,,system-test,bash 3-connectors/restful/test.sh +12,,system-test,bash 3-connectors/java/test.sh +12,,script,./test.sh -f general/table/table.sim +12,,script,./test.sh -f general/table/table_len.sim +12,,script,./test.sh -f general/table/int.sim +12,,script,./test.sh -f general/table/double.sim +12,,script,./test.sh -f general/table/describe.sim +12,,script,./test.sh -f general/table/date.sim +12,,script,./test.sh 
-f general/table/column_num.sim +12,,script,./test.sh -f general/table/column_name.sim +12,,script,./test.sh -f general/table/column2.sim +12,,script,./test.sh -f general/table/bool.sim +12,,script,./test.sh -f general/table/binary.sim +12,,script,./test.sh -f general/table/bigint.sim +12,,script,./test.sh -f general/table/basic3.sim +12,,script,./test.sh -f general/table/basic2.sim +12,,script,./test.sh -f general/parser/udf.sim +12,,script,./test.sh -f general/parser/udf_dll.sim +12,,script,./test.sh -f general/parser/columnValue.sim +12,,script,./test.sh -f general/db/len.sim +12,,script,./test.sh -f general/db/basic.sim +12,,script,./test.sh -f general/db/basic5.sim +12,,script,./test.sh -f general/db/basic3.sim +12,,script,./test.sh -f general/db/basic2.sim +12,,script,./test.sh -f general/db/basic1.sim +12,,script,./test.sh -f general/db/alter_option.sim +12,,script,./test.sh -f general/compute/top.sim +12,,script,./test.sh -f general/compute/stddev.sim +12,,script,./test.sh -f general/compute/null.sim +12,,script,./test.sh -f general/compute/max.sim +12,,script,./test.sh -f general/compute/leastsquare.sim +12,,script,./test.sh -f general/compute/interval.sim +12,,script,./test.sh -f general/compute/first.sim +12,,script,./test.sh -f general/compute/diff.sim +12,,script,./test.sh -f general/compute/count.sim +12,,pytest,python3 test.py -f update/allow_update-0.py +12,,pytest,python3 test.py -f table/alter_wal0.py +12,,pytest,python3 test.py -f import_merge/importTail.py +12,,pytest,python3 test.py -f import_merge/importTailOverlap.py +12,,pytest,python3 test.py -f import_merge/importSpan.py +12,,pytest,python3 test.py -f import_merge/importLastS.py +12,,pytest,python3 test.py -f import_merge/importLastH.py +12,,pytest,python3 test.py -f import_merge/importLastHO.py +12,,pytest,python3 test.py -f import_merge/importInsertThenImport.py +12,,pytest,python3 test.py -f import_merge/importDataT.py +12,,pytest,python3 test.py -f import_merge/importDataTPO.py +12,,pytest,python3 test.py -f import_merge/importDataS.py +12,,pytest,python3 test.py -f import_merge/importDataLastTPO.py +12,,pytest,python3 test.py -f import_merge/importDataLastTO.py +12,,pytest,python3 test.py -f import_merge/importBlock2TPO.py +12,,pytest,python3 test.py -f import_merge/importBlock2S.py +12,,pytest,python3 test.py -f import_merge/importBlock2H.py +12,,pytest,python3 test.py -f import_merge/importBlock2HPO.py +12,,pytest,python3 test.py -f import_merge/importBlock2HO.py +12,,pytest,python3 test.py -f import_merge/importBlock1TPO.py +12,,pytest,python3 test.py -f import_merge/importBlock1H.py +12,,pytest,python3 test.py -f import_merge/importBlock1HPO.py +12,,pytest,python3 test.py -f functions/variable_httpDbNameMandatory.py +12,,pytest,python3 test.py -f functions/function_round.py +12,,pytest,python3 test.py -f functions/function_percentile2.py +12,,pytest,python3 test.py -f functions/function_ceil.py +11,,script,./test.sh -f unique/stable/replica2_dnode4.sim +11,,script,./test.sh -f general/table/smallint.sim +11,,script,./test.sh -f general/table/db.table.sim +11,,script,./test.sh -f general/table/column_value.sim +11,,script,./test.sh -f general/table/basic1.sim +11,,script,./test.sh -f general/table/autocreate.sim +11,,script,./test.sh -f general/parser/udf_dll_stable.sim +11,,script,./test.sh -f general/parser/stableOp.sim +11,,script,./test.sh -f general/parser/having.sim +11,,script,./test.sh -f general/parser/having_child.sim +11,,script,./test.sh -f general/parser/between_and.sim +11,,script,./test.sh 
-f general/db/basic4.sim +11,,pytest,python3 testNoCompress.py +11,,pytest,python3 test.py -f import_merge/importToCommit.py +11,,pytest,python3 test.py -f import_merge/importLastHPO.py +11,,pytest,python3 test.py -f import_merge/importDataTO.py +11,,pytest,python3 test.py -f import_merge/importBlock1Sub.py +10,,system-test,python3 ./test.py -f 2-query/TD-12344.py +10,,script,./test.sh -f unique/stable/replica2_vnode3.sim +10,,pytest,python3 testCompress.py +10,,pytest,python3 test.py -f client/client.py +9,,script,./test.sh -f general/parser/alter1.sim +9,,script,./test.sh -f general/db/delete.sim +9,,pytest,python3 test.py -f tools/taosdemoTestLimitOffset.py +9,,pytest,python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py +9,,pytest,python3 test.py -f stream/showStreamExecTimeisNull.py +9,,pytest,python3 test.py -f query/bug1876.py +9,,pytest,python3 test.py -f alter/alter_table_crash.py +9,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeSmallInt.py +9,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeFloat.py +8,,pytest,python3 test.py -f tools/taosdumpTest.py +8,,pytest,python3 test.py -f query/unionAllTest.py +8,,pytest,python3 test.py -f query/queryFilterTswithDateUnit.py +8,,pytest,python3 test.py -f query/queryDiffColsTagsAndOr.py +8,,pytest,python3 test.py -f query/nestedQuery/nestedQuery_datacheck.py +8,,pytest,python3 test.py -f query/bug1874.py +8,,pytest,python3 test.py -f functions/function_floor.py +8,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeUnsignedTinyInt.py +8,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeUnsignedBigInt.py +8,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeTinyInt.py +8,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeInt.py +8,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeDouble.py +## 7,,system-test,python3 ./test.py -f 2-query/TD-12191.py +7,,pytest,python3 test.py -f tools/taosdumpTest2.py +7,,pytest,python3 test.py -f tools/taosdemoTestdatatype.py +7,,pytest,python3 test.py -f tag_lite/unsignedInt.py +7,,pytest,python3 test.py -f query/bug1875.py +7,,pytest,python3 test.py -f functions/function_stateWindow.py +7,,pytest,python3 test.py -f client/version.py +7,,pytest,python3 client/twoClients.py +7,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeUnsignedSmallInt.py +7,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeUnsignedInt.py +7,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeJson.py +7,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeBool.py +7,,develop-test,python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeBigInt.py +6,,system-test,python3 ./test.py -f 2-query/TD-12229.py +6,,system-test,python3 ./test.py -f 2-query/TD-11943.py +## 6,,system-test,python3 ./test.py -f 2-query/TD-11483.py +6,,system-test,python3 ./test.py -f 2-query/function_elapsed.py +6,,pytest,python3 test.py -f wal/addOldWalTest.py +6,,pytest,python3 test.py -f topic/topicQuery.py +6,,pytest,python3 test.py -f tools/taosdemoTestWithoutMetric.py +6,,pytest,python3 test.py -f tools/taosdemoTest.py +6,,pytest,python3 test.py -f tag_lite/unsignedSmallint.py +6,,pytest,python3 test.py -f tag_lite/double.py +6,,pytest,python3 test.py -f tag_lite/alter_tag.py +6,,pytest,python3 test.py -f tag_lite/add.py +6,,pytest,python3 test.py -f tag_lite/6.py 
+6,,pytest,python3 test.py -f table/boundary.py +6,,pytest,python3 test.py -f query/subqueryFilter.py +6,,pytest,python3 test.py -f query/queryStableJoin.py +6,,pytest,python3 test.py -f query/querySort.py +6,,pytest,python3 test.py -f query/queryJoin.py +6,,pytest,python3 test.py -f query/queryGroupbySort.py +6,,pytest,python3 test.py -f query/queryFillTest.py +6,,pytest,python3 test.py -f query/filterAllUnsignedIntTypes.py +6,,pytest,python3 test.py -f query/bug2218.py +6,,pytest,python3 test.py -f query/bug2118.py +6,,pytest,python3 test.py -f query/bug2117.py +6,,pytest,python3 test.py -f perfbenchmark/bug3433.py +6,,pytest,python3 test.py -f insert/insert_before_use_db.py +6,,pytest,python3 test.py -f import_merge/importHPORestart.py +6,,pytest,python3 test.py -f import_merge/importHORestart.py +6,,pytest,python3 test.py -f functions/showOfflineThresholdIs864000.py +6,,pytest,python3 test.py -f functions/function_elapsed.py +6,,pytest,python3 test.py -f alter/alterColMultiTimes.py +6,,develop-test,python3 ./test.py -f 2-query/ts_2016.py +6,,develop-test,python3 ./test.py -f 2-query/escape.py +5,,system-test,python3 ./test.py -f 4-taosAdapter/taosAdapter_insert.py +5,,system-test,python3 ./test.py -f 2-query/TD-12340-12342.py +5,,system-test,python3 ./test.py -f 2-query/TD-12276.py +5,,system-test,python3 ./test.py -f 2-query/TD-12165.py +5,,system-test,python3 ./test.py -f 2-query/TD-12164.py +5,,system-test,python3 ./test.py -f 2-query/TD-12145.py +5,,system-test,python3 ./test.py -f 2-query/TD-11945_crash.py +5,,system-test,python3 ./test.py -f 2-query/TD-11256.py +5,,system-test,python3 test.py -f 1-insert/TD-11970.py +5,,system-test,python3 test.py -f 1-insert/Null_tag_Line_insert.py +5,,pytest,python3 test.py -f user/user_create.py +5,,pytest,python3 test.py -f tools/taosdemoTestWithJson.py +5,,pytest,python3 test.py -f tools/taosdemoTestSampleData.py +5,,pytest,python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py +5,,pytest,python3 testMinTablesPerVnode.py +5,,pytest,python3 test.py -f tag_lite/unsignedTinyint.py +5,,pytest,python3 test.py -f tag_lite/timestamp.py +5,,pytest,python3 test.py -f tag_lite/TestModifyTag.py +5,,pytest,python3 test.py -f tag_lite/json_tag_extra.py +5,,pytest,python3 test.py -f tag_lite/int_binary.py +5,,pytest,python3 test.py -f tag_lite/float.py +5,,pytest,python3 test.py -f tag_lite/drop_auto_create.py +5,,pytest,python3 test.py -f tag_lite/create.py +5,,pytest,python3 test.py -f tag_lite/change.py +5,,pytest,python3 test.py -f tag_lite/bool.py +5,,pytest,python3 test.py -f tag_lite/bool_binary.py +5,,pytest,python3 test.py -f tag_lite/5.py +5,,pytest,python3 test.py -f tag_lite/4.py +5,,pytest,python3 test.py -f table/db_table.py +5,,pytest,python3 test.py -f table/create_sensitive.py +5,,pytest,python3 test.py -f table/column_num.py +5,,pytest,python3 test.py -f subscribe/singlemeter.py +5,,pytest,python3 test.py -f stable/insert.py +5,,pytest,python3 test.py -f query/querySession.py +5,,pytest,python3 test.py -f query/querySecondtscolumnTowherenow.py +5,,pytest,python3 test.py -f query/queryRegex.py +5,,pytest,python3 test.py -f query/queryGroupbyWithInterval.py +5,,pytest,python3 test.py -f query/queryCountCSVData.py +5,,pytest,python3 test.py -f query/queryCnameDisplay.py +5,,pytest,python3 test.py -f query/queryBetweenAnd.py +5,,pytest,python3 test.py -f query/nestquery_last_row.py +5,,pytest,python3 test.py -f query/nestedQuery/queryWithSpread.py +5,,pytest,python3 test.py -f query/nestedQuery/queryInterval.py 
+5,,pytest,python3 test.py -f query/isNullTest.py +5,,pytest,python3 test.py -f query/filterOtherTypes.py +5,,pytest,python3 test.py -f query/filterFloatAndDouble.py +5,,pytest,python3 test.py -f query/computeErrorinWhere.py +5,,pytest,python3 test.py -f query/bug6586.py +5,,pytest,python3 test.py -f query/bug2143.py +5,,pytest,python3 test.py -f query/bug2119.py +5,,pytest,python3 test.py -f insert/unsignedTinyint.py +5,,pytest,python3 test.py -f insert/unsignedSmallint.py +5,,pytest,python3 test.py -f insert/unsignedInt.py +5,,pytest,python3 test.py -f insert/nchar-unicode.py +5,,pytest,python3 test.py -f insert/insert_locking.py +5,,pytest,python3 test.py -f insert/in_function.py +5,,pytest,python3 test.py -f insert/binary.py +5,,pytest,python3 test.py -f import_merge/importHRestart.py +5,,pytest,python3 test.py -f import_merge/importHeadOverlap.py +5,,pytest,python3 test.py -f functions/function_twa_test2.py +5,,pytest,python3 test.py -f functions/function_irate.py +5,,pytest,python3 test.py -f functions/function_derivative.py +5,,pytest,python3 test.py -f functions/function_count_last_stab.py +5,,pytest,python3 test.py -f functions/all_null_value.py +5,,pytest,python3 test.py -f client/nettest.py +5,,pytest,python3 test.py -f client/alterDatabase.py +5,,pytest,python3 test.py -f alter/alterTimestampColDataProcess.py +5,,pytest,python3 test.py -f alter/alter_keep.py +5,,pytest,python3 test.py -f account/account_create.py +5,,develop-test,python3 ./test.py -f 2-query/union-order.py +5,,develop-test,python3 ./test.py -f 2-query/timeline_agg_func_groupby.py +5,,develop-test,python3 ./test.py -f 2-query/session_two_stage.py +5,,develop-test,python3 ./test.py -f 0-others/TD-12435.py +5,,develop-test,python3 ./test.py -f 0-others/json_tag.py +4,,system-test,python3 test.py -f 4-taosAdapter/TD-12163.py +4,,system-test,python3 ./test.py -f 3-connectors/restful/restful_binddbname.py +4,,system-test,python3 ./test.py -f 2-query/TD-12614.py +4,,system-test,python3 ./test.py -f 2-query/TD-12014.py +4,,system-test,python3 ./test.py -f 2-query/TD-11978.py +4,,system-test,python3 ./test.py -f 2-query/TD-11969.py +4,,system-test,python3 ./test.py -f 2-query/TD-11561.py +4,,system-test,python3 test.py -f 1-insert/stmt_error.py +4,,pytest,python3 test.py -f user/pass_len.py +4,,pytest,python3 test.py -f TimeZone/TestCaseTimeZone.py +4,,pytest,python3 test.py -f tag_lite/unsignedBigint.py +4,,pytest,python3 test.py -f tag_lite/tinyint.py +4,,pytest,python3 test.py -f tag_lite/smallint.py +4,,pytest,python3 test.py -f tag_lite/set.py +4,,pytest,python3 test.py -f tag_lite/int.py +4,,pytest,python3 test.py -f tag_lite/int_float.py +4,,pytest,python3 test.py -f tag_lite/filter.py +4,,pytest,python3 test.py -f tag_lite/delete.py +4,,pytest,python3 test.py -f tag_lite/create-tags-boundary.py +4,,pytest,python3 test.py -f tag_lite/commit.py +4,,pytest,python3 test.py -f tag_lite/column.py +4,,pytest,python3 test.py -f tag_lite/bool_int.py +4,,pytest,python3 test.py -f tag_lite/binary_binary.py +4,,pytest,python3 test.py -f tag_lite/bigint.py +4,,pytest,python3 test.py -f tag_lite/3.py +4,,pytest,python3 test.py -f table/tablename-boundary.py +4,,pytest,python3 test.py -f table/max_table_length.py +4,,pytest,python3 test.py -f table/del_stable.py +4,,pytest,python3 test.py -f table/create_db_from_normal_db.py +4,,pytest,python3 test.py -f table/column_name.py +4,,pytest,python3 test.py -f table/alter_column.py +4,,pytest,python3 test.py -f query/sliding.py +4,,pytest,python3 test.py -f 
query/queryWildcardLength.py +4,,pytest,python3 test.py -f query/queryTsisNull.py +4,,pytest,python3 test.py -f query/queryTbnameUpperLower.py +4,,pytest,python3 test.py -f query/queryPriKey.py +4,,pytest,python3 test.py -f query/queryError.py +4,,pytest,python3 test.py -f query/queryBase.py +4,,pytest,python3 test.py -f query/natualInterval.py +4,,pytest,python3 test.py -f query/floatCompare.py +4,,pytest,python3 test.py -f query/filter.py +4,,pytest,python3 test.py -f query/filterCombo.py +4,,pytest,python3 test.py -f query/bug3375.py +4,,pytest,python3 test.py -f query/bug3351.py +4,,pytest,python3 test.py -f query/bug2281.py +4,,pytest,python3 test.py -f insert/unsignedBigint.py +4,,pytest,python3 test.py -f insert/tinyint.py +4,,pytest,python3 test.py -f insert/timestamp.py +4,,pytest,python3 test.py -f insert/specialSql.py +4,,pytest,python3 test.py -f insert/special_character_show.py +4,,pytest,python3 test.py -f insert/smallint.py +4,,pytest,python3 test.py -f insert/nchar.py +4,,pytest,python3 test.py -f insert/multi.py +4,,pytest,python3 test.py -f insert/modify_column.py +4,,pytest,python3 test.py -f insert/int.py +4,,pytest,python3 test.py -f insert/insertIntoTwoTables.py +4,,pytest,python3 test.py -f insert/insertDynamicColBeforeVal.py +4,,pytest,python3 test.py -f insert/float.py +4,,pytest,python3 test.py -f insert/double.py +4,,pytest,python3 test.py -f insert/date.py +4,,pytest,python3 test.py -f insert/bug3654.py +4,,pytest,python3 test.py -f insert/bool.py +4,,pytest,python3 test.py -f insert/bigint.py +4,,pytest,python3 test.py -f insert/basic.py +4,,pytest,python3 test.py -f insert/alterTableAndInsert.py +4,,pytest,python3 test.py -f import_merge/importHeadPartOverlap.py +4,,pytest,python3 test.py -f functions/function_stddev_td2555.py +4,,pytest,python3 test.py -f dbmgmt/nanoSecondCheck.py +4,,pytest,python3 bug2265.py +4,,pytest,python3 test.py -f alter/alterTabAddTagWithNULL.py +4,,pytest,python3 test.py -f alter/alter_debugFlag.py +4,,pytest,python3 test.py -f alter/alter_create_exception.py +3,,pytest,python3 test.py -f tag_lite/binary.py +3,,pytest,python3 test.py -f query/filterAllIntTypes.py +3,,develop-test,python3 ./test.py -f 2-query/ts_hidden_column.py +#2,,coredump-test,./test.sh diff --git a/tests/parallel_test/m.json b/tests/parallel_test/m.json new file mode 100644 index 0000000000000000000000000000000000000000..f86c571728962783867680901dfa2611c37e660e --- /dev/null +++ b/tests/parallel_test/m.json @@ -0,0 +1,30 @@ +[{ + "host":"192.168.0.210", + "username":"root", + "workdir":"/var/data/jenkins/workspace", + "thread":25 +}, +{ + "host":"192.168.0.211", + "username":"root", + "workdir":"/var/data/jenkins/workspace", + "thread":25 +}, +{ + "host":"192.168.0.212", + "username":"root", + "workdir":"/var/data/jenkins/workspace", + "thread":25 +}, +{ + "host":"192.168.0.213", + "username":"root", + "workdir":"/var/data/jenkins/workspace", + "thread":25 +}, +{ + "host":"192.168.0.214", + "username":"root", + "workdir":"/var/data/jenkins/workspace", + "thread":25 +}] diff --git a/tests/parallel_test/run.sh b/tests/parallel_test/run.sh new file mode 100755 index 0000000000000000000000000000000000000000..026bfb020d9a77d5cd1b05e9030cfce69a7ba4c7 --- /dev/null +++ b/tests/parallel_test/run.sh @@ -0,0 +1,358 @@ +#!/bin/bash + +function usage() { + echo "$0" + echo -e "\t -m vm config file" + echo -e "\t -t task file" + echo -e "\t -b branch" + echo -e "\t -l log dir" + echo -e "\t -h help" +} + +while getopts "m:t:b:l:h" opt; do + case $opt in + m) + 
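# [editor's note] The -m argument expects a JSON host list like the
# tests/parallel_test/m.json file added above: an array of objects with "host",
# "username", "workdir" and "thread" keys, plus an optional "password" per host
# (when it is absent, the script falls back from sshpass to plain ssh).
# A quick sanity check of such a file -- a sketch, assuming jq is installed:
#
#     jq -r '.[] | "\(.host) \(.username) \(.workdir) \(.thread)"' m.json
#
# Any "null" in the output points at a missing or misspelled key.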
+            config_file=$OPTARG
+            ;;
+        t)
+            t_file=$OPTARG
+            ;;
+        b)
+            branch=$OPTARG
+            ;;
+        l)
+            log_dir=$OPTARG
+            ;;
+        h)
+            usage
+            exit 0
+            ;;
+        \?)
+            echo "Invalid option: -$OPTARG"
+            usage
+            exit 0
+            ;;
+    esac
+done
+#config_file=$1
+if [ -z $config_file ]; then
+    usage
+    exit 1
+fi
+if [ ! -f $config_file ]; then
+    echo "$config_file not found"
+    usage
+    exit 1
+fi
+#t_file=$2
+if [ -z $t_file ]; then
+    usage
+    exit 1
+fi
+if [ ! -f $t_file ]; then
+    echo "$t_file not found"
+    usage
+    exit 1
+fi
+date_tag=`date +%Y%m%d-%H%M%S`
+if [ -z $log_dir ]; then
+    log_dir="log/${branch}_${date_tag}"
+else
+    log_dir="$log_dir/${branch}_${date_tag}"
+fi
+
+hosts=()
+usernames=()
+passwords=()
+workdirs=()
+threads=()
+
+i=0
+while [ 1 ]; do
+    host=`jq .[$i].host $config_file`
+    if [ "$host" = "null" ]; then
+        break
+    fi
+    username=`jq .[$i].username $config_file`
+    if [ "$username" = "null" ]; then
+        break
+    fi
+    password=`jq .[$i].password $config_file`
+    if [ "$password" = "null" ]; then
+        password=""
+    fi
+    workdir=`jq .[$i].workdir $config_file`
+    if [ "$workdir" = "null" ]; then
+        break
+    fi
+    thread=`jq .[$i].thread $config_file`
+    if [ "$thread" = "null" ]; then
+        break
+    fi
+    hosts[i]=`echo $host|sed 's/\"$//'|sed 's/^\"//'`
+    usernames[i]=`echo $username|sed 's/\"$//'|sed 's/^\"//'`
+    passwords[i]=`echo $password|sed 's/\"$//'|sed 's/^\"//'`
+    workdirs[i]=`echo $workdir|sed 's/\"$//'|sed 's/^\"//'`
+    threads[i]=$thread
+    i=$(( i + 1 ))
+done
+
+
+function prepare_cases() {
+    cat $t_file >>$task_file
+    local i=0
+    while [ $i -lt $1 ]; do
+        echo "%%FINISHED%%" >>$task_file
+        i=$(( i + 1 ))
+    done
+}
+
+function clean_tmp() {
+    # clean tmp dir
+    local index=$1
+    local ssh_script="sshpass -p ${passwords[index]} ssh -o StrictHostKeyChecking=no ${usernames[index]}@${hosts[index]}"
+    if [ -z ${passwords[index]} ]; then
+        ssh_script="ssh -o StrictHostKeyChecking=no ${usernames[index]}@${hosts[index]}"
+    fi
+    local cmd="${ssh_script} rm -rf ${workdirs[index]}/tmp"
+    ${cmd}
+}
+# build source
+function build_src() {
+    echo "build source"
+    local index=$1
+    local ssh_script="sshpass -p ${passwords[index]} ssh -o StrictHostKeyChecking=no ${usernames[index]}@${hosts[index]}"
+    if [ -z ${passwords[index]} ]; then
+        ssh_script="ssh -o StrictHostKeyChecking=no ${usernames[index]}@${hosts[index]}"
+    fi
+    local script=". ~/.bashrc;cd ${workdirs[index]}/TDinternal;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true;make -j8;make install"
+    local cmd="${ssh_script} sh -c \"$script\""
+    echo "$cmd"
+    ${cmd}
+    if [ $? -ne 0 ]; then
+        flock -x $lock_file -c "echo \"${hosts[index]} TDengine build failed\" >>$log_dir/failed.log"
+        return
+    fi
+    script=". ~/.bashrc;cd ${workdirs[index]}/taos-tools;git submodule update --init --recursive;mkdir -p build;cd build;cmake ..;make -j4"
+    cmd="${ssh_script} sh -c \"$script\""
+    ${cmd}
+    if [ $? -ne 0 ]; then
+        flock -x $lock_file -c "echo \"${hosts[index]} taos-tools build failed\" >>$log_dir/failed.log"
+        return
+    fi
+    script="cp -rf ${workdirs[index]}/taos-tools/build/build/bin/* ${workdirs[index]}/TDinternal/debug/build/bin/;cp -rf ${workdirs[index]}/taos-tools/build/build/lib/* ${workdirs[index]}/TDinternal/debug/build/lib/;cp -rf ${workdirs[index]}/taos-tools/build/build/lib64/* ${workdirs[index]}/TDinternal/debug/build/lib/;cp -rf ${workdirs[index]}/TDinternal/debug/build/bin/taosBenchmark ${workdirs[index]}/TDinternal/debug/build/bin/taosdemo"
+    cmd="${ssh_script} sh -c \"$script\""
+    ${cmd}
+}
+function rename_taosdemo() {
+    local index=$1
+    local ssh_script="sshpass -p ${passwords[index]} ssh -o StrictHostKeyChecking=no ${usernames[index]}@${hosts[index]}"
+    if [ -z ${passwords[index]} ]; then
+        ssh_script="ssh -o StrictHostKeyChecking=no ${usernames[index]}@${hosts[index]}"
+    fi
+    local script="cp -rf ${workdirs[index]}/TDinternal/debug/build/bin/taosBenchmark ${workdirs[index]}/TDinternal/debug/build/bin/taosdemo 2>/dev/null"
+    cmd="${ssh_script} sh -c \"$script\""
+    ${cmd}
+}
+
+function run_thread() {
+    local index=$1
+    local thread_no=$2
+    local runcase_script="sshpass -p ${passwords[index]} ssh -o StrictHostKeyChecking=no ${usernames[index]}@${hosts[index]}"
+    if [ -z ${passwords[index]} ]; then
+        runcase_script="ssh -o StrictHostKeyChecking=no ${usernames[index]}@${hosts[index]}"
+    fi
+    local count=0
+    local script="${workdirs[index]}/TDinternal/community/tests/parallel_test/run_container.sh"
+    local cmd="${runcase_script} ${script}"
+
+    # script="echo"
+    while [ 1 ]; do
+        local line=`flock -x $lock_file -c "head -n1 $task_file;sed -i \"1d\" $task_file"`
+        if [ "x$line" = "x%%FINISHED%%" ]; then
+            # echo "$index . $thread_no EXIT"
+            break
+        fi
+        if [ -z "$line" ]; then
+            continue
+        fi
+        echo "$line"|grep -q "^#"
+        if [ $? -eq 0 ]; then
+            continue
+        fi
+        local case_redo_time=`echo "$line"|cut -d, -f2`
+        if [ -z "$case_redo_time" ]; then
+            case_redo_time=${DEFAULT_RETRY_TIME:-2}
+        fi
+        local exec_dir=`echo "$line"|cut -d, -f3`
+        local case_cmd=`echo "$line"|cut -d, -f4`
+        local case_file=""
+        echo "$case_cmd"|grep -q "^python3"
+        if [ $? -eq 0 ]; then
+            case_file=`echo "$case_cmd"|grep -o ".*\.py"|awk '{print $NF}'`
+        fi
+        echo "$case_cmd"|grep -q "\.sim"
+        if [ $? -eq 0 ]; then
+            case_file=`echo "$case_cmd"|grep -o ".*\.sim"|awk '{print $NF}'`
+        fi
+        if [ -z "$case_file" ]; then
+            case_file=`echo "$case_cmd"|awk '{print $NF}'`
+        fi
+        if [ -z "$case_file" ]; then
+            continue
+        fi
+        case_file="$exec_dir/${case_file}.${index}.${thread_no}"
+        count=$(( count + 1 ))
+        local case_path=`dirname "$case_file"`
+        if [ ! -z "$case_path" ]; then
+            mkdir -p $log_dir/$case_path
+        fi
+        cmd="${runcase_script} ${script} -w ${workdirs[index]} -c \"${case_cmd}\" -t ${thread_no} -d ${exec_dir}"
+        # echo "$thread_no $count $cmd"
+        local ret=0
+        local redo_count=1
+        start_time=`date +%s`
+        while [ ${redo_count} -lt 6 ]; do
+            echo "${hosts[index]}-${thread_no} order:${count}, redo:${redo_count} task:${line}" >$log_dir/$case_file.log
+            echo -e "\e[33m >>>>> \e[0m ${case_cmd}"
+            date >>$log_dir/$case_file.log
+            # $cmd 2>&1 | tee -a $log_dir/$case_file.log
+            # ret=${PIPESTATUS[0]}
+            $cmd >>$log_dir/$case_file.log 2>&1
+            ret=$?
+            if [ $ret -eq 0 ]; then
+                break
+            fi
+            redo=0
+            grep -q "wait too long for taosd start" $log_dir/$case_file.log
+            if [ $? -eq 0 ]; then
+                redo=1
+            fi
+            grep -q "kex_exchange_identification: Connection closed by remote host" $log_dir/$case_file.log
+            if [ $? -eq 0 ]; then
+                redo=1
+            fi
+            grep -q "ssh_exchange_identification: Connection closed by remote host" $log_dir/$case_file.log
+            if [ $? -eq 0 ]; then
+                redo=1
+            fi
+            grep -q "kex_exchange_identification: read: Connection reset by peer" $log_dir/$case_file.log
+            if [ $? -eq 0 ]; then
+                redo=1
+            fi
+            grep -q "Database not ready" $log_dir/$case_file.log
+            if [ $? -eq 0 ]; then
+                redo=1
+            fi
+            grep -q "Unable to establish connection" $log_dir/$case_file.log
+            if [ $? -eq 0 ]; then
+                redo=1
+            fi
+            if [ $redo_count -lt $case_redo_time ]; then
+                redo=1
+            fi
+            if [ $redo -eq 0 ]; then
+                break
+            fi
+            redo_count=$(( redo_count + 1 ))
+        done
+        end_time=`date +%s`
+        echo >>$log_dir/$case_file.log
+        echo "${hosts[index]} execute time: $(( end_time - start_time ))s" >>$log_dir/$case_file.log
+        # echo "$thread_no ${line} DONE"
+        if [ $ret -ne 0 ]; then
+            flock -x $lock_file -c "echo \"${hosts[index]} ret:${ret} ${line}\" >>$log_dir/failed.log"
+            mkdir -p $log_dir/${case_file}.coredump
+            local remote_coredump_dir="${workdirs[index]}/tmp/thread_volume/$thread_no/coredump"
+            cmd="sshpass -p ${passwords[index]} scp -o StrictHostKeyChecking=no ${usernames[index]}@${hosts[index]}:${remote_coredump_dir}/* $log_dir/${case_file}.coredump/"
+            if [ -z ${passwords[index]} ]; then
+                cmd="scp -o StrictHostKeyChecking=no ${usernames[index]}@${hosts[index]}:${remote_coredump_dir}/* $log_dir/${case_file}.coredump/"
+            fi
+            $cmd # 2>/dev/null
+            local case_info=`echo "$line"|cut -d, -f 3,4`
+            local corefile=`ls $log_dir/${case_file}.coredump/`
+            corefile=`find $log_dir/${case_file}.coredump/ -name "core.*"`
+            echo -e "$case_info \e[31m failed\e[0m"
+            echo "=========================log============================"
+            cat $log_dir/$case_file.log
+            echo "====================================================="
+            echo -e "\e[34m log file: $log_dir/$case_file.log \e[0m"
+            if [ ! -z "$corefile" ]; then
+                echo -e "\e[34m corefiles: $corefile \e[0m"
+            fi
+        fi
+    done
+}
+
+# echo "hosts: ${hosts[@]}"
+# echo "usernames: ${usernames[@]}"
+# echo "passwords: ${passwords[@]}"
+# echo "workdirs: ${workdirs[@]}"
+# echo "threads: ${threads[@]}"
+# TODO: check host accessibility
+
+i=0
+while [ $i -lt ${#hosts[*]} ]; do
+    clean_tmp $i &
+    i=$(( i + 1 ))
+done
+wait
+
+mkdir -p $log_dir
+rm -rf $log_dir/*
+task_file=$log_dir/$$.task
+lock_file=$log_dir/$$.lock
+
+i=0
+while [ $i -lt ${#hosts[*]} ]; do
+    # build_src $i &
+    rename_taosdemo $i &
+    i=$(( i + 1 ))
+done
+wait
+# if [ -f "$log_dir/failed.log" ]; then
+# cat $log_dir/failed.log
+# exit 1
+# fi
+
+i=0
+j=0
+while [ $i -lt ${#hosts[*]} ]; do
+    j=$(( j + threads[i] ))
+    i=$(( i + 1 ))
+done
+prepare_cases $j
+
+i=0
+while [ $i -lt ${#hosts[*]} ]; do
+    j=0
+    while [ $j -lt ${threads[i]} ]; do
+        run_thread $i $j &
+        j=$(( j + 1 ))
+    done
+    i=$(( i + 1 ))
+done
+
+wait
+
+rm -f $lock_file
+rm -f $task_file
+
+# docker ps -a|grep -v CONTAINER|awk '{print $1}'|xargs docker rm -f
+RET=0
+i=1
+if [ -f "$log_dir/failed.log" ]; then
+    echo "====================================================="
+    while read line; do
+        line=`echo "$line"|cut -d, -f 3,4`
+        echo -e "$i. $line \e[31m failed\e[0m" >&2
+        i=$(( i + 1 ))
+    done <$log_dir/failed.log
+    RET=1
+fi
+
+echo "${log_dir}" >&2
+
+date
+
+exit $RET
diff --git a/tests/parallel_test/run_case.sh b/tests/parallel_test/run_case.sh
new file mode 100755
index 0000000000000000000000000000000000000000..5b7802ac2b346547e4d2cd171e93c1d5937a5360
--- /dev/null
+++ b/tests/parallel_test/run_case.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+CONTAINER_TESTDIR=/home/community
+# CONTAINER_TESTDIR=/root/tang/repository/TDengine
+
+# export PATH=$PATH:$CONTAINER_TESTDIR/debug/build/bin
+
+function usage() {
+    echo "$0"
+    echo -e "\t -d execution dir"
+    echo -e "\t -c command"
+    echo -e "\t -h help"
+}
+
+while getopts "d:c:h" opt; do
+    case $opt in
+        d)
+            exec_dir=$OPTARG
+            ;;
+        c)
+            cmd=$OPTARG
+            ;;
+        h)
+            usage
+            exit 0
+            ;;
+        \?)
+            echo "Invalid option: -$OPTARG"
+            usage
+            exit 0
+            ;;
+    esac
+done
+
+if [ -z "$exec_dir" ]; then
+    usage
+    exit 0
+fi
+if [ -z "$cmd" ]; then
+    usage
+    exit 0
+fi
+
+go env -w GOPROXY=https://goproxy.cn
+echo "StrictHostKeyChecking no" >>/etc/ssh/ssh_config
+ln -s /home/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null
+npm config -g set unsafe-perm
+npm config -g set registry https://registry.npm.taobao.org
+mkdir -p /home/sim/tsim
+mkdir -p /var/lib/taos/subscribe
+rm -rf ${CONTAINER_TESTDIR}/src/connector/nodejs/node_modules
+rm -rf ${CONTAINER_TESTDIR}/tests/examples/nodejs/node_modules
+rm -rf ${CONTAINER_TESTDIR}/tests/connectorTest/nodejsTest/nanosupport/node_modules
+# ln -s /home/node_modules ${CONTAINER_TESTDIR}/src/connector/nodejs/
+# ln -s /home/node_modules ${CONTAINER_TESTDIR}/tests/examples/nodejs/
+# ln -s /home/node_modules ${CONTAINER_TESTDIR}/tests/connectorTest/nodejsTest/nanosupport/
+# echo "$cmd"|grep -q "nodejs"
+# if [ $? -eq 0 ]; then
+# cd $CONTAINER_TESTDIR/src/connector/nodejs
+# npm install node-gyp-build@4.3.0 --ignore-scripts
+# fi
+
+cd $CONTAINER_TESTDIR/tests/$exec_dir
+ulimit -c unlimited
+
+$cmd
+RET=$?
+
+if [ $RET -ne 0 ]; then
+    pwd
+fi
+
+exit $RET
+
diff --git a/tests/parallel_test/run_container.sh b/tests/parallel_test/run_container.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e0a2fc4dc8679356122afc6679e6748ab2f8f9e6
--- /dev/null
+++ b/tests/parallel_test/run_container.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+function usage() {
+    echo "$0"
+    echo -e "\t -w work dir"
+    echo -e "\t -d execution dir"
+    echo -e "\t -c command"
+    echo -e "\t -t thread number"
+    echo -e "\t -h help"
+}
+
+while getopts "w:d:c:t:h" opt; do
+    case $opt in
+        w)
+            WORKDIR=$OPTARG
+            ;;
+        d)
+            exec_dir=$OPTARG
+            ;;
+        c)
+            cmd=$OPTARG
+            ;;
+        t)
+            thread_no=$OPTARG
+            ;;
+        h)
+            usage
+            exit 0
+            ;;
+        \?)
+            echo "Invalid option: -$OPTARG"
+            usage
+            exit 0
+            ;;
+    esac
+done
+
+if [ -z "$WORKDIR" ]; then
+    usage
+    exit 1
+fi
+if [ -z "$exec_dir" ]; then
+    usage
+    exit 1
+fi
+if [ -z "$cmd" ]; then
+    usage
+    exit 1
+fi
+if [ -z "$thread_no" ]; then
+    usage
+    exit 1
+fi
+
+ulimit -c unlimited
+
+INTERNAL_REPDIR=$WORKDIR/TDinternal
+REPDIR=$INTERNAL_REPDIR/community
+CONTAINER_TESTDIR=/home/community
+TMP_DIR=$WORKDIR/tmp
+
+MOUNT_DIR=""
+mkdir -p ${TMP_DIR}/thread_volume/$thread_no/sim/tsim
+mkdir -p ${TMP_DIR}/thread_volume/$thread_no/node_modules
+mkdir -p ${TMP_DIR}/thread_volume/$thread_no/coredump
+rm -rf ${TMP_DIR}/thread_volume/$thread_no/coredump/*
+if [ ! -d "${TMP_DIR}/thread_volume/$thread_no/$exec_dir" ]; then
+    subdir=`echo "$exec_dir"|cut -d/ -f1`
+    echo "cp -rf ${REPDIR}/tests/$subdir ${TMP_DIR}/thread_volume/$thread_no/"
+    cp -rf ${REPDIR}/tests/$subdir ${TMP_DIR}/thread_volume/$thread_no/
+fi
+MOUNT_DIR="$TMP_DIR/thread_volume/$thread_no/$exec_dir:$CONTAINER_TESTDIR/tests/$exec_dir"
+echo "$thread_no -> ${exec_dir}:$cmd"
+echo "$cmd"|grep -q "nodejs"
+if [ $? -eq 0 ]; then
+    MOUNT_NODE_MOD="-v $TMP_DIR/thread_volume/$thread_no/node_modules:${CONTAINER_TESTDIR}/src/connector/nodejs/node_modules \
+-v $TMP_DIR/thread_volume/$thread_no/node_modules:${CONTAINER_TESTDIR}/tests/examples/nodejs/node_modules \
+-v $TMP_DIR/thread_volume/$thread_no/node_modules:${CONTAINER_TESTDIR}/tests/connectorTest/nodejsTest/nanosupport/node_modules"
+fi
+if [ -f "$REPDIR/src/plugins/taosadapter/example/config/taosadapter.toml" ]; then
+    TAOSADAPTER_TOML="-v $REPDIR/src/plugins/taosadapter/example/config/taosadapter.toml:/etc/taos/taosadapter.toml:ro"
+fi
+
+docker run \
+    -v $REPDIR/tests:$CONTAINER_TESTDIR/tests \
+    -v $MOUNT_DIR \
+    -v "$TMP_DIR/thread_volume/$thread_no/sim:${CONTAINER_TESTDIR}/sim" \
+    -v ${TMP_DIR}/thread_volume/$thread_no/coredump:/home/coredump \
+    -v $INTERNAL_REPDIR/debug:/home/debug:ro \
+    -v $REPDIR/deps:$CONTAINER_TESTDIR/deps:ro \
+    -v $REPDIR/src:$CONTAINER_TESTDIR/src \
+    -v $REPDIR/src/inc/taos.h:/usr/include/taos.h:ro \
+    $TAOSADAPTER_TOML \
+    -v $REPDIR/tests/examples:$CONTAINER_TESTDIR/tests/examples \
+    -v $REPDIR/snap:$CONTAINER_TESTDIR/snap:ro \
+    -v $REPDIR/alert:$CONTAINER_TESTDIR/alert:ro \
+    -v $REPDIR/packaging/cfg/taos.cfg:/etc/taos/taos.cfg:ro \
+    -v $REPDIR/packaging:$CONTAINER_TESTDIR/packaging:ro \
+    -v $REPDIR/README.md:$CONTAINER_TESTDIR/README.md:ro \
+    -v $REPDIR/src/connector/python/taos:/usr/local/lib/python3.8/site-packages/taos:ro \
+    -e LD_LIBRARY_PATH=/home/debug/build/lib:/home/debug/build/lib64 \
+    -e PATH=/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/home/debug/build/bin:/usr/local/go/bin:/usr/local/node-v12.20.0-linux-x64/bin:/usr/local/apache-maven-3.8.4/bin:/usr/local/jdk1.8.0_144/bin \
+    -e JAVA_HOME=/usr/local/jdk1.8.0_144 \
+    --rm --ulimit core=-1 taos_test:v1.0 $CONTAINER_TESTDIR/tests/parallel_test/run_case.sh -d "$exec_dir" -c "$cmd"
+ret=$?
+exit $ret + diff --git a/tests/pytest/alter/alterBackQuoteCol.py b/tests/pytest/alter/alterBackQuoteCol.py new file mode 100644 index 0000000000000000000000000000000000000000..e929b9bc8cd51277d87e5125735441b1e429cdeb --- /dev/null +++ b/tests/pytest/alter/alterBackQuoteCol.py @@ -0,0 +1,69 @@ + +# -*- coding: utf-8 -*- + +import random +import string +import subprocess +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + def run(self): + tdLog.debug("check databases") + tdSql.prepare() + + ### test normal table + tdSql.execute("create database if not exists db") + tdSql.execute("use db") + tdSql.execute("create stable `sch.job.create` (`ts` TIMESTAMP, `tint` int, `node.value` NCHAR(7)) TAGS (`endpoint` NCHAR(7),`task.type` NCHAR(3))") + tdSql.execute("alter table `sch.job.create` modify tag `task.type` NCHAR(4)") + tdSql.execute("alter table `sch.job.create` change tag `task.type` `chan.type`") + tdSql.execute("alter table `sch.job.create` drop tag `chan.type`") + tdSql.execute("alter table `sch.job.create` add tag `add.type` NCHAR(6)") + tdSql.query("describe `sch.job.create`") + tdSql.checkData(4, 0, "add.type") + + tdSql.execute("alter table `sch.job.create` modify column `node.value` NCHAR(8)") + tdSql.execute("alter table `sch.job.create` drop column `node.value`") + tdSql.execute("alter table `sch.job.create` add column `add.value` NCHAR(6)") + + tdSql.query("describe `sch.job.create`") + tdSql.checkData(2, 0, "add.value") + + tdSql.execute("insert into `tsch.job.create` using `sch.job.create`(`add.type`) TAGS('tag1') values(now, 1, 'here')") + tdSql.execute("alter table `tsch.job.create` set tag `add.type` = 'tag2'") + tdSql.query("select `add.type` from `tsch.job.create`") + tdSql.checkData(0, 0, "tag2") + + ### test stable + tdSql.execute("create stable `ssch.job.create` (`ts` TIMESTAMP, `tint` int, `node.value` NCHAR(7)) TAGS (`endpoint` NCHAR(7),`task.type` NCHAR(3))") + tdSql.execute("alter stable `ssch.job.create` modify tag `task.type` NCHAR(4)") + tdSql.execute("alter stable `ssch.job.create` change tag `task.type` `chan.type`") + tdSql.execute("alter stable `ssch.job.create` drop tag `chan.type`") + tdSql.execute("alter stable `ssch.job.create` add tag `add.type` NCHAR(6)") + tdSql.query("describe `ssch.job.create`") + tdSql.checkData(4, 0, "add.type") + + tdSql.execute("alter stable `ssch.job.create` modify column `node.value` NCHAR(8)") + tdSql.execute("alter stable `ssch.job.create` drop column `node.value`") + tdSql.execute("alter stable `ssch.job.create` add column `add.value` NCHAR(6)") + + tdSql.query("describe `ssch.job.create`") + tdSql.checkData(2, 0, "add.value") + + tdSql.execute("insert into `tssch.job.create` using `ssch.job.create`(`add.type`) TAGS('tag1') values(now, 1, 'here')") + tdSql.error("alter stable `tssch.job.create` set tag `add.type` = 'tag2'") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index a208eaeb1302f4e20e34291db9f4a95b334865a8..b6f367c5ef5989add11015d69a5f0cb3afa9e730 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -289,6 +289,7 @@ python3 ./test.py -f query/bug6586.py # python3 ./test.py -f query/bug5903.py python3 
./test.py -f query/queryLimit.py python3 ./test.py -f query/queryPriKey.py +python3 ./test.py -f query/queryNcharNull.py #stream python3 ./test.py -f stream/metric_1.py @@ -416,6 +417,7 @@ python3 ./test.py -f insert/flushwhiledrop.py python3 ./test.py -f alter/alterColMultiTimes.py python3 ./test.py -f query/queryWildcardLength.py python3 ./test.py -f query/queryTbnameUpperLower.py +python3 ./test.py -f alter/alterBackQuoteCol.py python3 ./test.py -f query/query.py python3 ./test.py -f query/queryDiffColsTagsAndOr.py diff --git a/tests/pytest/functions/variable_httpDbNameMandatory.py b/tests/pytest/functions/variable_httpDbNameMandatory.py index 1cd2516ec9303f2f038bc22a69afcd01bce3a930..40415f4d9b8b0c0b48b1e87cb7e35d53af928ed3 100644 --- a/tests/pytest/functions/variable_httpDbNameMandatory.py +++ b/tests/pytest/functions/variable_httpDbNameMandatory.py @@ -130,10 +130,17 @@ class TDTestCase: if 'httpDbNameMandatory' not in rj: tdLog.info('has no httpDbNameMandatory shown') tdLog.exit(1) - if rj['httpDbNameMandatory'] != '1': + val = None + pname = 'taosadapter' # httpDbNameMandatory doesn't work in taosadapter + cmd = 'ps -ef|grep %s|grep -v "grep"' % pname + p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) + if p.wait() == 0: + val = p.stdout.read() + if rj['httpDbNameMandatory'] != '1' and pname not in str(val): tdLog.info('httpDbNameMandatory data:%s == expect:0'%rj['httpDbNameMandatory']) tdLog.exit(1) - tdLog.info("httpDbNameMandatory by restful query data:%s == expect:1" % (rj['httpDbNameMandatory'])) + if pname not in str(val): + tdLog.info("httpDbNameMandatory by restful query data:%s == expect:1" % (rj['httpDbNameMandatory'])) def run(self): diff --git a/tests/pytest/query/queryLimit.py b/tests/pytest/query/queryLimit.py index b7761ddf2a5594637140ae2b4748df1b1df157f5..2faf7fd633fa3530709a88740eeefcabfe69d4ac 100644 --- a/tests/pytest/query/queryLimit.py +++ b/tests/pytest/query/queryLimit.py @@ -56,6 +56,13 @@ class TDTestCase: self.test_case2() tdLog.debug(" LIMIT test_case2 ............ [OK]") + + # insert data + self.insert_data("t2", self.ts, 100*10000, 30000) + self.insert_data("t3", self.ts, 200*10000, 30000) + # test super table + self.test_limit() + tdLog.debug(" LIMIT test super table ............
[OK]") + # stop def stop(self): @@ -186,6 +193,31 @@ class TDTestCase: tdSql.waitedQuery(sql, 3, WAITS) tdSql.checkData(0, 1, 1) + # test limit + def test_limit(self): + # + # base test + # + + # offset + sql = "select * from st order by ts limit 20" + tdSql.waitedQuery(sql, 20, WAITS) + tdSql.checkData(19, 1, 6) + sql = "select * from st order by ts desc limit 20" + tdSql.waitedQuery(sql, 20, WAITS) + tdSql.checkData(19, 1, 2999980) + sql = "select * from st where ts>='2017-07-14 10:40:10' and ts<'2017-07-22 18:40:10' order by ts limit 16;" + tdSql.waitedQuery(sql, 16, WAITS) + tdSql.checkData(15, 1, 15) + sql = "select * from st where ts>='2017-07-14 10:40:10' and ts<'2017-07-22 18:40:10' order by ts desc limit 16;" + tdSql.waitedQuery(sql, 16, WAITS) + tdSql.checkData(15, 1, 720004) + sql = "select * from st where ts>='2017-07-14 10:40:10' and ts<'2017-07-22 18:40:10' order by ts desc limit 16;" + tdSql.waitedQuery(sql, 16, WAITS) + tdSql.checkData(15, 1, 720004) + sql = "select * from st where ts>='2017-07-14 10:40:10' and ts<'2017-07-22 18:40:10' order by ts desc limit 16 offset 3;" + tdSql.waitedQuery(sql, 16, WAITS) + tdSql.checkData(15, 1, 720003) # # add case with filename diff --git a/tests/pytest/query/queryNcharNull.py b/tests/pytest/query/queryNcharNull.py new file mode 100644 index 0000000000000000000000000000000000000000..75565afd58f6663c510a2b1735f34097cc795e83 --- /dev/null +++ b/tests/pytest/query/queryNcharNull.py @@ -0,0 +1,73 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create table stb1 (ts TIMESTAMP, id INT, col1 NCHAR(20), col2 BINARY(30), col3 FLOAT) TAGS (tid INT, name BINARY(20))" + ) + + tdSql.execute( + "insert into tb1 using stb1 tags(1, 'ABC') values (now - 1m, 1, '北京', '朝阳', 3.141)" + ) + + tdSql.execute( + "insert into tb1 using stb1 tags(1, 'ABC') values (now, 2, NULL, NULL, 3.141)" + ) + + tdSql.query( + "select * from (select * from stb1) where col1 = '北京'" + ) + + tdSql.checkData(0, 2, '北京') + + tdSql.execute( + "create table normal1 (ts TIMESTAMP, id INT, col1 NCHAR(20), col2 BINARY(30), col3 FLOAT)" + ) + + tdSql.execute( + "insert into normal1 values (now - 1m, 1, '北京', '朝阳', 3.141)" + ) + + tdSql.execute( + "insert into normal1 values (now, 1, NULL, NULL, 3.141)" + ) + + tdSql.query( + "select * from (select * from normal1) where col1 = '北京'" + ) + + tdSql.checkData(0, 2, '北京') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/queryNormal.py b/tests/pytest/query/queryNormal.py index 
a1789c8909f542ba3dcae83042ab50cde9e58e32..983efa5aa64ca84c206e24897478f4e00d584127 100644 --- a/tests/pytest/query/queryNormal.py +++ b/tests/pytest/query/queryNormal.py @@ -149,6 +149,27 @@ class TDTestCase: tdSql.checkData(1, 0, "2020-03-01 00:01:01") tdSql.checkData(1, 1, 421) tdSql.checkData(1, 2, "tm1") + + # TD-12980 + if platform.system() == "Linux": + types = ["tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned"] + ts = 1640000000000 + + for type in types: + tdSql.execute("drop table if exists csvtest") + tdSql.execute("create table csvtest(ts timestamp, c1 %s)" % type) + for i in range(10): + tdSql.execute("insert into csvtest values(%d, %d)" % (ts + i, i)) + + os.system("taos -s 'select c1 from db.csvtest >> a.csv'") + + tdSql.query("select c1 from csvtest") + for i in range(10): + r = os.popen("sed -n %dp a.csv" % (i + 2)) + data = r.read() + tdSql.checkData(i, 0, int(data)) + + os.system("rm -rf a.csv") def stop(self): tdSql.close() diff --git a/tests/pytest/test-all.bat b/tests/pytest/test-all.bat index 1f1e2c1727527e91f7632213992607d6221eac85..fae9c4deaf9687634dd6fd8f4129f269c226659d 100644 --- a/tests/pytest/test-all.bat +++ b/tests/pytest/test-all.bat @@ -1,10 +1,20 @@ @echo off SETLOCAL EnableDelayedExpansion for /F "tokens=1,2 delims=#" %%a in ('"prompt #$H#$E# & echo on & for %%b in (1) do rem"') do ( set "DEL=%%a") +set /a a=0 +echo Windows Taosd Test for /F "usebackq tokens=*" %%i in (fulltest.bat) do ( echo Processing %%i - call %%i ARG1 -w 1 -m %1 > result.txt 2>error.txt - if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. ) + set /a a+=1 + call %%i ARG1 -w -m localhost > result_!a!.txt 2>error_!a!.txt + if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. ) +) +echo Linux Taosd Test +for /F "usebackq tokens=*" %%i in (fulltest.bat) do ( + echo Processing %%i + set /a a+=1 + call %%i ARG1 -w 1 -m %1 > result_!a!.txt 2>error_!a!.txt + if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && exit 8 ) else ( call :colorEcho 0a "Success" &echo. 
) ) exit diff --git a/tests/pytest/tools/taosdemoAllTest/insert-chinese-sml.json b/tests/pytest/tools/taosdemoAllTest/insert-chinese-sml.json index 638462518654dae797520bb6ea7db98ad5993b3b..49407a76d76edda3c45716134521265114702f11 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-chinese-sml.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-chinese-sml.json @@ -84,7 +84,7 @@ "sample_file": "./sample.csv", "tags_file": "", "columns": [{"type": "INT"}], - "tags": [{"type": "TINYINT", "count":2}] + "tags": [{"type": "nchar", "count":2}] }, { "name": "stb5", @@ -111,7 +111,7 @@ "sample_file": "./sample.csv", "tags_file": "", "columns": [{"type": "INT"}], - "tags": [{"type": "TINYINT"}] + "tags": [{"type": "nchar"}] }] }] } diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonSml.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonSml.py index 1a11d5c60cf8bbf97f4dd66a11e7fa85ba04a98c..ee86fefda4397bba4470a1dd9c32b78a0866b45c 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonSml.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonSml.py @@ -131,10 +131,10 @@ class TDTestCase: tdSql.execute("drop database if exists db") os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertSigcolumnsNum4096-sml.json -y " % binPath) tdSql.error("select * from db.stb0") - # tdSql.execute("drop database if exists db") - # os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json -y " % binPath) - # tdSql.query("select count(*) from db.stb0") - # tdSql.checkData(0, 0, 10000) + tdSql.execute("drop database if exists db") + os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json -y " % binPath) + tdSql.query("select count(*) from db.stb0") + tdSql.checkData(0, 0, 10000) # there is no limit of 4096 columns,so cancels this case # tdSql.execute("drop database if exists db") diff --git a/tests/pytest/tools/taosdump-insert-dp1.json b/tests/pytest/tools/taosdump-insert-dp1.json new file mode 100644 index 0000000000000000000000000000000000000000..6481197bd6649576650ebeb95350ea50a31c1c1a --- /dev/null +++ b/tests/pytest/tools/taosdump-insert-dp1.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 8, + "thread_count_create_tbl": 8, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10000, + "num_of_records_per_req": 10000000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "dp1", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "st2", + "child_table_exists":"no", + "childtable_count": 100002, + "childtable_prefix": "st2_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, 
{"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdump-insert-dp2.json b/tests/pytest/tools/taosdump-insert-dp2.json new file mode 100644 index 0000000000000000000000000000000000000000..384a905c737911214fab72a95c9e771c895f98fd --- /dev/null +++ b/tests/pytest/tools/taosdump-insert-dp2.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 8, + "thread_count_create_tbl": 8, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10000, + "num_of_records_per_req": 10000000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "dp2", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "st2", + "child_table_exists":"no", + "childtable_count": 100002, + "childtable_prefix": "st2_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdumpTest3.py b/tests/pytest/tools/taosdumpTest3.py index ed83fe1ffb39f24f6eb7073d91bcaf62437527c3..702b2bf0c4e1fb8cd1116328ca8c7658f2a56452 100644 --- a/tests/pytest/tools/taosdumpTest3.py +++ b/tests/pytest/tools/taosdumpTest3.py @@ -54,20 +54,13 @@ class TDTestCase: def run(self): if not os.path.exists("./taosdumptest"): os.makedirs("./taosdumptest") - if not os.path.exists("./taosdumptest/tmp1"): - os.makedirs("./taosdumptest/tmp1") - if not os.path.exists("./taosdumptest/tmp2"): - os.makedirs("./taosdumptest/tmp2") - if not os.path.exists("./taosdumptest/tmp3"): - os.makedirs("./taosdumptest/tmp3") - if not os.path.exists("./taosdumptest/tmp4"): - os.makedirs("./taosdumptest/tmp4") - if not os.path.exists("./taosdumptest/tmp5"): - os.makedirs("./taosdumptest/tmp5") - if not os.path.exists("./taosdumptest/tmp6"): - os.makedirs("./taosdumptest/tmp6") - if not os.path.exists("./taosdumptest/tmp7"): - os.makedirs("./taosdumptest/tmp7") + for i in range(1,9): + if not os.path.exists("./taosdumptest/tmp%d"%i): + os.makedirs("./taosdumptest/tmp%d"%i) + else: + os.system("rm -rf ./taosdumptest/tmp%d"%i) + os.makedirs("./taosdumptest/tmp%d"%i) + buildPath = self.getBuildPath() if (buildPath == ""): tdLog.exit("taosdump not found!") @@ -122,31 +115,30 @@ class TDTestCase: tdSql.execute("insert into st0_1 values(1614218413000000001,8601,'A')(1614218423000000002,8601,'D')") - # tdSql.execute("insert into t0 values(1614218422000,8638,'R')") - os.system("rm -rf 
./taosdumptest/tmp1/*") - os.system("rm -rf ./taosdumptest/tmp2/*") - os.system("rm -rf ./taosdumptest/tmp3/*") - os.system("rm -rf ./taosdumptest/tmp4/*") - os.system("rm -rf ./taosdumptest/tmp5/*") - # # taosdump stable and general table - os.system("%staosdump -o ./taosdumptest/tmp1 -D dp1,dp2 " % binPath) - os.system("%staosdump -o ./taosdumptest/tmp2 dp1 st0 gt0 " % binPath) - os.system("%staosdump -o ./taosdumptest/tmp3 dp2 st0 st1_0 gt0" % binPath) - os.system("%staosdump -o ./taosdumptest/tmp4 dp2 st0 st2 gt0 gt2" % binPath) + os.system("%staosdump -o ./taosdumptest/tmp1 -D dp1,dp2 -T 8 -B 1000" % binPath) + os.system("%staosdump -o ./taosdumptest/tmp2 dp1 st0 gt0 -T 8 -B 1000" % binPath) + os.system("%staosdump -o ./taosdumptest/tmp3 dp2 st0 st1_0 gt0 -T 8 -B 1000" % binPath) + os.system("%staosdump -o ./taosdumptest/tmp4 dp2 st0 st2 gt0 gt2 -T 8 -B 1000" % binPath) # verify ns - os.system("%staosdump -o ./taosdumptest/tmp6 dp3 st0_0" % binPath) + os.system("%staosdump -o ./taosdumptest/tmp6 dp3 st0_0 -T 8 -B 1000" % binPath) # verify -D:--database - os.system("%staosdump -o ./taosdumptest/tmp5 --databases dp1,dp2 " % binPath) + assert os.system("%staosdump -o ./taosdumptest/tmp5 --databases dp1,dp2 -T 8 -B 1000" % binPath) == 0 # verify mixed -D:--database and dbname tbname - assert os.system("%staosdump --databases dp1 -o ./taosdumptest/tmp5 dp2 st0 st1_0 gt0" % binPath) != 0 + assert os.system("%staosdump --databases dp1 -o ./taosdumptest/tmp5 dp2 st0 st1_0 gt0 -T 8 -B 1000" % binPath) != 0 + + # verify -N + os.system("%staosdump -o ./taosdumptest/tmp7 dp3 st0_0 -N -d null -T 8 -B 1000" % binPath) + + # verify -N -s + os.system("%staosdump -o ./taosdumptest/tmp8 dp3 st0_0 -N -s -T 8 -B 1000" % binPath) #check taosdumptest/tmp1 tdSql.execute("drop database dp1") tdSql.execute("drop database dp2") - os.system("%staosdump -i ./taosdumptest/tmp1 -T 2 " % binPath) + os.system("%staosdump -i ./taosdumptest/tmp1 -T 8 " % binPath) tdSql.execute("use dp1") tdSql.query("show stables") tdSql.checkRows(1) @@ -175,7 +167,7 @@ class TDTestCase: #check taosdumptest/tmp2 tdSql.execute("drop database dp1") tdSql.execute("drop database dp2") - os.system("%staosdump -i ./taosdumptest/tmp2 -T 2 " % binPath) + os.system("%staosdump -i ./taosdumptest/tmp2 -T 8 " % binPath) tdSql.execute("use dp1") tdSql.query("show stables") tdSql.checkRows(1) @@ -194,7 +186,7 @@ class TDTestCase: #check taosdumptest/tmp3 tdSql.execute("drop database dp1") - os.system("%staosdump -i ./taosdumptest/tmp3 -T 2 " % binPath) + os.system("%staosdump -i ./taosdumptest/tmp3 -T 8 " % binPath) tdSql.execute("use dp2") tdSql.query("show stables") tdSql.checkRows(2) @@ -210,7 +202,7 @@ class TDTestCase: #check taosdumptest/tmp4 tdSql.execute("drop database dp2") - os.system("%staosdump -i ./taosdumptest/tmp4 -T 2 " % binPath) + os.system("%staosdump -i ./taosdumptest/tmp4 -T 8 " % binPath) tdSql.execute("use dp2") tdSql.query("show stables") tdSql.checkRows(2) @@ -234,7 +226,7 @@ class TDTestCase: #check taosdumptest/tmp5 tdSql.execute("drop database dp2") - os.system("%staosdump -i ./taosdumptest/tmp5 -T 2 " % binPath) + os.system("%staosdump -i ./taosdumptest/tmp5 -T 8 " % binPath) tdSql.execute("use dp2") tdSql.query("show stables") tdSql.checkRows(3) @@ -267,12 +259,15 @@ class TDTestCase: tdSql.checkData(0,0,'2021-02-25 10:00:12.000') tdSql.checkData(0,1,637) - #check taosdumptest/tmp6 + # check taosdumptest/tmp6 tdSql.execute("drop database dp1") tdSql.execute("drop database dp2") tdSql.execute("drop database dp3") - 
os.system("%staosdump -i ./taosdumptest/tmp6 -T 2 " % binPath) + os.system("%staosdump -i ./taosdumptest/tmp6 -T 8 " % binPath) tdSql.execute("use dp3") + tdSql.query("show databases") + tdSql.checkRows(1) + tdSql.checkData(0,16,'ns') tdSql.query("show stables") tdSql.checkRows(1) tdSql.query("show tables") @@ -283,11 +278,39 @@ class TDTestCase: tdSql.checkData(0,0,'2021-02-25 10:00:12.000000001') tdSql.checkData(0,1,8600) - os.system("rm -rf ./taosdumptest/tmp1") - os.system("rm -rf ./taosdumptest/tmp2") - os.system("rm -rf ./taosdumptest/tmp3") - os.system("rm -rf ./taosdumptest/tmp4") - os.system("rm -rf ./taosdumptest/tmp5") + # check taosdumptest/tmp7 + tdSql.execute("drop database dp3") + os.system("%staosdump -i ./taosdumptest/tmp7 -T 8 " % binPath) + tdSql.execute("use dp3") + tdSql.query("show databases") + tdSql.checkRows(1) + tdSql.checkData(0,16,'ms') + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.query("show tables") + tdSql.checkRows(1) + tdSql.query("select count(*) from st0_0") + tdSql.checkRows(0) + # tdSql.query("select * from st0 order by ts") + # tdSql.checkData(0,0,'2021-02-25 10:00:12.000000001') + # tdSql.checkData(0,1,8600) + + # check taosdumptest/tmp8 + tdSql.execute("drop database dp3") + os.system("%staosdump -i ./taosdumptest/tmp8 -T 8 " % binPath) + tdSql.execute("use dp3") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.query("show tables") + tdSql.checkRows(1) + tdSql.query("select count(*) from st0_0") + tdSql.checkRows(0) + # tdSql.query("select * from st0 order by ts") + # tdSql.checkData(0,0,'2021-02-25 10:00:12.000000001') + # tdSql.checkData(0,1,8600) + + for i in range(1,9): + os.system("rm -rf ./taosdumptest/tmp%d"%i) os.system("rm -rf ./dump_result.txt") os.system("rm -rf ./db.csv") diff --git a/tests/pytest/tools/taosdumpTestBenchmark.py b/tests/pytest/tools/taosdumpTestBenchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..63af4b62b5a84444dd97b3889b7e1115aeaabb7c --- /dev/null +++ b/tests/pytest/tools/taosdumpTestBenchmark.py @@ -0,0 +1,393 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1538548685000 + self.numberOfTables = 10000 + self.numberOfRecords = 100 + + def checkCommunity(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + return False + else: + return True + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def insert_data(self, tbname, ts_start, count): + pre_insert = "insert into %s values"%tbname + sql = pre_insert + tdLog.debug("doing insert table %s rows=%d ..."%(tbname, count)) + for i in range(count): + sql += " (%d,%d)"%(ts_start + i*1000, i) + if i >0 and i%30000 == 0: + tdSql.execute(sql) + sql = pre_insert + # end sql + if sql != pre_insert: + tdSql.execute(sql) + + tdLog.debug("INSERT TABLE DATA ............ [OK]") + return + + def run(self): + if not os.path.exists("./taosdumptest"): + os.makedirs("./taosdumptest") + else: + os.system("rm -rf ./taosdumptest") + os.makedirs("./taosdumptest") + + for i in range(2): + if not os.path.exists("./taosdumptest/tmp%d"%i): + os.makedirs("./taosdumptest/tmp%d"%i) + else: + os.system("rm -rf ./taosdumptest/tmp%d"%i) + os.makedirs("./taosdumptest/tmp%d"%i) + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + # create db1 , one stables and one table ; create general tables + tdSql.execute("drop database if exists dp1") + tdSql.execute("drop database if exists dp2") + tdSql.execute("create database if not exists dp1") + tdSql.execute("use dp1") + tdSql.execute('''create table st0(ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, + c7 bool, c8 binary(20), c9 nchar(20), c11 tinyint unsigned, c12 smallint unsigned, c13 int unsigned, c14 bigint unsigned, c15 timestamp ) + tags(t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 bool, t8 binary(20), t9 nchar(20), t11 tinyint unsigned, + t12 smallint unsigned, t13 int unsigned, t14 bigint unsigned, t15 timestamp)''') + tdSql.execute('''create table st1(ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, + c7 bool, c8 binary(20), c9 nchar(20), c11 tinyint unsigned, c12 smallint unsigned, c13 int unsigned, c14 bigint unsigned, c15 timestamp ) tags(jtag json)''') + + intData = [] + floatData = [] + rowNum = 10 + tabNum = 10 + ts = 1537146000000 + for j in range(tabNum): + tdSql.execute("create table st0_%d using st0 tags( %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %d);" + % (j, j + 1, j + 1, j + 1, j + 1, j + 0.1, 
j + 0.1, j % 2, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, ts)) + for i in range(rowNum): + tdSql.execute("insert into st0_%d values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %d)" + % (j, ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, ts)) + intData.append(i + 1) + floatData.append(i + 0.1) + rowNum = 20 + tabNum = 20 + for j in range(tabNum): + tdSql.execute("create table st1_%d using st1 tags('{\"nv\":null,\"tea\":true,\"\":false,\" \":123%d,\"tea\":false}');" % (j, j + 1)) + for i in range(rowNum): + tdSql.execute("insert into st1_%d values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %d)" + % (j, self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, self.ts)) + intData.append(i + 1) + floatData.append(i + 0.1) + # os.system("%staosBenchmark -f tools/taosdump-insert-dp1.json -y " % binPath) + + + # create db1 , three stables:stb0,include ctables stb0_0 \ stb0_1,stb1 include ctables stb1_0 and stb1_1 + # \stb3,include ctables stb3_0 and stb3_1 + # create general three tables gt0 gt1 gt2 + tdSql.execute("create database if not exists dp2") + tdSql.execute("use dp2") + tdSql.execute("create stable st0(ts timestamp, c01 int, c02 nchar(10)) tags(t1 int)") + tdSql.execute("create table st0_0 using st0 tags(0) st0_1 using st0 tags(1) ") + tdSql.execute("insert into st0_0 values(1614218412000,8600,'R')(1614218422000,8600,'E')") + tdSql.execute("insert into st0_1 values(1614218413000,8601,'A')(1614218423000,8601,'D')") + tdSql.execute("create stable st1(ts timestamp, c11 float, c12 nchar(10)) tags(t1 int)") + tdSql.execute("create table st1_0 using st1 tags(0) st1_1 using st1 tags(1) ") + tdSql.execute("insert into st1_0 values(1614218412000,8610.1,'R')(1614218422000,8610.1,'E')") + tdSql.execute("insert into st1_1 values(1614218413000,8611.2,'A')(1614218423000,8611.1,'D')") + tdSql.execute("create stable st2(ts timestamp, c21 float, c22 nchar(10)) tags(t1 int)") + tdSql.execute("create table st20 using st2 tags(0) st21 using st2 tags(1) ") + tdSql.execute("insert into st20 values(1614218412000,8620.3,'R')(1614218422000,8620.3,'E')") + tdSql.execute("insert into st21 values(1614218413000,8621.4,'A')(1614218423000,8621.4,'D')") + tdSql.execute("create table if not exists gt0 (ts timestamp, c00 int, c01 float) ") + tdSql.execute("create table if not exists gt1 (ts timestamp, c10 int, c11 double) ") + tdSql.execute("create table if not exists gt2 (ts timestamp, c20 int, c21 float) ") + tdSql.execute("insert into gt0 values(1614218412700,8637,78.86155)") + tdSql.execute("insert into gt1 values(1614218413800,8638,78.862020199)") + tdSql.execute("insert into gt2 values(1614218413900,8639,78.863)") + # self.insert_data("t", self.ts, 300*10000); + # os.system("%staosBenchmark -f tools/taosdump-insert-dp2.json -y " % binPath) + + + + + # # taosdump data + # os.system("%staosdump -o ./taosdumptest/tmp1 taosdump -h -ptaosdata -P 6030 -u root -o taosdumptest \ + # -D dp1,dp3 -N -c /home/chr/TDinternal/community/sim/dnode1/cfg/taos.cfg -s -d deflate" % binPath) + os.system("%staosdump -o ./taosdumptest/tmp0 -D dp2,dp1 -T 8 -B 100000" % binPath) + os.system("%staosdump -o ./taosdumptest/tmp1 dp2 st0 st1_0 gt0 -T 8 -B 1000" % binPath) + + + #check taosdumptest/tmp0 + tdSql.execute("drop database dp1") + tdSql.execute("drop database dp2") + os.system("%staosdump -i ./taosdumptest/tmp0 -T 8 " % binPath) + tdSql.execute("reset query cache") + + 
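(A note on the taosdump flags used in this file and in taosdumpTest3.py above: -o/-i select the dump and restore directory, -D names whole databases, -T sets the thread count and -B the number of rows per batch; -s and -N request schema-only and property-less dumps in the tmp7/tmp8 cases. Flag meanings follow taosdump's help text for this generation of taos-tools; treat them as indicative. The verification pattern is a plain round trip, sketched here with the hypothetical directory ./backup:)

# hedged sketch of the dump -> drop -> restore -> verify cycle used below
os.system("%staosdump -o ./backup -D dp1 -T 8 -B 1000" % binPath)  # dump dp1 with 8 threads
tdSql.execute("drop database dp1")
os.system("%staosdump -i ./backup -T 8" % binPath)                 # restore from the same directory
tdSql.execute("reset query cache")                                 # avoid stale metadata in the checks
tdSql.query("select count(*) from dp1.st0")                        # row counts must survive the round trip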
tdSql.execute("use dp1") + tdSql.query("show stables") + tdSql.checkRows(3) + for i in range(3): + for j in range(3): + if j < 2: + if tdSql.queryResult[i][0] == 'st%d'%j: + tdSql.checkData(i, 4, (j+1)*10) + else: + if tdSql.queryResult[i][0] == 'st%d'%j: + tdSql.checkData(i, 4, 100002) + + tdSql.query("select count(*) from st0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from st1") + tdSql.checkData(0, 0, 400) + tdSql.query("select count(*) from st2") + tdSql.checkData(0, 0, 1000020) + + + tdSql.execute("use dp2") + tdSql.query("show stables") + tdSql.checkRows(3) + for i in range(3): + for j in range(3): + if j < 2: + if tdSql.queryResult[i][0] == 'st%d'%j: + # print(i,"stb%d"%j) + tdSql.checkData(i, 4, 2) + else: + if tdSql.queryResult[i][0] == 'st%d'%j: + tdSql.checkData(i, 4, 100002) + tdSql.query("select count(*) from st0") + tdSql.checkData(0, 0, 4) + tdSql.query("select count(*) from st1") + tdSql.checkData(0, 0, 4) + tdSql.query("select count(*) from st2") + tdSql.checkData(0, 0, 1000024) + tdSql.query("select ts from gt0") + tdSql.checkData(0,0,'2021-02-25 10:00:12.700') + tdSql.query("select c10 from gt1") + tdSql.checkData(0, 0, 8638) + tdSql.query("select c20 from gt2") + tdSql.checkData(0, 0, 8639) + + #check taosdumptest/tmp1 + tdSql.execute("drop database dp1") + tdSql.execute("drop database dp2") + os.system("%staosdump -i ./taosdumptest/tmp1 -T 8 " % binPath) + tdSql.execute("reset query cache") + tdSql.execute("use dp2") + tdSql.query("show stables") + tdSql.checkRows(2) + tdSql.query("show tables") + tdSql.checkRows(4) + tdSql.query("select count(*) from st1_0") + tdSql.checkData(0,0,2) + tdSql.query("select ts from gt0") + tdSql.checkData(0,0,'2021-02-25 10:00:12.700') + tdSql.error("use dp1") + tdSql.error("select count(*) from st2_0") + tdSql.error("select count(*) from gt2") + + + # #check taosdumptest/tmp2 + # tdSql.execute("drop database dp1") + # tdSql.execute("drop database dp2") + # os.system("%staosdump -i ./taosdumptest/tmp2 -T 8 " % binPath) + # tdSql.execute("use dp1") + # tdSql.query("show stables") + # tdSql.checkRows(1) + # tdSql.query("show tables") + # tdSql.checkRows(3) + # tdSql.query("select c1 from st0_0 order by ts") + # tdSql.checkData(0,0,8537) + # tdSql.query("select c2 from st0_1 order by ts") + # tdSql.checkData(1,0,"D") + # tdSql.query("select * from gt0") + # tdSql.checkData(0,0,'2021-02-25 10:00:12.000') + # tdSql.checkData(0,1,637) + # tdSql.error("select count(*) from gt1") + # tdSql.error("use dp2") + + + # #check taosdumptest/tmp3 + # tdSql.execute("drop database dp1") + # os.system("%staosdump -i ./taosdumptest/tmp3 -T 8 " % binPath) + # tdSql.execute("use dp2") + # tdSql.query("show stables") + # tdSql.checkRows(2) + # tdSql.query("show tables") + # tdSql.checkRows(4) + # tdSql.query("select count(*) from st1_0") + # tdSql.checkData(0,0,2) + # tdSql.query("select ts from gt0") + # tdSql.checkData(0,0,'2021-02-25 10:00:12.700') + # tdSql.error("use dp1") + # tdSql.error("select count(*) from st2_0") + # tdSql.error("select count(*) from gt2") + + # #check taosdumptest/tmp4 + # tdSql.execute("drop database dp2") + # os.system("%staosdump -i ./taosdumptest/tmp4 -T 8 " % binPath) + # tdSql.execute("use dp2") + # tdSql.query("show stables") + # tdSql.checkRows(2) + # tdSql.query("show tables") + # tdSql.checkRows(6) + # tdSql.query("select c20 from gt2") + # tdSql.checkData(0, 0, 8639) + # tdSql.query("select count(*) from st0_0") + # tdSql.checkData(0, 0, 2) + # tdSql.query("select count(*) from st0_1") + # 
tdSql.checkData(0, 0, 2) + # tdSql.query("select count(*) from st2_1") + # tdSql.checkData(0, 0, 2) + # tdSql.query("select count(*) from st2_0") + # tdSql.checkData(0, 0, 2) + # tdSql.error("use dp1") + # tdSql.error("select count(*) from st1_0") + # tdSql.error("select count(*) from st1_1") + # tdSql.error("select count(*) from gt3") + + + # #check taosdumptest/tmp5 + # tdSql.execute("drop database dp2") + # os.system("%staosdump -i ./taosdumptest/tmp5 -T 8 " % binPath) + # tdSql.execute("use dp2") + # tdSql.query("show stables") + # tdSql.checkRows(3) + # tdSql.query("show tables") + # tdSql.checkRows(9) + # tdSql.query("select c20 from gt2") + # tdSql.checkData(0, 0, 8639) + # tdSql.query("select count(*) from st0_0") + # tdSql.checkData(0, 0, 2) + # tdSql.query("select count(*) from st0_1") + # tdSql.checkData(0, 0, 2) + # tdSql.query("select count(*) from st2_1") + # tdSql.checkData(0, 0, 2) + # tdSql.query("select count(*) from st2_0") + # tdSql.checkData(0, 0, 2) + # tdSql.query("select count(*) from st1_1") + # tdSql.checkData(0, 0, 2) + # tdSql.query("select count(*) from st1_0") + # tdSql.checkData(0, 0, 2) + # tdSql.execute("use dp1") + # tdSql.query("show stables") + # tdSql.checkRows(1) + # tdSql.query("show tables") + # tdSql.checkRows(4) + # tdSql.query("select c1 from st0_0 order by ts") + # tdSql.checkData(0,0,8537) + # tdSql.query("select c2 from st0_1 order by ts") + # tdSql.checkData(1,0,"D") + # tdSql.query("select * from gt0") + # tdSql.checkData(0,0,'2021-02-25 10:00:12.000') + # tdSql.checkData(0,1,637) + + # # check taosdumptest/tmp6 + # tdSql.execute("drop database dp1") + # tdSql.execute("drop database dp2") + # tdSql.execute("drop database dp3") + # os.system("%staosdump -i ./taosdumptest/tmp6 -T 8 " % binPath) + # tdSql.execute("use dp3") + # tdSql.query("show databases") + # tdSql.checkRows(1) + # tdSql.checkData(0,16,'ns') + # tdSql.query("show stables") + # tdSql.checkRows(1) + # tdSql.query("show tables") + # tdSql.checkRows(1) + # tdSql.query("select count(*) from st0_0") + # tdSql.checkData(0, 0, 2) + # tdSql.query("select * from st0 order by ts") + # tdSql.checkData(0,0,'2021-02-25 10:00:12.000000001') + # tdSql.checkData(0,1,8600) + + # # check taosdumptest/tmp7 + # tdSql.execute("drop database dp3") + # os.system("%staosdump -i ./taosdumptest/tmp7 -T 8 " % binPath) + # tdSql.execute("use dp3") + # tdSql.query("show databases") + # tdSql.checkRows(1) + # tdSql.checkData(0,16,'ms') + # tdSql.query("show stables") + # tdSql.checkRows(1) + # tdSql.query("show tables") + # tdSql.checkRows(1) + # tdSql.query("select count(*) from st0_0") + # tdSql.checkRows(0) + # # tdSql.query("select * from st0 order by ts") + # # tdSql.checkData(0,0,'2021-02-25 10:00:12.000000001') + # # tdSql.checkData(0,1,8600) + + # # check taosdumptest/tmp8 + # tdSql.execute("drop database dp3") + # os.system("%staosdump -i ./taosdumptest/tmp8 -T 8 " % binPath) + # tdSql.execute("use dp3") + # tdSql.query("show stables") + # tdSql.checkRows(1) + # tdSql.query("show tables") + # tdSql.checkRows(1) + # tdSql.query("select count(*) from st0_0") + # tdSql.checkRows(0) + # # tdSql.query("select * from st0 order by ts") + # # tdSql.checkData(0,0,'2021-02-25 10:00:12.000000001') + # # tdSql.checkData(0,1,8600) + + # os.system("rm -rf ./taosdumptest/tmp1") + # os.system("rm -rf ./taosdumptest/tmp2") + # os.system("rm -rf ./taosdumptest/tmp3") + # os.system("rm -rf ./taosdumptest/tmp4") + # os.system("rm -rf ./taosdumptest/tmp5") + # os.system("rm -rf ./dump_result.txt") + # os.system("rm -rf 
./db.csv") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/update/append_commit_data.py b/tests/pytest/update/append_commit_data.py index 867ee696a261936e0118250995da4da5dca7ffb5..524c66d06af6d0c5ba1564fd772da17d217066af 100644 --- a/tests/pytest/update/append_commit_data.py +++ b/tests/pytest/update/append_commit_data.py @@ -59,7 +59,7 @@ class TDTestCase: tdSql.query("select * from db.t1") tdSql.checkRows(insertRows) - for k in range(0,100): + for k in range(0,10): tdLog.info("insert %d rows" % (insertRows)) temp='' for i in range (0,insertRows): @@ -75,6 +75,7 @@ class TDTestCase: tdDnodes.start(1) tdSql.query("select * from db.t1") tdSql.checkRows(insertRows+200*k) + insertRows = insertRows * 2 print("==========step3") print("insert into another table ") s = 'use db' diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 30b5fc645b0539609c92dbfb0dbb2a8cd4797cd5..427cfe349e6ea9a43c0eab93dbee12828c8c3db1 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -287,8 +287,9 @@ class TDDnode: print(cmd) - taosadapterCmd = "nohup %s > /dev/null 2>&1 & " % ( + taosadapterCmd = "nohup %s --opentsdb_telnet.enable=true > /dev/null 2>&1 & " % ( taosadapterBinPath) + tdLog.info(taosadapterCmd) if os.system(taosadapterCmd) != 0: tdLog.exit(taosadapterCmd) diff --git a/tests/script/general/parser/columnValue_unsign.sim b/tests/script/general/parser/columnValue_unsign.sim index 8e44ccb5facf074691f3aeeb7c60099ab6ef691f..9e011dd74ab49d8fc9617a20e6478d1e463c2433 100644 --- a/tests/script/general/parser/columnValue_unsign.sim +++ b/tests/script/general/parser/columnValue_unsign.sim @@ -117,7 +117,7 @@ if $data00 != NULL then endi sql select count(*), a from mt_unsigned_1 group by a; -if $rows != 1 then +if $rows != 2 then return -1 endi @@ -125,12 +125,20 @@ if $data00 != 1 then return -1 endi -if $data01 != 1 then +if $data01 != NULL then + return -1 +endi + +if $data10 != 1 then + return -1 +endi + +if $data11 != 1 then return -1 endi sql select count(*), b from mt_unsigned_1 group by b; -if $rows != 1 then +if $rows != 2 then return -1 endi @@ -138,12 +146,20 @@ if $data00 != 1 then return -1 endi -if $data01 != 2 then +if $data01 != NULL then + return -1 +endi + +if $data10 != 1 then + return -1 +endi + +if $data11 != 2 then return -1 endi sql select count(*), c from mt_unsigned_1 group by c; -if $rows != 1 then +if $rows != 2 then return -1 endi @@ -151,12 +167,20 @@ if $data00 != 1 then return -1 endi -if $data01 != 3 then +if $data01 != NULL then + return -1 +endi + +if $data10 != 1 then + return -1 +endi + +if $data11 != 3 then return -1 endi sql select count(*), d from mt_unsigned_1 group by d; -if $rows != 1 then +if $rows != 2 then return -1 endi @@ -164,7 +188,15 @@ if $data00 != 1 then return -1 endi -if $data01 != 4 then +if $data01 != NULL then + return -1 +endi + +if $data10 != 1 then + return -1 +endi + +if $data11 != 4 then return -1 endi diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/README b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/README new file mode 100644 index 0000000000000000000000000000000000000000..efbe1856473bd71195e7c423738d5dc6f1a9875b --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/README @@ -0,0 +1,30 @@ +# How to build image +## eg: +cd ./collectd_docker +docker build -t 
"taosadapter_collectd:v1" . + +# How to run single container +## eg: +cd ./collectd_docker +./run_collectd.sh -h +#Usage: +#1st arg: agent_count +#2nd arg: container_hostname prefix +#3rd arg: TaosadapterIp +#4th arg: TaosadapterPort +#5th arg: CollectdInterval +#eg: ./run_collectd.sh 1 collectd_agent1 172.26.10.86 6047 1 +#eg: ./run_collectd.sh 2 collectd_agent* 172.26.10.86 6047 1 +#rm all: ./run_collectd.sh rm collectd_agent* + +# How to run all container +## You need to edit run_all.sh to set taosadapter ip/port by manual, but count of each agent could be defined in bash_args +./run_all.sh -h +#Usage: +#1st arg: collectd_count +#2nd arg: icinga2_count +#3rd arg: statsd_count +#4th arg: tcollector_count +#5th arg: telegraf_count +#6th arg: node_exporter port range +#eg: ./run_all.sh 10 10 1 10 50 10000:10020 diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/collectd_docker/Dockerfile b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/collectd_docker/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..6e4c17a2c0a5a1999764f6d7817004618a1e52b3 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/collectd_docker/Dockerfile @@ -0,0 +1,15 @@ +FROM ubuntu:20.04 +ENV REFRESHED_AT 2021-12-04 +WORKDIR /root +ARG DEBIAN_FRONTEND=noninteractive +RUN set -ex; \ + apt update -y --fix-missing && \ + apt-get install -y --no-install-recommends collectd && \ + rm -rf /var/lib/apt/lists/* +COPY collectd.conf /etc/collectd/collectd.conf +COPY entrypoint.sh /entrypoint.sh +ENV CollectdHostname localhost +ENV TaosadapterIp 127.0.0.1 +ENV TaosadapterPort 6047 +ENV CollectdInterval 10 +ENTRYPOINT ["/entrypoint.sh"] diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/collectd_docker/collectd.conf b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/collectd_docker/collectd.conf new file mode 100644 index 0000000000000000000000000000000000000000..052d60aab7e34549a1dabf69afb6326c0e5dda4c --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/collectd_docker/collectd.conf @@ -0,0 +1,1649 @@ +# Config file for collectd(1). +# +# Some plugins need additional configuration and are disabled by default. +# Please read collectd.conf(5) for details. +# +# You should also read /usr/share/doc/collectd-core/README.Debian.plugins +# before enabling any more plugins. + +############################################################################## +# Global # +#----------------------------------------------------------------------------# +# Global settings for the daemon. # +############################################################################## + +Hostname "CollectdHostname" +FQDNLookup true +#BaseDir "/var/lib/collectd" +#PluginDir "/usr/lib/collectd" +#TypesDB "/usr/share/collectd/types.db" "/etc/collectd/my_types.db" + +#----------------------------------------------------------------------------# +# When enabled, plugins are loaded automatically with the default options # +# when an appropriate block is encountered. # +# Disabled by default. # +#----------------------------------------------------------------------------# +#AutoLoadPlugin false + +#----------------------------------------------------------------------------# +# When enabled, internal statistics are collected, using "collectd" as the # +# plugin name. # +# Disabled by default. 
# +#----------------------------------------------------------------------------# +#CollectInternalStats false + +#----------------------------------------------------------------------------# +# Interval at which to query values. This may be overwritten on a per-plugin # +# base by using the 'Interval' option of the LoadPlugin block: # +# # +# Interval 60 # +# # +#----------------------------------------------------------------------------# +Interval CollectdInterval + +#MaxReadInterval 86400 +#Timeout 2 +#ReadThreads 5 +#WriteThreads 5 + +# Limit the size of the write queue. Default is no limit. Setting up a limit +# is recommended for servers handling a high volume of traffic. +#WriteQueueLimitHigh 1000000 +#WriteQueueLimitLow 800000 + +############################################################################## +# Logging # +#----------------------------------------------------------------------------# +# Plugins which provide logging functions should be loaded first, so log # +# messages generated when loading or configuring other plugins can be # +# accessed. # +############################################################################## + +LoadPlugin logfile +LoadPlugin syslog +#LoadPlugin log_logstash + +# +# LogLevel "info" +# File "/var/log/collectd.log" +# Timestamp true +# PrintSeverity false +# + + + LogLevel info + + +# +# LogLevel info +# File "/var/log/collectd.json.log" +# + +############################################################################## +# LoadPlugin section # +#----------------------------------------------------------------------------# +# Specify what features to activate. # +############################################################################## + +#LoadPlugin aggregation +#LoadPlugin amqp +#LoadPlugin apache +#LoadPlugin apcups +#LoadPlugin ascent +#LoadPlugin barometer +LoadPlugin battery +#LoadPlugin bind +#LoadPlugin ceph +#LoadPlugin cgroups +#LoadPlugin chrony +#LoadPlugin conntrack +#LoadPlugin contextswitch +LoadPlugin cpu +#LoadPlugin cpufreq +#LoadPlugin cpusleep +#LoadPlugin csv +#LoadPlugin curl +#LoadPlugin curl_json +#LoadPlugin curl_xml +#LoadPlugin dbi +LoadPlugin df +LoadPlugin disk +#LoadPlugin dns +#LoadPlugin dpdkevents +#LoadPlugin dpdkstat +#LoadPlugin drbd +#LoadPlugin email +#LoadPlugin entropy +#LoadPlugin ethstat +#LoadPlugin exec +#LoadPlugin fhcount +#LoadPlugin filecount +#LoadPlugin fscache +#LoadPlugin gmond +#LoadPlugin gps +#LoadPlugin hugepages +#LoadPlugin grpc +#LoadPlugin hddtemp +#LoadPlugin intel_rdt +#LoadPlugin interface +#LoadPlugin ipc +#LoadPlugin ipmi +#LoadPlugin iptables +#LoadPlugin ipvs +#LoadPlugin irq +#LoadPlugin java +LoadPlugin load +#LoadPlugin lua +#LoadPlugin lvm +#LoadPlugin madwifi +#LoadPlugin mbmon +#LoadPlugin mcelog +#LoadPlugin md +#LoadPlugin memcachec +#LoadPlugin memcached +LoadPlugin memory +#LoadPlugin modbus +#LoadPlugin mqtt +#LoadPlugin multimeter +#LoadPlugin mysql +#LoadPlugin netlink +#LoadPlugin network +#LoadPlugin nfs +#LoadPlugin nginx +#LoadPlugin notify_desktop +#LoadPlugin notify_email +#LoadPlugin notify_nagios +#LoadPlugin ntpd +#LoadPlugin numa +#LoadPlugin nut +#LoadPlugin olsrd +#LoadPlugin onewire +#LoadPlugin openldap +#LoadPlugin openvpn +#LoadPlugin ovs_events +#LoadPlugin ovs_stats +#LoadPlugin perl +#LoadPlugin pinba +#LoadPlugin ping +#LoadPlugin postgresql +#LoadPlugin powerdns +#LoadPlugin processes +#LoadPlugin protocols +#LoadPlugin python +#LoadPlugin redis +#LoadPlugin rrdcached +LoadPlugin rrdtool +#LoadPlugin sensors +#LoadPlugin serial 
+#LoadPlugin sigrok +#LoadPlugin smart +#LoadPlugin snmp +#LoadPlugin snmp_agent +#LoadPlugin statsd +#LoadPlugin swap +#LoadPlugin table +#LoadPlugin tail +#LoadPlugin tail_csv +#LoadPlugin tcpconns +#LoadPlugin teamspeak2 +#LoadPlugin ted +#LoadPlugin thermal +#LoadPlugin tokyotyrant +#LoadPlugin turbostat +#LoadPlugin unixsock +#LoadPlugin uptime +#LoadPlugin users +#LoadPlugin uuid +#LoadPlugin varnish +#LoadPlugin virt +#LoadPlugin vmem +#LoadPlugin vserver +#LoadPlugin wireless +#LoadPlugin write_graphite +#LoadPlugin write_http +#LoadPlugin write_kafka +#LoadPlugin write_log +#LoadPlugin write_mongodb +#LoadPlugin write_prometheus +#LoadPlugin write_redis +#LoadPlugin write_riemann +#LoadPlugin write_sensu +LoadPlugin write_tsdb +#LoadPlugin xencpu +#LoadPlugin zfs_arc +#LoadPlugin zookeeper + +############################################################################## +# Plugin configuration # +#----------------------------------------------------------------------------# +# In this section configuration stubs for each plugin are provided. A desc- # +# ription of those options is available in the collectd.conf(5) manual page. # +############################################################################## + +# +# +# #Host "unspecified" +# Plugin "cpu" +# PluginInstance "/[0,2,4,6,8]$/" +# Type "cpu" +# #TypeInstance "unspecified" +# +# SetPlugin "cpu" +# SetPluginInstance "even-%{aggregation}" +# +# GroupBy "Host" +# GroupBy "TypeInstance" +# +# CalculateNum false +# CalculateSum false +# CalculateAverage true +# CalculateMinimum false +# CalculateMaximum false +# CalculateStddev false +# +# + +# +# +# Host "localhost" +# Port "5672" +# VHost "/" +# User "guest" +# Password "guest" +# Exchange "amq.fanout" +# RoutingKey "collectd" +# Persistent false +# StoreRates false +# ConnectionRetryDelay 0 +# +# + +# +# +# URL "http://localhost/server-status?auto" +# User "www-user" +# Password "secret" +# VerifyPeer false +# VerifyHost false +# CACert "/etc/ssl/ca.crt" +# Server "apache" +# +# +# +# URL "http://some.domain.tld/status?auto" +# Host "some.domain.tld" +# Server "lighttpd" +# +# + +# +# Host "localhost" +# Port "3551" +# ReportSeconds true +# PersistentConnection true +# + +# +# URL "http://localhost/ascent/status/" +# User "www-user" +# Password "secret" +# VerifyPeer false +# VerifyHost false +# CACert "/etc/ssl/ca.crt" +# + +# +# Device "/dev/i2c-0"; +# Oversampling 512 +# PressureOffset 0.0 +# TemperatureOffset 0.0 +# Normalization 2 +# Altitude 238.0 +# TemperatureSensor "myserver/onewire-F10FCA000800/temperature" +# + +# +# ValuesPercentage false +# ReportDegraded false +# QueryStateFS false +# + +# +# URL "http://localhost:8053/" +# +# ParseTime false +# +# OpCodes true +# QTypes true +# ServerStats true +# ZoneMaintStats true +# ResolverStats false +# MemoryStats true +# +# +# QTypes true +# ResolverStats true +# CacheRRSets true +# +# Zone "127.in-addr.arpa/IN" +# +# + +# +# LongRunAvgLatency false +# ConvertSpecialMetricTypes true +# +# SocketPath "/var/run/ceph/ceph-osd.0.asok" +# +# +# SocketPath "/var/run/ceph/ceph-osd.1.asok" +# +# +# SocketPath "/var/run/ceph/ceph-mon.ceph1.asok" +# +# +# SocketPath "/var/run/ceph/ceph-mds.ceph1.asok" +# +# + +# +# Host "localhost" +# Port "323" +# Timeout "2" +# + +# +# CGroup "libvirt" +# IgnoreSelected false +# + +# +# ReportByCpu true +# ReportByState true +# ValuesPercentage false +# ReportNumCpu false +# ReportGuestState false +# SubtractGuestState true +# + +# +# DataDir "/var/lib/collectd/csv" +# StoreRates false +# + 
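+# Of the LoadPlugin lines above, only logfile, syslog, battery, cpu, df, disk,
+# load, memory, rrdtool and write_tsdb are active in this file. write_tsdb is
+# the plugin that forwards metrics to taosadapter; the rest of this file is the
+# stock commented-out reference configuration shipped with the Debian package.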
+# +# +# URL "http://finance.google.com/finance?q=NYSE%3AAMD" +# User "foo" +# Password "bar" +# Digest false +# VerifyPeer true +# VerifyHost true +# CACert "/path/to/ca.crt" +# Header "X-Custom-Header: foobar" +# Post "foo=bar" +# +# MeasureResponseTime false +# MeasureResponseCode false +# +# Regex "]*> *([0-9]*\\.[0-9]+) *" +# DSType "GaugeAverage" +# Type "stock_value" +# Instance "AMD" +# +# +# + +# +## See: http://wiki.apache.org/couchdb/Runtime_Statistics +# +# Instance "httpd" +# +# Type "http_requests" +# +# +# +# Type "http_request_methods" +# +# +# +# Type "http_response_codes" +# +# +## Database status metrics: +# +# Instance "dbs" +# +# Type "gauge" +# +# +# Type "counter" +# +# +# Type "bytes" +# +# +# + +# +# +# Host "my_host" +# #Plugin "stats" +# Instance "some_instance" +# User "collectd" +# Password "thaiNg0I" +# Digest false +# VerifyPeer true +# VerifyHost true +# CACert "/path/to/ca.crt" +# Header "X-Custom-Header: foobar" +# Post "foo=bar" +# +# +# Type "magic_level" +# InstancePrefix "prefix-" +# InstanceFrom "td[1]" +# #PluginInstanceFrom "td[1]" +# ValuesFrom "td[2]/span[@class=\"level\"]" +# +# +# + +# +# +# Statement "SELECT 'customers' AS c_key, COUNT(*) AS c_value \ +# FROM customers_tbl" +# MinVersion 40102 +# MaxVersion 50042 +# +# Type "gauge" +# InstancePrefix "customer" +# InstancesFrom "c_key" +# ValuesFrom "c_value" +# +# +# +# +# #Plugin "mycompany" +# Driver "mysql" +# DriverOption "host" "localhost" +# DriverOption "username" "collectd" +# DriverOption "password" "secret" +# DriverOption "dbname" "custdb0" +# SelectDB "custdb0" +# Query "num_of_customers" +# Query "..." +# Host "..." +# +# + + +# Device "/dev/sda1" +# Device "192.168.0.2:/mnt/nfs" +# MountPoint "/home" +# FSType "ext3" + + # ignore rootfs; else, the root file-system would appear twice, causing + # one of the updates to fail and spam the log + FSType rootfs + # ignore the usual virtual / temporary file-systems + FSType sysfs + FSType proc + FSType devtmpfs + FSType devpts + FSType tmpfs + FSType fusectl + FSType cgroup + IgnoreSelected true + +# ReportByDevice false +# ReportInodes false + +# ValuesAbsolute true +# ValuesPercentage false + + +# +# Disk "hda" +# Disk "/sda[23]/" +# IgnoreSelected false +# UseBSDName false +# UdevNameAttr "DEVNAME" +# + +# +# Interface "eth0" +# IgnoreSource "192.168.0.1" +# SelectNumericQueryTypes false +# + +# +# +# Coremask "0x1" +# MemoryChannels "4" +# FilePrefix "rte" +# +# +# SendEventsOnUpdate true +# EnabledPortMask 0xffff +# PortName "interface1" +# PortName "interface2" +# SendNotification false +# +# +# SendEventsOnUpdate true +# LCoreMask "0xf" +# KeepAliveShmName "/dpdk_keepalive_shm_name" +# SendNotification false +# +# + +# +# +# Coremask "0x2" +# MemoryChannels "4" +# FilePrefix "rte" +# LogLevel "7" +# RteDriverLibPath "/usr/lib/dpdk-pmd" +# +# SharedMemObj "dpdk_collectd_stats_0" +# EnabledPortMask 0xffff +# PortName "interface1" +# PortName "interface2" +# + +# +# SocketFile "/var/run/collectd-email" +# SocketGroup "collectd" +# SocketPerms "0770" +# MaxConns 5 +# + +# +# Interface "eth0" +# Map "rx_csum_offload_errors" "if_rx_errors" "checksum_offload" +# Map "multicast" "if_multicast" +# MappedOnly false +# + +# +# Exec user "/path/to/exec" +# Exec "user:group" "/path/to/exec" +# NotificationExec user "/path/to/exec" +# + +# +# ValuesAbsolute true +# ValuesPercentage false +# + +# +# +# #Plugin "foo" +# Instance "foodir" +# Name "*.conf" +# MTime "-5m" +# Size "+10k" +# Recursive true +# IncludeHidden false +# RegularOnly true +# 
#FilesSizeType "bytes" +# #FilesCountType "files" +# #TypeInstance "instance" +# +# + +# +# MCReceiveFrom "239.2.11.71" "8649" +# +# +# Type "swap" +# TypeInstance "total" +# DataSource "value" +# +# +# +# Type "swap" +# TypeInstance "free" +# DataSource "value" +# +# + +# +# Host "127.0.0.1" +# Port "2947" +# Timeout 0.015 +# PauseConnect 5 +# + +# +# Host "127.0.0.1" +# Port 7634 +# + +# +# +# EnableSSL true +# SSLCACertificateFile "/path/to/root.pem" +# SSLCertificateFile "/path/to/server.pem" +# SSLCertificateKeyFile "/path/to/server.key" +# +# +# EnableSSL true +# SSLCACertificateFile "/path/to/root.pem" +# SSLCertificateFile "/path/to/client.pem" +# SSLCertificateKeyFile "/path/to/client.key" +# +# + +# +# ReportPerNodeHP true +# ReportRootHP true +# ValuesPages true +# ValuesBytes false +# ValuesPercentage false +# + +# +# Cores "0-2" +# + +# +# Interface "eth0" +# IgnoreSelected false +# ReportInactive true +# UniqueName false +# + +# +# +# Sensor "some_sensor" +# Sensor "another_one" +# IgnoreSelected false +# NotifySensorAdd false +# NotifySensorRemove true +# NotifySensorNotPresent false +# NotifyIPMIConnectionState false +# SELEnabled false +# SELClearEvent false +# +# +# Host "server.example.com" +# Address "1.2.3.4" +# Username "user" +# Password "secret" +# #AuthType "md5" +# Sensor "some_sensor" +# Sensor "another_one" +# IgnoreSelected false +# NotifySensorAdd false +# NotifySensorRemove true +# NotifySensorNotPresent false +# NotifyIPMIConnectionState false +# SELEnabled false +# SELClearEvent false +# +# + +# +# Chain "table" "chain" +# Chain6 "table" "chain" +# + +# +# Irq 7 +# Irq 8 +# Irq 9 +# IgnoreSelected true +# + +# +# JVMArg "-verbose:jni" +# JVMArg "-Djava.class.path=/usr/share/collectd/java/collectd-api.jar" +# +# LoadPlugin "org.collectd.java.GenericJMX" +# +# # See /usr/share/doc/collectd/examples/GenericJMX.conf +# # for an example config. +# +# + +# +# ReportRelative true +# + +# +# BasePath "/usr/share/collectd/lua" +# Script "script1.lua" +# Script "script2.lua" +# + +# +# Interface "wlan0" +# IgnoreSelected false +# Source "SysFS" +# WatchSet "None" +# WatchAdd "node_octets" +# WatchAdd "node_rssi" +# WatchAdd "is_rx_acl" +# WatchAdd "is_scan_active" +# + +# +# Host "127.0.0.1" +# Port 411 +# + +# +# +# McelogClientSocket "/var/run/mcelog-client" +# PersistentNotification false +# +# McelogLogfile "/var/log/mcelog" +# + +# +# Device "/dev/md0" +# IgnoreSelected false +# + +# +# +# Server "localhost" +# Key "page_key" +# +# Regex "(\\d+) bytes sent" +# ExcludeRegex "" +# DSType CounterAdd +# Type "ipt_octets" +# Instance "type_instance" +# +# +# + +# +# +# Socket "/var/run/memcached.sock" +# or: +# #Host "memcache.example.com" +# Address "127.0.0.1" +# Port "11211" +# +# + +# +# ValuesAbsolute true +# ValuesPercentage false +# + +# +# +# RegisterBase 1234 +# RegisterCmd ReadHolding +# RegisterType float +# Type gauge +# Instance "..." 
+# +# +# +# Address "addr" +# Port "1234" +# Interval 60 +# +# +# Instance "foobar" # optional +# Collect "data_name" +# +# +# + +# +# +# Host "localhost" +# Port 1883 +# ClientId "localhost" +# User "user" +# Password "secret" +# QoS 0 +# Prefix "collectd" +# StoreRates true +# Retain false +# CACert "/etc/ssl/ca.crt" +# CertificateFile "/etc/ssl/client.crt" +# CertificateKeyFile "/etc/ssl/client.pem" +# TLSProtocol "tlsv1.2" +# CipherSuite "ciphers" +# +# +# Host "localhost" +# Port 1883 +# ClientId "localhost" +# User "user" +# Password "secret" +# QoS 2 +# Topic "collectd/#" +# CleanSession true +# +# + +# +# +# Host "database.serv.er" +# Port "3306" +# User "db_user" +# Password "secret" +# Database "db_name" +# SSLKey "/path/to/key.pem" +# SSLCert "/path/to/cert.pem" +# SSLCA "/path/to/ca.pem" +# SSLCAPath "/path/to/cas/" +# SSLCipher "DHE-RSA-AES256-SHA" +# MasterStats true +# ConnectTimeout 10 +# InnodbStats true +# +# +# +# Alias "squeeze" +# Host "localhost" +# Socket "/var/run/mysql/mysqld.sock" +# SlaveStats true +# SlaveNotifications true +# +# +# +# Alias "galera" +# Host "localhost" +# Socket "/var/run/mysql/mysqld.sock" +# WsrepStats true +# +# + +# +# Interface "All" +# VerboseInterface "All" +# QDisc "eth0" "pfifo_fast-1:0" +# Class "ppp0" "htb-1:10" +# Filter "ppp0" "u32-1:0" +# IgnoreSelected false +# + +# +# # client setup: +# Server "ff18::efc0:4a42" "25826" +# +# SecurityLevel Encrypt +# Username "user" +# Password "secret" +# Interface "eth0" +# ResolveInterval 14400 +# +# TimeToLive 128 +# +# # server setup: +# Listen "ff18::efc0:4a42" "25826" +# +# SecurityLevel Sign +# AuthFile "/etc/collectd/passwd" +# Interface "eth0" +# +# MaxPacketSize 1452 +# +# # proxy setup (client and server as above): +# Forward true +# +# # statistics about the network plugin itself +# ReportStats false +# +# # "garbage collection" +# CacheFlush 1800 +# + +# +# ReportV2 false +# ReportV3 false +# ReportV4 false +# + +# +# URL "http://localhost/status?auto" +# User "www-user" +# Password "secret" +# VerifyPeer false +# VerifyHost false +# CACert "/etc/ssl/ca.crt" +# + +# +# OkayTimeout 1000 +# WarningTimeout 5000 +# FailureTimeout 0 +# + +# +# SMTPServer "localhost" +# SMTPPort 25 +# SMTPUser "my-username" +# SMTPPassword "my-password" +# From "collectd@main0server.com" +# # on . +# # Beware! Do not use not more than two placeholders (%)! +# Subject "[collectd] %s on %s!" 
+# Recipient "email1@domain1.net" +# Recipient "email2@domain2.com" +# + +# +# CommandFile "/var/lib/icinga/rw/icinga.cmd" +# + +# +# Host "localhost" +# Port 123 +# ReverseLookups false +# IncludeUnitID true +# + +# +# UPS "upsname@hostname:port" +# ForceSSL true +# VerifyPeer true +# CAPath "/path/to/folder" +# #ConnectTimeout 5000 +# + +# +# Host "127.0.0.1" +# Port "2006" +# CollectLinks "Summary" +# CollectRoutes "Summary" +# CollectTopology "Summary" +# + +# +# Device "-s localhost:4304" +# Sensor "F10FCA000800" +# IgnoreSelected false +# + +# +# +# URL "ldap://localhost:389" +# StartTLS false +# VerifyHost true +# CACert "/path/to/ca.crt" +# Timeout -1 +# Version 3 +# +# + +# +# StatusFile "/etc/openvpn/openvpn-status.log" +# ImprovedNamingSchema false +# CollectCompression true +# CollectIndividualUsers true +# CollectUserCount false +# + +# +# Port "6640" +# Address "127.0.0.1" +# Socket "/var/run/openvswitch/db.sock" +# Interfaces "br0" "veth0" +# SendNotification true +# DispatchValues false +# +# +# +# Port "6640" +# Address "127.0.0.1" +# Socket "/var/run/openvswitch/db.sock" +# Bridges "br0" "br_ext" +# + +# +# IncludeDir "/my/include/path" +# BaseName "Collectd::Plugins" +# EnableDebugger "" +# LoadPlugin Monitorus +# LoadPlugin OpenVZ +# +# +# Foo "Bar" +# Qux "Baz" +# +# + +# +# Address "::0" +# Port "30002" +# +# Host "host name" +# Server "server name" +# Script "script name" +# +# + +# +# Host "host.foo.bar" +# Host "host.baz.qux" +# Interval 1.0 +# Timeout 0.9 +# TTL 255 +# SourceAddress "1.2.3.4" +# Device "eth0" +# MaxMissed -1 +# + +# +# +# Statement "SELECT magic FROM wizard WHERE host = $1;" +# Param hostname +# +# +# Type gauge +# InstancePrefix "magic" +# ValuesFrom "magic" +# +# +# +# +# Statement "SELECT COUNT(type) AS count, type \ +# FROM (SELECT CASE \ +# WHEN resolved = 'epoch' THEN 'open' \ +# ELSE 'resolved' END AS type \ +# FROM tickets) type \ +# GROUP BY type;" +# +# +# Type counter +# InstancePrefix "rt36_tickets" +# InstancesFrom "type" +# ValuesFrom "count" +# +# +# +# +# # See /usr/share/doc/collectd-core/examples/postgresql/collectd_insert.sql for details +# Statement "SELECT collectd_insert($1, $2, $3, $4, $5, $6, $7, $8, $9);" +# StoreRates true +# +# +# +# #Plugin "kingdom" +# Host "hostname" +# Port 5432 +# User "username" +# Password "secret" +# +# SSLMode "prefer" +# KRBSrvName "kerberos_service_name" +# +# Query magic +# +# +# +# Interval 60 +# Service "service_name" +# +# Query backends # predefined +# Query rt36_tickets +# +# +# +# Service "collectd_store" +# Writer sqlstore +# # see collectd.conf(5) for details +# CommitInterval 30 +# +# + +# +# +# Collect "latency" +# Collect "udp-answers" "udp-queries" +# Socket "/var/run/pdns.controlsocket" +# +# +# Collect "questions" +# Collect "cache-hits" "cache-misses" +# Socket "/var/run/pdns_recursor.controlsocket" +# +# LocalSocket "/opt/collectd/var/run/collectd-powerdns" +# + +# +# CollectFileDescriptor true +# CollectContextSwitch true +# CollectMemoryMaps true +# Process "name" +# ProcessMatch "foobar" "/usr/bin/perl foobar\\.pl.*" +# +# CollectFileDescriptor false +# CollectContextSwitch false +# +# +# CollectFileDescriptor false +# CollectContextSwitch true +# +# + +# +# Value "/^Tcp:/" +# IgnoreSelected false +# + +# +# ModulePath "/path/to/your/python/modules" +# LogTraces true +# Interactive true +# Import "spam" +# +# +# spam "wonderful" "lovely" +# +# + +# +# +# Host "redis.example.com" +# Port "6379" +# Timeout 2000 +# +# + +# +# DaemonAddress "unix:/var/run/rrdcached.sock" +# 
DataDir "/var/lib/rrdcached/db/collectd" +# CreateFiles true +# CreateFilesAsync false +# CollectStatistics true +# +# The following settings are rather advanced +# and should usually not be touched: +# StepSize 10 +# HeartBeat 20 +# RRARows 1200 +# RRATimespan 158112000 +# XFF 0.1 +# + + + DataDir "/var/lib/collectd/rrd" +# CacheTimeout 120 +# CacheFlush 900 +# WritesPerSecond 30 +# CreateFilesAsync false +# RandomTimeout 0 +# +# The following settings are rather advanced +# and should usually not be touched: +# StepSize 10 +# HeartBeat 20 +# RRARows 1200 +# RRATimespan 158112000 +# XFF 0.1 + + +# +# SensorConfigFile "/etc/sensors3.conf" +# Sensor "it8712-isa-0290/temperature-temp1" +# Sensor "it8712-isa-0290/fanspeed-fan3" +# Sensor "it8712-isa-0290/voltage-in8" +# IgnoreSelected false +# + +# +# LogLevel 3 +# +# Driver "fluke-dmm" +# MinimumInterval 10 +# Conn "/dev/ttyUSB2" +# +# +# Driver "cem-dt-885x" +# Conn "/dev/ttyUSB1" +# +# + +# +# Disk "/^[hs]d[a-f][0-9]?$/" +# IgnoreSelected false +# + +# See /usr/share/doc/collectd/examples/snmp-data.conf.gz for a +# comprehensive sample configuration. +# +# +# Type "voltage" +# Table false +# Instance "input_line1" +# Scale 0.1 +# Values "SNMPv2-SMI::enterprises.6050.5.4.1.1.2.1" +# +# +# Type "users" +# Table false +# Instance "" +# Shift -1 +# Values "HOST-RESOURCES-MIB::hrSystemNumUsers.0" +# +# +# Type "if_octets" +# Table true +# InstancePrefix "traffic" +# Instance "IF-MIB::ifDescr" +# Values "IF-MIB::ifInOctets" "IF-MIB::ifOutOctets" +# +# +# +# Address "192.168.0.2" +# Version 1 +# Community "community_string" +# Collect "std_traffic" +# Inverval 120 +# Timeout 10 +# Retries 1 +# +# +# Address "192.168.0.42" +# Version 2 +# Community "another_string" +# Collect "std_traffic" "hr_users" +# +# +# Address "192.168.0.3" +# Version 1 +# Community "more_communities" +# Collect "powerplus_voltge_input" +# Interval 300 +# Timeout 5 +# Retries 5 +# +# + +# +# +# Plugin "memory" +# Type "memory" +# TypeInstance "free" +# OIDs "1.3.6.1.4.1.2021.4.6.0" +# +# +# IndexOID "IF-MIB::ifIndex" +# SizeOID "IF-MIB::ifNumber" +# +# Instance true +# Plugin "interface" +# OIDs "IF-MIB::ifDescr" +# +# +# Plugin "interface" +# Type "if_octets" +# TypeInstance "" +# OIDs "IF-MIB::ifInOctets" "IF-MIB::ifOutOctets" +# +#
+#
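+# A minimal, self-contained pairing of the snmp Data/Host blocks documented
+# above -- a sketch only; the host name, address and community string are
+# placeholder values, not part of the stock configuration:
+# <Plugin snmp>
+#   <Data "std_traffic">
+#     Type "if_octets"
+#     Table true
+#     Values "IF-MIB::ifInOctets" "IF-MIB::ifOutOctets"
+#   </Data>
+#   <Host "router.example.com">
+#     Address "192.0.2.1"
+#     Version 2
+#     Community "public"
+#     Collect "std_traffic"
+#     Interval 60
+#   </Host>
+# </Plugin>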
+ +# +# Host "::" +# Port "8125" +# DeleteCounters false +# DeleteTimers false +# DeleteGauges false +# DeleteSets false +# CounterSum false +# TimerPercentile 90.0 +# TimerPercentile 95.0 +# TimerPercentile 99.0 +# TimerLower false +# TimerUpper false +# TimerSum false +# TimerCount false +# + +# +# ReportByDevice false +# ReportBytes true +# ValuesAbsolute true +# ValuesPercentage false +# ReportIO true +# + +# +# +# #Plugin "table" +# Instance "slabinfo" +# Separator " " +# +# Type gauge +# InstancePrefix "active_objs" +# InstancesFrom 0 +# ValuesFrom 1 +# +# +# Type gauge +# InstancePrefix "objperslab" +# InstancesFrom 0 +# ValuesFrom 4 +# +#
+#
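+# Note for this stability-test image: entrypoint.sh rewrites the
+# CollectdHostname, TaosadapterIp, TaosadapterPort and CollectdInterval
+# placeholders in this file with sed before collectd starts, so the active
+# output section near the end of this file, e.g. Host "TaosadapterIp",
+# ends up as something like Host "172.26.10.86" (the taosAdapter address
+# passed to run_collectd.sh).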
+ +# +# +# Instance "exim" +# Interval 60 +# +# Regex "S=([1-9][0-9]*)" +# DSType "CounterAdd" +# Type "ipt_bytes" +# Instance "total" +# +# +# Regex "\\" +# ExcludeRegex "\\.*mail_spool defer" +# DSType "CounterInc" +# Type "counter" +# Instance "local_user" +# +# +# +# #Use the following log format in nginx: +# #log_format response_time '[$host] "$upstream_response_time" ...' +# Instance "apache" +# +# Regex "^\\S+ \"([0-9.]+)\"" +# +# Percentile 80 # -> latency-foo-80 +# Percentile 95 # -> latency-foo-95 +# Percentile 99 # -> latency-foo-99 +# Bucket 0 0.1 # -> bucket-latency-foo-0_0.1 +# Bucket 0.1 0.2 # -> bucket-latency-foo-0.1_0.2 +# Bucket 0.2 0.5 # -> bucket-latency-foo-0.2_0.5 +# Bucket 0.5 1.0 # -> bucket-latency-foo-0.5_1 +# Bucket 1.0 2.0 # -> bucket-latency-foo-1_2 +# Bucket 2.0 0 # -> bucket-latency-foo-2_inf +# #BucketType "bucket" +# +# Type "latency" +# Instance "foo" +# +# +# + +# +# +# Type "percent" +# Instance "dropped" +# ValueFrom 1 +# +# +# Type "bytes" +# Instance "wire-realtime" +# ValueFrom 2 +# +# +# Type "alerts_per_second" +# ValueFrom 3 +# +# +# Type "kpackets_wire_per_sec.realtime" +# ValueFrom 4 +# +# +# Instance "snort-eth0" +# Interval 600 +# Collect "dropped" "mbps" "alerts" "kpps" +# TimeFrom 0 +# +# + +# +# ListeningPorts false +# AllPortsSummary false +# LocalPort "25" +# RemotePort "25" +# + +# +# Host "127.0.0.1" +# Port "51234" +# Server "8767" +# + +# +# Device "/dev/ttyUSB0" +# Retries 0 +# + +# +# ForceUseProcfs false +# Device "THRM" +# IgnoreSelected false +# + +# +# Host "localhost" +# Port "1978" +# + +# +## None of the following option should be set manually +## This plugin automatically detect most optimal options +## Only set values here if: +## - The module asks you to +## - You want to disable the collection of some data +## - Your (Intel) CPU is not supported (yet) by the module +## - The module generates a lot of errors 'MSR offset 0x... 
read failed' +## In the last two cases, please open a bug request +# +# TCCActivationTemp "100" +# CoreCstates "392" +# PackageCstates "396" +# SystemManagementInterrupt true +# DigitalTemperatureSensor true +# PackageThermalManagement true +# RunningAveragePowerLimit "7" +# + +# +# SocketFile "/var/run/collectd-unixsock" +# SocketGroup "collectd" +# SocketPerms "0660" +# DeleteSocket false +# + +# +# UUIDFile "/etc/uuid" +# + +# +# This tag support an argument if you want to +# monitor the local instance just use +# If you prefer defining another instance you can do +# so by using +# +# CollectBackend true +# CollectBan false # Varnish 3 and above +# CollectCache true +# CollectConnections true +# CollectDirectorDNS false # Varnish 3 only +# CollectESI false +# CollectFetch false +# CollectHCB false +# CollectObjects false +# CollectPurge false # Varnish 2 only +# CollectSession false +# CollectSHM true +# CollectSMA false # Varnish 2 & 4 only +# CollectSMS false +# CollectSM false # Varnish 2 only +# CollectStruct false +# CollectTotals false +# CollectUptime false # Varnish 3 and above +# CollectVCL false +# CollectVSM false # Varnish 4 only +# CollectWorkers false +# CollectLock false # Varnish 4 only +# CollectMempool false # Varnish 4 only +# CollectManagement false # Varnish 4 only +# CollectSMF false # Varnish 4 only +# CollectVBE false # Varnish 4 only +# CollectMSE false # Varnish-Plus 4 only +# +# + +# +# Connection "xen:///" +# RefreshInterval 60 +# Domain "name" +# BlockDevice "name:device" +# BlockDeviceFormat target +# BlockDeviceFormatBasename false +# InterfaceDevice "name:device" +# IgnoreSelected false +# HostnameFormat name +# InterfaceFormat name +# PluginInstanceFormat name +# Instances 1 +# ExtraStats "cpu_util disk disk_err domain_state fs_info job_stats_background pcpu perf vcpupin" +# + +# +# Verbose false +# + +# +# +# Host "localhost" +# Port "2003" +# Protocol "tcp" +# ReconnectInterval 0 +# LogSendErrors true +# Prefix "collectd" +# Postfix "collectd" +# StoreRates true +# AlwaysAppendDS false +# EscapeCharacter "_" +# SeparateInstances false +# PreserveSeparator false +# DropDuplicateFields false +# +# + +# +# +# URL "http://example.com/collectd-post" +# User "collectd" +# Password "secret" +# VerifyPeer true +# VerifyHost true +# CACert "/etc/ssl/ca.crt" +# CAPath "/etc/ssl/certs/" +# ClientKey "/etc/ssl/client.pem" +# ClientCert "/etc/ssl/client.crt" +# ClientKeyPass "secret" +# Header "X-Custom-Header: custom_value" +# SSLVersion "TLSv1" +# Format "Command" +# Prefix "collectd" # metric prefix, only available for KAIROSDB format +# Attribute "key" "value" # only available for KAIROSDB format +# TTL 0 # data ttl, only available for KAIROSDB format +# Metrics true +# Notifications false +# StoreRates false +# BufferSize 4096 +# LowSpeedLimit 0 +# Timeout 0 +# Prefix "collectd/" +# +# + +# +# Property "metadata.broker.list" "localhost:9092" +# +# Format JSON +# +# + +# +# +# Host "localhost" +# Port "27017" +# Timeout 1000 +# StoreRates false +# Database "auth_db" +# User "auth_user" +# Password "auth_passwd" +# +# + +# +# Port "9103" +# + +# +# +# Host "localhost" +# Port "6379" +# Timeout 1000 +# +# + +# +# +# Host "localhost" +# Port 5555 +# Protocol TCP +# Batch true +# BatchMaxSize 8192 +# StoreRates true +# AlwaysAppendDS false +# TTLFactor 2.0 +# Notifications true +# CheckThresholds false +# EventServicePrefix "" +# +# Tag "foobar" +# Attribute "foo" "bar" +# + +# +# +# Host "localhost" +# Port 3030 +# StoreRates true +# AlwaysAppendDS false +# 
Notifications true +# Metrics true +# EventServicePrefix "" +# MetricHandler "influx" +# MetricHandler "default" +# NotificationHandler "flapjack" +# NotificationHandler "howling_monkey" +# +# Tag "foobar" +# Attribute "foo" "bar" +# + + + + Host "TaosadapterIp" + Port "TaosadapterPort" + HostTags "status=production" + StoreRates false + AlwaysAppendDS false + + + +# +# Host "localhost" +# Port "2181" +# + + + Filter "*.conf" + + diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/collectd_docker/entrypoint.sh b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/collectd_docker/entrypoint.sh new file mode 100755 index 0000000000000000000000000000000000000000..144c59c6232f4eb8b27d665622a3b330b2924c5c --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/collectd_docker/entrypoint.sh @@ -0,0 +1,4 @@ +#!/bin/bash +sed -i 's/CollectdHostname/'$HOSTNAME'/g;s/TaosadapterIp/'$TaosadapterIp'/g;s/TaosadapterPort/'$TaosadapterPort'/g;s/CollectdInterval/'$CollectdInterval'/g;' /etc/collectd/collectd.conf +/etc/init.d/collectd start +tail -f /dev/null diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/collectd_docker/run_collectd.sh b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/collectd_docker/run_collectd.sh new file mode 100755 index 0000000000000000000000000000000000000000..0279f6e2fc2e8e27e729e27a36204b965550b49e --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/collectd_docker/run_collectd.sh @@ -0,0 +1,59 @@ +#!/bin/bash +case "$1" in + -h|--help) + echo "Usage:" + echo "1st arg: agent_count" + echo "2nd arg: container_hostname prefix" + echo "3rd arg: TaosadapterIp" + echo "4th arg: TaosadapterPort" + echo "5th arg: CollectdInterval" + echo "eg: ./run_collectd.sh 1 collectd_agent1 172.26.10.86 6047 1" + echo "eg: ./run_collectd.sh 2 collectd_agent* 172.26.10.86 6047 1" + echo "rm all: ./run_collectd.sh rm collectd_agent*" + exit 0 +;; +esac + +if [ $1 == "rm" ]; then + docker ps | grep $2 | awk '{print $1}' | xargs docker stop | xargs docker rm + exit +fi + +if [ ! -n "$1" ]; then + echo "please input 1st arg" + exit +fi +if [ ! -n "$2" ]; then + echo "please input 2nd arg" + exit +fi +if [ ! -n "$3" ]; then + echo "please input 3rd arg" + exit +fi +if [ ! -n "$4" ]; then + echo "please input 4th arg" + exit +fi +if [ ! -n "$5" ]; then + echo "please input 5th arg" + exit +fi +if [ $1 -eq 1 ];then + docker ps | grep $2 + if [ $? -eq 0 ];then + docker stop $2 && docker rm $2 + fi + docker run -itd --name $2 -h $2 -e TaosadapterIp=$3 -e TaosadapterPort=$4 -e CollectdInterval=$5 taosadapter_collectd:v1 /bin/bash +else + perfix=`echo $2 | cut -d '*' -f 1` + for i in `seq 1 $1`; + do + docker ps | grep $perfix$i + if [ $? 
-eq 0 ];then + docker stop $perfix$i && docker rm $perfix$i + fi + docker run -itd --name $perfix$i -h $perfix$i -e TaosadapterIp=$3 -e TaosadapterPort=$4 -e CollectdInterval=$5 taosadapter_collectd:v1 /bin/bash + done +fi +#docker run -itd --name collectd_agent1 -h collectd_agent1 -e CollectdHostname=collectd_agent1 -e TaosadapterIp=172.26.10.86 -e TaosadapterPort=6047 -e CollectdInterval=1 taosadapter_collectd:v1 /bin/bash diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/Dockerfile b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..7d2b138710fefd7caf7f9256ae112227eb3b0ff9 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/Dockerfile @@ -0,0 +1,22 @@ +FROM ubuntu:20.04 +ENV REFRESHED_AT 2021-12-05 +ARG DEBIAN_FRONTEND=noninteractive +WORKDIR /root +RUN set -ex; \ + apt update -y --fix-missing && \ + apt install -y gnupg +COPY icinga-focal.list /etc/apt/sources.list.d/icinga-focal.list +COPY icinga.key /root/icinga.key +RUN set -ex; \ + apt-key add icinga.key && \ + apt update -y --fix-missing && \ + apt-get install -y --no-install-recommends icinga2 monitoring-plugins systemctl && \ + icinga2 feature enable opentsdb && \ + rm -rf /var/lib/apt/lists/* +COPY opentsdb.conf /etc/icinga2/features-available/opentsdb.conf +COPY entrypoint.sh /entrypoint.sh +COPY templates.conf /etc/icinga2/conf.d/templates.conf +ENV Icinga2Interval 10s +ENV TaosadapterIp 127.0.0.1 +ENV TaosadapterPort 6048 +ENTRYPOINT ["/entrypoint.sh"] diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/entrypoint.sh b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/entrypoint.sh new file mode 100755 index 0000000000000000000000000000000000000000..ae4e5ca58217aed5c4e013d5c8ef74c264f788d1 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/entrypoint.sh @@ -0,0 +1,5 @@ +#!/bin/bash +sed -i 's/TaosadapterIp/'$TaosadapterIp'/g;s/TaosadapterPort/'$TaosadapterPort'/g;' /etc/icinga2/features-available/opentsdb.conf +sed -i 's/Icinga2Interval/'$Icinga2Interval'/g;' /etc/icinga2/conf.d/templates.conf +systemctl restart icinga2 +tail -f /dev/null diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/icinga-focal.list b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/icinga-focal.list new file mode 100644 index 0000000000000000000000000000000000000000..b1e588858e7a33d1f17d7367a684150e25380289 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/icinga-focal.list @@ -0,0 +1,2 @@ +deb http://packages.icinga.com/ubuntu icinga-focal main +deb-src http://packages.icinga.com/ubuntu icinga-focal main diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/icinga.key b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/icinga.key new file mode 100644 index 0000000000000000000000000000000000000000..901c78cbde3363299fb93c7e4bd1a41cf9c1e5a3 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/icinga.key @@ -0,0 +1,30 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v2.0.19 (GNU/Linux) + +mQGiBFKHzk4RBACSHMIFTtfw4ZsNKAA03Gf5t7ovsKWnS7kcMYleAidypqhOmkGg +0petiYsMPYT+MOepCJFGNzwQwJhZrdLUxxMSWay4Xj0ArgpD9vbvU+gj8Tb02l+x 
+SqNGP8jXMV5UnK4gZsrYGLUPvx47uNNYRIRJAGOPYTvohhnFJiG402dzlwCg4u5I +1RdFplkp9JM6vNM9VBIAmcED/2jr7UQGsPs8YOiPkskGHLh/zXgO8SvcNAxCLgbp +BjGcF4Iso/A2TAI/2KGJW6kBW/Paf722ltU6s/6mutdXJppgNAz5nfpEt4uZKZyu +oSWf77179B2B/Wl1BsX/Oc3chscAgQb2pD/qPF/VYRJU+hvdQkq1zfi6cVsxyREV +k+IwA/46nXh51CQxE29ayuy1BoIOxezvuXFUXZ8rP6aCh4KaiN9AJoy7pBieCzsq +d7rPEeGIzBjI+yhEu8p92W6KWzL0xduWfYg9I7a2GTk8CaLX2OCLuwnKd7RVDyyZ +yzRjWs0T5U7SRAWspLStYxMdKert9lLyQiRHtLwmlgBPqa0gh7Q+SWNpbmdhIE9w +ZW4gU291cmNlIE1vbml0b3JpbmcgKEJ1aWxkIHNlcnZlcikgPGluZm9AaWNpbmdh +Lm9yZz6IYAQTEQIAIAUCUofOTgIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJ +EMbjGcM0QQaCgSQAnRjXdbsyqziqhmxfAKffNJYuMPwdAKCS/IRCVyQzApFBtIBQ +1xuoym/4C7kCDQRSh85OEAgAvPwjlURCi8z6+7i60no4n16dNcSzd6AT8Kizpv2r +9BmNBff/GNYGnHyob/DMtmO2esEuVG8w62rO9m1wzzXzjbtmtU7NZ1Tg+C+reU2I +GNVu3SYtEVK/UTJHAhLcgry9yD99610tYPN2Fx33Efse94mXOreBfCvDsmFGSc7j +GVNCWXpMR3jTYyGj1igYd5ztOzG63D8gPyOucTTl+RWN/G9EoGBv6sWqk5eCd1Fs +JlWyQX4BJn3YsCZx3uj1DWL0dAl2zqcn6m1M4oj1ozW47MqM/efKOcV6VvCs9SL8 +F/NFvZcH4LKzeupCQ5jEONqcTlVlnLlIqId95Z4DI4AV9wADBQf/S6sKA4oH49tD +Yb5xAfUyEp5ben05TzUJbXs0Z7hfRQzy9+vQbWGamWLgg3QRUVPx1e4IT+W5vEm5 +dggNTMEwlLMI7izCPDcD32B5oxNVxlfj428KGllYWCFj+edY+xKTvw/PHnn+drKs +LE65Gwx4BPHm9EqWHIBX6aPzbgbJZZ06f6jWVBi/N7e/5n8lkxXqS23DBKemapyu +S1i56sH7mQSMaRZP/iiOroAJemPNxv1IQkykxw2woWMmTLKLMCD/i+4DxejE50tK +dxaOLTc4HDCsattw/RVJO6fwE414IXHMv330z4HKWJevMQ+CmQGfswvCwgeBP9n8 +PItLjBQAXIhJBBgRAgAJBQJSh85OAhsMAAoJEMbjGcM0QQaCzpAAmwUNoRyySf9p +5G3/2UD1PMueIwOtAKDVVDXEq5LJPVg4iafNu0SRMwgP0Q== +=icbY +-----END PGP PUBLIC KEY BLOCK----- diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/opentsdb.conf b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/opentsdb.conf new file mode 100644 index 0000000000000000000000000000000000000000..51d6484de968c7ada14e8ba2848426a5b2d45547 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/opentsdb.conf @@ -0,0 +1,25 @@ +/** + * The OpenTsdbWriter type writes check result metrics and + * performance data to a OpenTSDB tcp socket. + */ + +object OpenTsdbWriter "opentsdb" { + host = "TaosadapterIp" + port = TaosadapterPort + //enable_generic_metrics = false + + // Custom Tagging, refer to Icinga object type documentation for + // OpenTsdbWriter + //host_template = { + // metric = "icinga.host" + // tags = { + // zone = "$host.zone$" + // } + //} + //service_template = { + // metric = "icinga.service.$service.check_command$" + // tags = { + // zone = "$service.zone$" + // } + //} +} diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/run_icinga2.sh b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/run_icinga2.sh new file mode 100755 index 0000000000000000000000000000000000000000..9fc75bc02ade19f4f11a341eca1b6318d5185dc1 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/run_icinga2.sh @@ -0,0 +1,59 @@ +#!/bin/bash +case "$1" in + -h|--help) + echo "Usage:" + echo "1st arg: agent_count" + echo "2nd arg: container_hostname prefix" + echo "3rd arg: TaosadapterIp" + echo "4th arg: TaosadapterPort" + echo "5th arg: Icinga2Interval" + echo "eg: ./run_icinga2.sh 1 icinga2_agent1 172.26.10.86 6048 1" + echo "eg: ./run_icinga2.sh 2 icinga2_agent* 172.26.10.86 6048 1" + exit 0 +;; +esac + +if [ $1 == "rm" ]; then + docker ps | grep $2 | awk '{print $1}' | xargs docker stop | xargs docker rm + exit +fi + +if [ ! 
-n "$1" ]; then + echo "please input 1st arg" + exit +fi +if [ ! -n "$2" ]; then + echo "please input 2nd arg" + exit +fi +if [ ! -n "$3" ]; then + echo "please input 3rd arg" + exit +fi +if [ ! -n "$4" ]; then + echo "please input 4th arg" + exit +fi +if [ ! -n "$5" ]; then + echo "please input 5th arg" + exit +fi + +if [ $1 -eq 1 ];then + docker ps | grep $2 + if [ $? -eq 0 ];then + docker stop $2 && docker rm $2 + fi + docker run -itd --name $2 -h $2 -e TaosadapterIp=$3 -e TaosadapterPort=$4 -e Icinga2Interval=$5 taosadapter_icinga2:v1 /bin/bash +else + perfix=`echo $2 | cut -d '*' -f 1` + for i in `seq 1 $1`; + do + docker ps | grep $perfix$i + if [ $? -eq 0 ];then + docker stop $perfix$i && docker rm $perfix$i + fi + docker run -itd --name $perfix$i -h $perfix$i -e TaosadapterIp=$3 -e TaosadapterPort=$4 -e Icinga2Interval=$5 taosadapter_icinga2:v1 /bin/bash + done +fi +#docker run -itd --name icinga2_agent1 -h icinga2_agent1 -e TaosadapterIp=172.26.10.86 -e TaosadapterPort=6048 -e Icinga2Interval=1s taosadapter_icinga2:v1 /bin/bash diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/templates.conf b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/templates.conf new file mode 100644 index 0000000000000000000000000000000000000000..cd49e488142470b1085fb399b6e51891dfe6b5f3 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/icinga2_docker/templates.conf @@ -0,0 +1,83 @@ +/* + * Generic template examples. + */ + + +/** + * Provides default settings for hosts. By convention + * all hosts should import this template. + * + * The CheckCommand object `hostalive` is provided by + * the plugin check command templates. + * Check the documentation for details. + */ +template Host "generic-host" { + max_check_attempts = 3 + check_interval = Icinga2Interval + retry_interval = 30s + + check_command = "hostalive" +} + +/** + * Provides default settings for services. By convention + * all services should import this template. + */ +template Service "generic-service" { + max_check_attempts = 5 + check_interval = Icinga2Interval + retry_interval = 30s +} + +/** + * Provides default settings for users. By convention + * all users should inherit from this template. + */ + +template User "generic-user" { + +} + +/** + * Provides default settings for host notifications. + * By convention all host notifications should import + * this template. + */ +template Notification "mail-host-notification" { + command = "mail-host-notification" + + states = [ Up, Down ] + types = [ Problem, Acknowledgement, Recovery, Custom, + FlappingStart, FlappingEnd, + DowntimeStart, DowntimeEnd, DowntimeRemoved ] + + vars += { + // notification_icingaweb2url = "https://www.example.com/icingaweb2" + // notification_from = "Icinga 2 Host Monitoring " + notification_logtosyslog = false + } + + period = "24x7" +} + +/** + * Provides default settings for service notifications. + * By convention all service notifications should import + * this template. 
+ */ +template Notification "mail-service-notification" { + command = "mail-service-notification" + + states = [ OK, Warning, Critical, Unknown ] + types = [ Problem, Acknowledgement, Recovery, Custom, + FlappingStart, FlappingEnd, + DowntimeStart, DowntimeEnd, DowntimeRemoved ] + + vars += { + // notification_icingaweb2url = "https://www.example.com/icingaweb2" + // notification_from = "Icinga 2 Service Monitoring " + notification_logtosyslog = false + } + + period = "24x7" +} diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/node_exporter_docker/Dockerfile b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/node_exporter_docker/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..464a5bd48736abdf06a0cb00d36426d91528efda --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/node_exporter_docker/Dockerfile @@ -0,0 +1,17 @@ +FROM ubuntu:20.04 +ENV REFRESHED_AT 2021-12-05 +WORKDIR /root +ARG DEBIAN_FRONTEND=noninteractive +RUN set -ex; \ + apt update -y --fix-missing && \ + apt-get install -y --no-install-recommends wget && \ + wget http://39.105.163.10:9000/node_exporter-1.3.0.linux-amd64.tar.gz && \ + tar -xvf node_exporter-1.3.0.linux-amd64.tar.gz && \ + mv node_exporter-1.3.0.linux-amd64/node_exporter /usr/bin/node_exporter && \ + rm -rf node_exporter-1.3.0.linux-amd64 node_exporter-1.3.0.linux-amd64.tar.gz &&\ + apt remove -y wget && \ + rm -rf /var/lib/apt/lists/* +COPY entrypoint.sh /entrypoint.sh +ENV NodeExporterHostname localhost +ENV NodeExporterInterval 10 +ENTRYPOINT ["/entrypoint.sh"] diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/node_exporter_docker/entrypoint.sh b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/node_exporter_docker/entrypoint.sh new file mode 100755 index 0000000000000000000000000000000000000000..475fafd3ae831697cda5ed817f181b815f42c335 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/node_exporter_docker/entrypoint.sh @@ -0,0 +1,3 @@ +#!/bin/bash +nohup /usr/bin/node_exporter & +tail -f /dev/null diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/node_exporter_docker/gen_taosadapter_url.py b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/node_exporter_docker/gen_taosadapter_url.py new file mode 100644 index 0000000000000000000000000000000000000000..05acd563758428be055b1728fdc1ebbcb6ee153f --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/node_exporter_docker/gen_taosadapter_url.py @@ -0,0 +1,15 @@ +import sys +## eg: python3 gen_taosadapter_url.py 172.26.10.87 10000:10050 + +#TaosadapterIp = "172.26.10.87" +TaosadapterIp = sys.argv[1] +#TaosadapterPort = "10000:10050" +TaosadapterPort = sys.argv[2] +start_port = int(TaosadapterPort.split(":")[0]) +end_port = int(TaosadapterPort.split(":")[1]) +urls_list = [] +for port in range(start_port, end_port+1): + urls_list.append(f"http://{TaosadapterIp}:{port}") +print(urls_list) + + diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/node_exporter_docker/run_node_exporter.sh b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/node_exporter_docker/run_node_exporter.sh new file mode 100755 index 0000000000000000000000000000000000000000..749005225455eb4ccd5b179a96dd21fc70f55a5b --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/node_exporter_docker/run_node_exporter.sh @@ -0,0 +1,46 @@ +#!/bin/bash +case 
"$1" in + -h|--help) + echo "Usage:" + echo "1st arg: port range" + echo "2nd arg: container_hostname prefix" + echo "eg: ./run_node_exporter.sh 10000 node_exporter_agent1" + echo "eg: ./run_node_exporter.sh 10000:10010 node_exporter_agent*" + exit 0 +;; +esac + +if [ $1 == "rm" ]; then + docker ps | grep $2 | awk '{print $1}' | xargs docker stop | xargs docker rm + exit +fi + +if [ ! -n "$1" ]; then + echo "please input 1st arg" + exit +fi +if [ ! -n "$2" ]; then + echo "please input 2nd arg" + exit +fi + +if [ ! `echo $1 | grep :` ];then + docker ps | grep $2 + if [ $? -eq 0 ];then + docker stop $2 && docker rm $2 + fi + docker run -itd --name $2 -h $2 -p $1:9100 taosadapter_node_exporter:v1 /bin/bash +else + perfix=`echo $2 | cut -d '*' -f 1` + start_port=`echo $1 | cut -d ':' -f 1` + end_port=`echo $1 | cut -d ':' -f 2` + for i in `seq $start_port $end_port`; + do + docker ps | grep $perfix$i + if [ $? -eq 0 ];then + docker stop $perfix$i && docker rm $perfix$i + fi + docker run -itd --name $perfix$i -h $perfix$i -p $i:9100 taosadapter_node_exporter:v1 /bin/bash + done +fi +#docker run -itd --name node_exporter_agent1 -h node_exporter_agent1 -p 10000:9100 taosadapter_node_exporter:v1 /bin/bash diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/rm_all.sh b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/rm_all.sh new file mode 100755 index 0000000000000000000000000000000000000000..a6cc8aa61f994ebe8dbd00abe726f38504def879 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/rm_all.sh @@ -0,0 +1,7 @@ +#!/bin/bash +./collectd_docker/run_collectd.sh rm collectd_agent* +./icinga2_docker/run_icinga2.sh rm icinga2_agent* +./statsd_docker/run_statsd.sh rm statsd_agent* +./tcollector_docker/run_tcollector.sh rm tcollector_agent* +./telegraf_docker/run_telegraf.sh rm telegraf_agent* +./node_exporter_docker/run_node_exporter.sh rm node_exporter_agent* diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/run_all.sh b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/run_all.sh new file mode 100755 index 0000000000000000000000000000000000000000..130af2e6503ded5ff56cd9fefe06d1989d4b0c19 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/run_all.sh @@ -0,0 +1,42 @@ +#!/bin/bash +case "$1" in + -h|--help) + echo "Usage:" + echo "1st arg: collectd_count" + echo "2nd arg: icinga2_count" + echo "3rd arg: statsd_count" + echo "4th arg: tcollector_count" + echo "5th arg: telegraf_count" + echo "6th arg: node_exporter port range" + echo "eg: ./run_all.sh 10 10 1 10 50 10000:10010" + exit 0 +;; +esac +collectd_count=$1 +icinga2_count=$2 +statsd_count=$3 +tcollector_count=$4 +telegraf_count=$5 +node_exporter_count=$6 +taosadapter1_ip=172.26.10.86 +taosadapter2_ip=172.26.10.85 +taosadapter3_ip=172.26.10.84 +./collectd_docker/run_collectd.sh $1 taosadapter1_collectd_agent* $taosadapter1_ip 6047 1 +./icinga2_docker/run_icinga2.sh $2 taosadapter1_icinga2_agent* $taosadapter1_ip 6048 1 +./statsd_docker/run_statsd.sh $3 taosadapter1_statsd_agent $taosadapter1_ip 6044 +./tcollector_docker/run_tcollector.sh $4 taosadapter1_tcollector_agent* $taosadapter1_ip 6049 +./telegraf_docker/run_telegraf.sh $5 taosadapter1_telegraf_agent* $taosadapter1_ip 6041 10s taosadapter1_telegraf + +./collectd_docker/run_collectd.sh $1 taosadapter2_collectd_agent* $taosadapter2_ip 6047 1 +./icinga2_docker/run_icinga2.sh $2 taosadapter2_icinga2_agent* $taosadapter2_ip 6048 1 +./statsd_docker/run_statsd.sh 
$3 taosadapter2_statsd_agent $taosadapter2_ip 6044 +./tcollector_docker/run_tcollector.sh $4 taosadapter2_tcollector_agent* $taosadapter2_ip 6049 +./telegraf_docker/run_telegraf.sh $5 taosadapter2_telegraf_agent* $taosadapter2_ip 6041 10s taosadapter2_telegraf + +./collectd_docker/run_collectd.sh $1 taosadapter3_collectd_agent* $taosadapter3_ip 6047 1 +./icinga2_docker/run_icinga2.sh $2 taosadapter3_icinga2_agent* $taosadapter3_ip 6048 1 +./statsd_docker/run_statsd.sh $3 taosadapter3_statsd_agent $taosadapter3_ip 6044 +./tcollector_docker/run_tcollector.sh $4 taosadapter3_tcollector_agent* $taosadapter3_ip 6049 +./telegraf_docker/run_telegraf.sh $5 taosadapter3_telegraf_agent* $taosadapter3_ip 6041 10s taosadapter3_telegraf + +./node_exporter_docker/run_node_exporter.sh $6 node_exporter_agent* diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/statsd_docker/Dockerfile b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/statsd_docker/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..4ad18b03ed60bcf383d1bf7491cabc1b60f9b985 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/statsd_docker/Dockerfile @@ -0,0 +1,20 @@ +FROM ubuntu:20.04 +ENV REFRESHED_AT 2021-12-06 +WORKDIR /root +ARG DEBIAN_FRONTEND=noninteractive +RUN set -ex; \ + apt update -y --fix-missing && \ + apt-get install -y --no-install-recommends nodejs devscripts debhelper wget netcat-traditional npm && \ + wget http://39.105.163.10:9000/statsd.tar.gz && \ + tar -xvf statsd.tar.gz && \ + cd statsd && \ + npm install && \ + npm audit fix && \ + rm -rf statsd.tar.gz && \ + apt remove -y wget && \ + rm -rf /var/lib/apt/lists/* +COPY config.js /root/statsd/config.js +COPY entrypoint.sh /entrypoint.sh +ENV TaosadapterIp 127.0.0.1 +ENV TaosadapterPort 6044 +ENTRYPOINT ["/entrypoint.sh"] diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/statsd_docker/config.js b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/statsd_docker/config.js new file mode 100644 index 0000000000000000000000000000000000000000..f48318e23fd9c6fc852be59abf150927fce290dc --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/statsd_docker/config.js @@ -0,0 +1,132 @@ +/* +Graphite Required Variable: + +(Leave this unset to avoid sending stats to Graphite. + Set debug flag and leave this unset to run in 'dry' debug mode - + useful for testing statsd clients without a Graphite server.) + + graphiteHost: hostname or IP of Graphite server + +Optional Variables: + + graphitePort: port for the graphite text collector [default: 2003] + graphitePicklePort: port for the graphite pickle collector [default: 2004] + graphiteProtocol: either 'text' or 'pickle' [default: 'text'] + backends: an array of backends to load. Each backend must exist + by name in the directory backends/. If not specified, + the default graphite backend will be loaded. + * example for console and graphite: + [ "./backends/console", "./backends/graphite" ] + + servers: an array of server configurations. + If not specified, the server, address, + address_ipv6, and port top-level configuration + options are used to configure a single server for + backwards-compatibility + Each server configuration supports the following keys: + server: the server to load. The server must exist by name in the directory + servers/. If not specified, the default udp server will be loaded. 
+ * example for tcp server: + "./servers/tcp" + address: address to listen on [default: 0.0.0.0] + address_ipv6: defines if the address is an IPv4 or IPv6 address [true or false, default: false] + port: port to listen for messages on [default: 8125] + socket: (only for tcp servers) path to unix domain socket which will be used to receive + metrics [default: undefinded] + socket_mod: (only for tcp servers) file mode which should be applied to unix domain socket, relevant + only if socket option is used [default: undefined] + + debug: debug flag [default: false] + mgmt_address: address to run the management TCP interface on + [default: 0.0.0.0] + mgmt_port: port to run the management TCP interface on [default: 8126] + title: Allows for overriding the process title. [default: statsd] + if set to false, will not override the process title and let the OS set it. + The length of the title has to be less than or equal to the binary name + cli arguments + NOTE: This does not work on Mac's with node versions prior to v0.10 + + healthStatus: default health status to be returned and statsd process starts ['up' or 'down', default: 'up'] + dumpMessages: log all incoming messages + flushInterval: interval (in ms) to flush metrics to each backend + percentThreshold: for time information, calculate the Nth percentile(s) + (can be a single value or list of floating-point values) + negative values mean to use "top" Nth percentile(s) values + [%, default: 90] + flush_counts: send stats_counts metrics [default: true] + + keyFlush: log the most frequently sent keys [object, default: undefined] + interval: how often to log frequent keys [ms, default: 0] + percent: percentage of frequent keys to log [%, default: 100] + log: location of log file for frequent keys [default: STDOUT] + deleteIdleStats: don't send values to graphite for inactive counters, sets, gauges, or timers + as opposed to sending 0. For gauges, this unsets the gauge (instead of sending + the previous value). Can be individually overridden. [default: false] + deleteGauges: don't send values to graphite for inactive gauges, as opposed to sending the previous value [default: false] + gaugesMaxTTL: number of flush cycles to wait before the gauge is marked as inactive, to use in combination with deleteGauges [default: 1] + deleteTimers: don't send values to graphite for inactive timers, as opposed to sending 0 [default: false] + deleteSets: don't send values to graphite for inactive sets, as opposed to sending 0 [default: false] + deleteCounters: don't send values to graphite for inactive counters, as opposed to sending 0 [default: false] + prefixStats: prefix to use for the statsd statistics data for this running instance of statsd [default: statsd] + applies to both legacy and new namespacing + keyNameSanitize: sanitize all stat names on ingress [default: true] + If disabled, it is up to the backends to sanitize keynames + as appropriate per their storage requirements. + + calculatedTimerMetrics: List of timer metrics that will be sent. Default will send all metrics. + To filter on percents and top percents: append '_percent' to the metric name. 
+ Example: calculatedTimerMetrics: ['count', 'median', 'upper_percent', 'histogram'] + + console: + prettyprint: whether to prettyprint the console backend + output [true or false, default: true] + + log: log settings [object, default: undefined] + backend: where to log: stdout or syslog [string, default: stdout] + application: name of the application for syslog [string, default: statsd] + level: log level for [node-]syslog [string, default: LOG_INFO] + + graphite: + legacyNamespace: use the legacy namespace [default: true] + globalPrefix: global prefix to use for sending stats to graphite [default: "stats"] + prefixCounter: graphite prefix for counter metrics [default: "counters"] + prefixTimer: graphite prefix for timer metrics [default: "timers"] + prefixGauge: graphite prefix for gauge metrics [default: "gauges"] + prefixSet: graphite prefix for set metrics [default: "sets"] + globalSuffix: global suffix to use for sending stats to graphite [default: ""] + This is particularly useful for sending per host stats by + settings this value to: require('os').hostname().split('.')[0] + + repeater: an array of hashes of the for host: and port: + that details other statsd servers to which the received + packets should be "repeated" (duplicated to). + e.g. [ { host: '10.10.10.10', port: 8125 }, + { host: 'observer', port: 88125 } ] + + repeaterProtocol: whether to use udp4, udp6, or tcp for repeaters. + ["udp4," "udp6", or "tcp" default: "udp4"] + + histogram: for timers, an array of mappings of strings (to match metrics) and + corresponding ordered non-inclusive upper limits of bins. + For all matching metrics, histograms are maintained over + time by writing the frequencies for all bins. + 'inf' means infinity. A lower limit of 0 is assumed. + default: [], meaning no histograms for any timer. + First match wins. examples: + * histogram to only track render durations, with unequal + class intervals and catchall for outliers: + [ { metric: 'render', bins: [ 0.01, 0.1, 1, 10, 'inf'] } ] + * histogram for all timers except 'foo' related, + equal class interval and catchall for outliers: + [ { metric: 'foo', bins: [] }, + { metric: '', bins: [ 50, 100, 150, 200, 'inf'] } ] + + automaticConfigReload: whether to watch the config file and reload it when it + changes. The default is true. Set this to false to disable. 
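+
+  Note for this stability setup: the repeater backend configured below
+  duplicates every received statsd packet to taosAdapter. entrypoint.sh
+  replaces the TaosadapterIp/TaosadapterPort placeholders at container
+  start, so the second repeater entry becomes e.g.
+  { host:'172.26.10.86', port: 6044 }.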
+*/ +{ + graphitePort: 2003 +, graphiteHost: "127.0.0.1" +, port: 8125 +, backends: [ "./backends/console", "./backends/graphite", "./backends/repeater" ] +, repeater: [ { host: '127.0.0.1', port: 8125 }, { host:'TaosadapterIp', port: TaosadapterPort } ] +} diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/statsd_docker/entrypoint.sh b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/statsd_docker/entrypoint.sh new file mode 100755 index 0000000000000000000000000000000000000000..9252eb561ce77bd57d204cfcc25ee6fd4eaab3dd --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/statsd_docker/entrypoint.sh @@ -0,0 +1,9 @@ +#!/bin/bash +sed -i 's/TaosadapterIp/'$TaosadapterIp'/g;s/TaosadapterPort/'$TaosadapterPort'/g;' /root/statsd/config.js +nohup node /root/statsd/stats.js /root/statsd/config.js & +sleep 10 +for i in `seq 1 100`; +do + echo "${HOSTNAME}.count${i}:55|c" | nc -w 1 -u 127.0.0.1 8125 +done +tail -f /dev/null diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/statsd_docker/run_statsd.sh b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/statsd_docker/run_statsd.sh new file mode 100755 index 0000000000000000000000000000000000000000..3a4a5bb5b21dc44a65be6e3b4cf219001df9e215 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/statsd_docker/run_statsd.sh @@ -0,0 +1,53 @@ +#!/bin/bash +case "$1" in + -h|--help) + echo "Usage:" + echo "1st arg: agent_count" + echo "2nd arg: container_hostname prefix" + echo "3rd arg: TaosadapterIp" + echo "4th arg: TaosadapterPort" + echo "eg: ./run_statsd.sh 1 statsd_agent1 172.26.10.86 6044" + echo "eg: ./run_statsd.sh 2 statsd_agent* 172.26.10.86 6044" + exit 0 +;; +esac + +if [ $1 == "rm" ]; then + docker ps | grep $2 | awk '{print $1}' | xargs docker stop | xargs docker rm + exit +fi + +if [ ! -n "$1" ]; then + echo "please input 1st arg" + exit +fi +if [ ! -n "$2" ]; then + echo "please input 2nd arg" + exit +fi +if [ ! -n "$3" ]; then + echo "please input 3rd arg" + exit +fi +if [ ! -n "$4" ]; then + echo "please input 4th arg" + exit +fi +if [ $1 -eq 1 ];then + docker ps | grep $2 + if [ $? -eq 0 ];then + docker stop $2 && docker rm $2 + fi + docker run -itd --name $2 -h $2 -e TaosadapterIp=$3 -e TaosadapterPort=$4 taosadapter_statsd:v1 /bin/bash +else + perfix=`echo $2 | cut -d '*' -f 1` + for i in `seq 1 $1`; + do + docker ps | grep $perfix$i + if [ $? 
-eq 0 ];then + docker stop $perfix$i && docker rm $perfix$i + fi + docker run -itd --name $perfix$i -h $perfix$i -e TaosadapterIp=$3 -e TaosadapterPort=$4 taosadapter_statsd:v1 /bin/bash + done +fi +#docker run -itd --name statsd_agent1 -h statsd_agent1 -e TaosadapterIp=172.26.10.86 -e TaosadapterPort=6044 taosadapter_statsd:v1 /bin/bash diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/tcollector_docker/Dockerfile b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/tcollector_docker/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..031f81305c4f734929b650d61ddc7761312ae3a1 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/tcollector_docker/Dockerfile @@ -0,0 +1,15 @@ +FROM ubuntu:20.04 +ENV REFRESHED_AT 2021-12-06 +WORKDIR /root +ARG DEBIAN_FRONTEND=noninteractive +RUN set -ex; \ + apt update -y --fix-missing && \ + apt-get install -y --no-install-recommends git python && \ + git clone git://github.com/OpenTSDB/tcollector.git && \ + apt remove -y git && \ + rm -rf /var/lib/apt/lists/* +COPY config.py /root/tcollector/collectors/etc/config.py +COPY entrypoint.sh /entrypoint.sh +ENV TaosadapterIp 127.0.0.1 +ENV TaosadapterPort 6049 +ENTRYPOINT ["/entrypoint.sh"] diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/tcollector_docker/config.py b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/tcollector_docker/config.py new file mode 100644 index 0000000000000000000000000000000000000000..83cb6fc39e8299ccc1f152a82eb8af1fed151c9b --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/tcollector_docker/config.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python +# This file is part of tcollector. +# Copyright (C) 2010 The tcollector Authors. +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. This program is distributed in the hope that it +# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +# General Public License for more details. You should have received a copy +# of the GNU Lesser General Public License along with this program. If not, +# see . + +# This 'onload' function will be called by tcollector when it starts up. +# You can put any code here that you want to load inside the tcollector. +# This also gives you a chance to override the options from the command +# line or to add custom sanity checks on their values. +# You can also use this to change the global tags that will be added to +# every single data point. For instance if you have multiple different +# pools or clusters of machines, you might wanna lookup the name of the +# pool or cluster the current host belongs to and add it to the tags. +# Throwing an exception here will cause the tcollector to die before it +# starts doing any work. +# Python files in this directory that don't have an "onload" function +# will be imported by tcollector too, but no function will be called. +# When this file executes, you can assume that its directory is in +# sys.path, so you can import other Python modules from this directory +# or its subdirectories. +import os +import sys + +def onload(options, tags): + """Function called by tcollector when it starts up. 
+ + Args: + options: The options as returned by the OptionParser. + tags: A dictionnary that maps tag names to tag values. + """ + pass + +def get_defaults(): + """Configuration values to use as defaults in the code + + This is called by the OptionParser. + """ + + default_cdir = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'collectors') + + defaults = { + 'verbose': False, + 'no_tcollector_stats': False, + 'evictinterval': 6000, + 'dedupinterval': 300, + 'deduponlyzero': False, + 'allowed_inactivity_time': 600, + 'dryrun': False, + 'maxtags': 8, + 'http_password': False, + 'reconnectinterval': 0, + 'http_username': False, + 'port': TaosadapterPort, + 'pidfile': '/var/run/tcollector.pid', + 'http': False, + 'http_api_path': "api/put", + 'tags': [], + 'remove_inactive_collectors': False, + 'host': 'TaosadapterIp', + 'logfile': '/var/log/tcollector.log', + 'cdir': default_cdir, + 'ssl': False, + 'stdin': False, + 'daemonize': False, + 'hosts': False, + "monitoring_interface": None, + "monitoring_port": 13280, + "namespace_prefix": "", + } + + return defaults diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/tcollector_docker/entrypoint.sh b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/tcollector_docker/entrypoint.sh new file mode 100755 index 0000000000000000000000000000000000000000..17e9ed39d78b707fd86c4539189f65e8b7deed68 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/tcollector_docker/entrypoint.sh @@ -0,0 +1,4 @@ +#!/bin/bash +sed -i 's/TaosadapterIp/'$TaosadapterIp'/g;s/TaosadapterPort/'$TaosadapterPort'/g;' /root/tcollector/collectors/etc/config.py +/root/tcollector/tcollector start +tail -f /dev/null diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/tcollector_docker/run_tcollector.sh b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/tcollector_docker/run_tcollector.sh new file mode 100755 index 0000000000000000000000000000000000000000..a01f107ab6f1b3fbc92683a48b3d6e8470508b86 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/tcollector_docker/run_tcollector.sh @@ -0,0 +1,53 @@ +#!/bin/bash +case "$1" in + -h|--help) + echo "Usage:" + echo "1st arg: agent_count" + echo "2nd arg: container_hostname prefix" + echo "3rd arg: TaosadapterIp" + echo "4th arg: TaosadapterPort" + echo "eg: ./run_tcollector.sh 1 tcollector_agent1 172.26.10.86 6049" + echo "eg: ./run_tcollector.sh 2 tcollector_agent* 172.26.10.86 6049" + exit 0 +;; +esac + +if [ $1 == "rm" ]; then + docker ps | grep $2 | awk '{print $1}' | xargs docker stop | xargs docker rm + exit +fi + +if [ ! -n "$1" ]; then + echo "please input 1st arg" + exit +fi +if [ ! -n "$2" ]; then + echo "please input 2nd arg" + exit +fi +if [ ! -n "$3" ]; then + echo "please input 3rd arg" + exit +fi +if [ ! -n "$4" ]; then + echo "please input 4th arg" + exit +fi +if [ $1 -eq 1 ];then + docker ps | grep $2 + if [ $? -eq 0 ];then + docker stop $2 && docker rm $2 + fi + docker run -itd --name $2 -h $2 -e TaosadapterIp=$3 -e TaosadapterPort=$4 taosadapter_tcollector:v1 /bin/bash +else + perfix=`echo $2 | cut -d '*' -f 1` + for i in `seq 1 $1`; + do + docker ps | grep $perfix$i + if [ $? 
-eq 0 ];then + docker stop $perfix${i} && docker rm $perfix${i} + fi + docker run -itd --name $perfix$i -h $perfix$i -e TaosadapterIp=$3 -e TaosadapterPort=$4 taosadapter_tcollector:v1 /bin/bash + done +fi +#docker run -itd --name tcollector_agent1 -h tcollector_agent1 -e TaosadapterIp=172.26.10.86 -e TaosadapterPort=6049 taosadapter_tcollector:v1 /bin/bash diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/telegraf_docker/Dockerfile b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/telegraf_docker/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..99586deb44161e5e196e90a603c179257869ba29 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/telegraf_docker/Dockerfile @@ -0,0 +1,22 @@ +FROM ubuntu:20.04 +ENV REFRESHED_AT 2021-12-06 +ARG DEBIAN_FRONTEND=noninteractive +WORKDIR /root +RUN set -ex; \ + apt update -y --fix-missing && \ + apt install -y gnupg curl systemctl +RUN set -ex; \ + curl -fsSL https://repos.influxdata.com/influxdb.key | apt-key add - && \ + . /etc/lsb-release && \ + echo 'deb https://repos.influxdata.com/ubuntu focal stable' > /etc/apt/sources.list.d/influxdb.list && \ + apt update -y --fix-missing && \ + apt-get install -y --no-install-recommends telegraf && \ + apt remove -y gnupg curl && \ + rm -rf /var/lib/apt/lists/* +COPY entrypoint.sh /entrypoint.sh +COPY telegraf.conf /etc/telegraf/telegraf.conf +ENV TelegrafInterval 1s +ENV TaosadapterIp 127.0.0.1 +ENV TaosadapterPort 6048 +ENV Dbname telegraf +ENTRYPOINT ["/entrypoint.sh"] diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/telegraf_docker/entrypoint.sh b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/telegraf_docker/entrypoint.sh new file mode 100755 index 0000000000000000000000000000000000000000..7a8f91fad7a54519125bf2b584c0b913ea1fe296 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/telegraf_docker/entrypoint.sh @@ -0,0 +1,4 @@ +#!/bin/bash +sed -i 's/TaosadapterIp/'$TaosadapterIp'/g;s/TaosadapterPort/'$TaosadapterPort'/g;s/TelegrafInterval/'$TelegrafInterval'/g;s/Dbname/'$Dbname'/g;' /etc/telegraf/telegraf.conf +systemctl restart telegraf +tail -f /dev/null diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/telegraf_docker/run_telegraf.sh b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/telegraf_docker/run_telegraf.sh new file mode 100755 index 0000000000000000000000000000000000000000..3d8d1fc65aa3d5499c0a0b8c8b7292f11291ea0a --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/telegraf_docker/run_telegraf.sh @@ -0,0 +1,64 @@ +#!/bin/bash +case "$1" in + -h|--help) + echo "Usage:" + echo "1st arg: agent_count" + echo "2nd arg: container_hostname prefix" + echo "3rd arg: TaosadapterIp" + echo "4th arg: TaosadapterPort" + echo "5th arg: TelegrafInterval" + echo "6th arg: Dbname" + echo "eg: ./run_telegraf.sh 1 telegraf_agent1 172.26.10.86 6041 1s telegraf" + echo "eg: ./run_telegraf.sh 2 telegraf_agent* 172.26.10.86 6041 1s telegraf" + exit 0 +;; +esac + +if [ $1 == "rm" ]; then + docker ps | grep $2 | awk '{print $1}' | xargs docker stop | xargs docker rm + exit +fi + +if [ ! -n "$1" ]; then + echo "please input 1st arg" + exit +fi +if [ ! -n "$2" ]; then + echo "please input 2nd arg" + exit +fi +if [ ! -n "$3" ]; then + echo "please input 3rd arg" + exit +fi +if [ ! -n "$4" ]; then + echo "please input 4th arg" + exit +fi +if [ ! 
-n "$5" ]; then + echo "please input 5th arg" + exit +fi +if [ ! -n "$6" ]; then + echo "please input 6th arg" + exit +fi +if [ $1 -eq 1 ];then + docker ps | grep $2 + if [ $? -eq 0 ];then + docker stop $2 && docker rm $2 + fi + docker run -itd --name $2 -h $2 -e TaosadapterIp=$3 -e TaosadapterPort=$4 -e TelegrafInterval=$5 taosadapter_telegraf:v1 /bin/bash +else + perfix=`echo $2 | cut -d '*' -f 1` + for i in `seq 1 $1`; + do + docker ps | grep $perfix$i + if [ $? -eq 0 ];then + docker stop $perfix$i && docker rm $perfix$i + fi + docker run -itd --name $perfix$i -h $perfix$i -e TaosadapterIp=$3 -e TaosadapterPort=$4 -e TelegrafInterval=$5 -e Dbname=$6 taosadapter_telegraf:v1 /bin/bash + done +fi + +#docker run -itd --name telegraf_agent1 -h telegraf_agent1 -e TaosadapterIp=172.26.10.86 -e TaosadapterPort=6041 -e TelegrafInterval=1s -e Dbname=telegraf taosadapter_telegraf:v1 /bin/bash diff --git a/tests/stability-scripts/taosadapter_stability/agent_dockerfile/telegraf_docker/telegraf.conf b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/telegraf_docker/telegraf.conf new file mode 100644 index 0000000000000000000000000000000000000000..e95179519fd7997e6c22861f0b1b7e164129b2a1 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/agent_dockerfile/telegraf_docker/telegraf.conf @@ -0,0 +1,8956 @@ +# Telegraf Configuration +# +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply surround +# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), +# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) + + +# Global tags can be specified here in key="value" format. +[global_tags] + # dc = "us-east-1" # will tag all metrics with dc=us-east-1 + # rack = "1a" + ## Environment variables can be used as tags, and throughout the config file + # user = "$USER" + + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "TelegrafInterval" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter + flush_interval = "TelegrafInterval" + ## Jitter the flush interval by a random amount. 
This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. If set to + ## the empty string then logs are written to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 + + ## Pick a timezone to use when logging or type 'local' for local time. + ## Example: America/Chicago + # log_with_timezone = "" + + ## Override default hostname, if empty use os.Hostname() + hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = false + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + + +# Configuration for sending metrics to InfluxDB +[[outputs.influxdb]] + ## The full HTTP or UDP URL for your InfluxDB instance. + ## + ## Multiple URLs can be specified for a single cluster, only ONE of the + ## urls will be written to each interval. + # urls = ["unix:///var/run/influxdb.sock"] + # urls = ["udp://127.0.0.1:8089"] + # urls = ["http://127.0.0.1:8086"] + + ## The target database for metrics; will be created as needed. + ## For UDP url endpoint database needs to be configured on server side. + # database = "telegraf" + + ## The value of this tag will be used to determine the database. If this + ## tag is not set the 'database' option is used as the default. + # database_tag = "" + + ## If true, the 'database_tag' will not be included in the written metric. + # exclude_database_tag = false + + ## If true, no CREATE DATABASE queries will be sent. Set to true when using + ## Telegraf with a user without permissions to create databases or when the + ## database already exists. + # skip_database_creation = false + + ## Name of existing retention policy to write to. Empty string writes to + ## the default retention policy. Only takes effect when using HTTP. + # retention_policy = "" + + ## The value of this tag will be used to determine the retention policy. 
If this + ## tag is not set the 'retention_policy' option is used as the default. + # retention_policy_tag = "" + + ## If true, the 'retention_policy_tag' will not be included in the written metric. + # exclude_retention_policy_tag = false + + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". + ## Only takes effect when using HTTP. + # write_consistency = "any" + + ## Timeout for HTTP messages. + # timeout = "5s" + + ## HTTP Basic Auth + # username = "telegraf" + # password = "metricsmetricsmetricsmetrics" + + ## HTTP User-Agent + # user_agent = "telegraf" + + ## UDP payload size is the maximum packet size to send. + # udp_payload = "512B" + + ## Optional TLS Config for use on HTTP connections. + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. + # http_proxy = "http://corporate.proxy:3128" + + ## Additional HTTP headers + # http_headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "gzip" + + ## When true, Telegraf will output unsigned integers as unsigned values, + ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned + ## integer values. Enabling this option will result in field type errors if + ## existing data has been written. + # influx_uint_support = false + + +# # Configuration for Amon Server to send metrics to. +# [[outputs.amon]] +# ## Amon Server Key +# server_key = "my-server-key" # required. +# +# ## Amon Instance URL +# amon_instance = "https://youramoninstance" # required +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Publishes metrics to an AMQP broker +# [[outputs.amqp]] +# ## Broker to publish to. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# +# ## Brokers to publish to. If multiple brokers are specified a random broker +# ## will be selected anytime a connection is established. This can be +# ## helpful for load balancing when not using a dedicated load balancer. +# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Maximum messages to send over a connection. Once this is reached, the +# ## connection is closed and a new connection is made. This can be helpful for +# ## load balancing when not using a dedicated load balancer. +# # max_messages = 0 +# +# ## Exchange to declare and publish to. +# exchange = "telegraf" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". +# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_property" = "timestamp"} +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Auth method. PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Metric tag to use as a routing key. 
+# ## ie, if this tag exists, its value will be used as the routing key +# # routing_tag = "host" +# +# ## Static routing key. Used when no routing_tag is set or as a fallback +# ## when the tag specified in routing tag is not found. +# # routing_key = "" +# # routing_key = "telegraf" +# +# ## Delivery Mode controls if a published message is persistent. +# ## One of "transient" or "persistent". +# # delivery_mode = "transient" +# +# ## InfluxDB database added as a message header. +# ## deprecated in 1.7; use the headers option +# # database = "telegraf" +# +# ## InfluxDB retention policy added as a message header +# ## deprecated in 1.7; use the headers option +# # retention_policy = "default" +# +# ## Static headers added to each published message. +# # headers = { } +# # headers = {"database" = "telegraf", "retention_policy" = "default"} +# +# ## Connection timeout. If not provided, will default to 5s. 0s means no +# ## timeout (not recommended). +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## If true, use batch serialization format instead of line based delimiting. +# ## Only applies to data formats which are not line based such as JSON. +# ## Recommended to set to true. +# # use_batch_format = false +# +# ## Content encoding for message payloads, can be set to "gzip" or +# ## "identity" to apply no encoding. +# ## +# ## Please note that when use_batch_format = false each amqp message contains only +# ## a single metric; it is recommended to use compression with the batch format +# ## for best results. +# # content_encoding = "identity" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Send metrics to Azure Application Insights +# [[outputs.application_insights]] +# ## Instrumentation key of the Application Insights resource. +# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx" +# +# ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints +# # endpoint_url = "https://dc.services.visualstudio.com/v2/track" +# +# ## Timeout for closing (default: 5s). +# # timeout = "5s" +# +# ## Enable additional diagnostic logging. +# # enable_diagnostic_logging = false +# +# ## Context Tag Sources add Application Insights context tags to a tag value. +# ## +# ## For list of allowed context tag keys see: +# ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go +# # [outputs.application_insights.context_tag_sources] +# # "ai.cloud.role" = "kubernetes_container_name" +# # "ai.cloud.roleInstance" = "kubernetes_pod_name" + + +# # Sends metrics to Azure Data Explorer +# [[outputs.azure_data_explorer]] +# ## Azure Data Explorer cluster endpoint +# ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net" +# endpoint_url = "" +# +# ## The Azure Data Explorer database that the metrics will be ingested into. +# ## The plugin will NOT generate this database automatically; it is expected that this database already exists before ingestion.
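A hedged sketch of the AMQP routing behavior described above: metrics that carry a `host` tag are routed by its value, and everything else falls back to the static `routing_key` (the broker URL is the placeholder from the sample).

```toml
# Sketch: route by the "host" tag when present, else fall back to the
# static routing key "telegraf". Broker URL is a placeholder.
[[outputs.amqp]]
  brokers = ["amqp://localhost:5672/influxdb"]
  exchange = "telegraf"
  routing_tag = "host"
  routing_key = "telegraf"
  data_format = "influx"
```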
+# ## ex: "exampledatabase" +# database = "" +# +# ## Timeout for Azure Data Explorer operations +# # timeout = "20s" +# +# ## Type of metrics grouping used when pushing to Azure Data Explorer. +# ## Default is "TablePerMetric" for one table per different metric. +# ## For more information, please check the plugin README. +# # metrics_grouping_type = "TablePerMetric" +# +# ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). +# # table_name = "" +# + + +# # Send aggregate metrics to Azure Monitor +# [[outputs.azure_monitor]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Set the namespace prefix, defaults to "Telegraf/". +# # namespace_prefix = "Telegraf/" +# +# ## Azure Monitor doesn't have a string value type, so convert string +# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows +# ## a maximum of 10 dimensions so Telegraf will only send the first 10 +# ## alphanumeric dimensions. +# # strings_as_dimensions = false +# +# ## Both region and resource_id must be set or be available via the +# ## Instance Metadata service on Azure Virtual Machines. +# # +# ## Azure Region to publish metrics against. +# ## ex: region = "southcentralus" +# # region = "" +# # +# ## The Azure Resource ID against which metric will be logged, e.g. +# ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" +# # resource_id = "" +# +# ## Optionally, if in Azure US Government, China or other sovereign +# ## cloud environment, set appropriate REST endpoint for receiving +# ## metrics. (Note: region may be unused in this context) +# # endpoint_url = "https://monitoring.core.usgovcloudapi.net" + + +# # Publish Telegraf metrics to a Google Cloud PubSub topic +# [[outputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub topic. +# project = "my-project" +# +# ## Required. Name of PubSub topic to publish metrics to. +# topic = "my-topic" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. If true, will send all metrics per write in one PubSub message. +# # send_batched = true +# +# ## The following publish_* parameters specifically configures batching +# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read +# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1. +# # publish_count_threshold = 1000 +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1 +# # publish_byte_threshold = 1000000 +# +# ## Optional. Specifically configures requests made to the PubSub API. +# # publish_num_go_routines = 2 +# +# ## Optional. Specifies a timeout for requests to the PubSub API. 
+# # publish_timeout = "30s" +# +# ## Optional. If true, published PubSub message data will be base64-encoded. +# # base64_data = false +# +# ## Optional. PubSub attributes to add to metrics. +# # [outputs.cloud_pubsub.attributes] +# # my_attr = "tag_value" + + +# # Configuration for AWS CloudWatch output. +# [[outputs.cloudwatch]] +# ## Amazon REGION +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Namespace for the CloudWatch MetricDatums +# namespace = "InfluxData/Telegraf" +# +# ## If you have a large number of metrics, consider sending statistic +# ## values instead of raw metrics, which can both improve performance and +# ## save on AWS API cost. If this flag is enabled, the plugin parses the required +# ## CloudWatch statistic fields (count, min, max, and sum) and sends them to CloudWatch. +# ## You can use the basicstats aggregator to calculate those fields. If not all statistic +# ## fields are available, all fields are still sent as raw metrics. +# # write_statistics = false +# +# ## Enable high resolution metrics of 1 second (if not enabled, standard 60 second resolution is used) +# # high_resolution_metrics = false + + +# # Configuration for AWS CloudWatchLogs output. +# [[outputs.cloudwatch_logs]] +# ## The region is the Amazon region that you wish to connect to. +# ## Examples include but are not limited to: +# ## - us-west-1 +# ## - us-west-2 +# ## - us-east-1 +# ## - ap-southeast-1 +# ## - ap-southeast-2 +# ## ... +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## CloudWatch log group. Must be created in AWS CloudWatch Logs upfront!
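The `write_statistics` note above pairs naturally with the basicstats aggregator; here is a hedged sketch of that pairing (region and namespace reuse the sample values, and the aggregator settings assume the stock basicstats plugin).

```toml
# Sketch: basicstats computes the count/min/max/sum fields that
# write_statistics consumes; assumes the stock basicstats aggregator.
[[aggregators.basicstats]]
  period = "30s"
  stats = ["count", "min", "max", "sum"]

[[outputs.cloudwatch]]
  region = "us-east-1"              # example region from the sample
  namespace = "InfluxData/Telegraf"
  write_statistics = true
```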
+# ## For example, you can specify the name of the k8s cluster here to group logs from all clusters in one place +# log_group = "my-group-name" +# +# ## Log stream in log group +# ## Either a log stream name or a reference to a metric attribute from which it can be parsed: +# ## tag:<name of the tag> or field:<name of the field>. If the log stream does not exist, it will be created. +# ## Since AWS does not automatically delete log streams with expired log entries (i.e. empty log streams), +# ## you need to put appropriate house-keeping in place (https://forums.aws.amazon.com/thread.jspa?threadID=178855) +# log_stream = "tag:location" +# +# ## Source of log data - metric name +# ## Specify the name of the metric from which the log data should be retrieved. +# ## I.e., if you are using the docker_log plugin to stream logs from a container, then +# ## specify log_data_metric_name = "docker_log" +# log_data_metric_name = "docker_log" +# +# ## Specify from which metric attribute the log data should be retrieved: +# ## tag:<name of the tag> or field:<name of the field>. +# ## I.e., if you are using the docker_log plugin to stream logs from a container, then +# ## specify log_data_source = "field:message" +# log_data_source = "field:message" + + +# # Configuration for CrateDB to send metrics to. +# [[outputs.cratedb]] +# # A github.com/jackc/pgx/v4 connection string. +# # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig +# url = "postgres://user:password@localhost/schema?sslmode=disable" +# # Timeout for all CrateDB queries. +# timeout = "5s" +# # Name of the table to store metrics in. +# table = "metrics" +# # If true, and the metrics table does not exist, create it automatically. +# table_create = true +# # The character(s) to replace any '.' in an object key with +# key_separator = "_" + + +# # Configuration for DataDog API to send metrics to. +# [[outputs.datadog]] +# ## Datadog API key +# apikey = "my-secret-key" +# +# ## Connection timeout. +# # timeout = "5s" +# +# ## Write URL override; useful for debugging. +# # url = "https://app.datadoghq.com/api/v1/series" +# +# ## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set) +# # http_proxy_url = "http://localhost:8888" + + +# # Send metrics to nowhere at all +# [[outputs.discard]] +# # no configuration + + +# # Send telegraf metrics to a Dynatrace environment +# [[outputs.dynatrace]] +# ## For usage with the Dynatrace OneAgent you can omit any configuration; +# ## the only requirement is that the OneAgent is running on the same host. +# ## Only set up the environment URL and token if you want to monitor a host without the OneAgent present. +# ## +# ## Your Dynatrace environment URL. +# ## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default) +# ## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest" +# ## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest" +# url = "" +# +# ## Your Dynatrace API token. +# ## Create an API token within your Dynatrace environment by navigating to Settings > Integration > Dynatrace API +# ## The API token needs the data ingest scope permission. When using OneAgent, no API token is required.
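As a worked version of the docker_log pairing the comments above describe, a hedged sketch follows; the group and stream names are the sample placeholders, and the log group must already exist in AWS.

```toml
# Sketch: stream container logs to CloudWatch Logs via the docker_log
# input; log group "my-group-name" must already exist in AWS.
[[inputs.docker_log]]

[[outputs.cloudwatch_logs]]
  region = "us-east-1"
  log_group = "my-group-name"
  log_stream = "tag:location"
  log_data_metric_name = "docker_log"
  log_data_source = "field:message"
```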
+# api_token = "" +# +# ## Optional prefix for metric names (e.g.: "telegraf") +# prefix = "telegraf" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional flag for ignoring tls certificate check +# # insecure_skip_verify = false +# +# +# ## Connection timeout, defaults to "5s" if not set. +# timeout = "5s" +# +# ## If you want metrics to be treated and reported as delta counters, add the metric names here +# additional_counters = [ ] +# +# ## Optional dimensions to be added to every metric +# # [outputs.dynatrace.default_dimensions] +# # default_key = "default value" + + +# # Configuration for Elasticsearch to send metrics to. +# [[outputs.elasticsearch]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval. +# urls = [ "http://node1.es.example.com:9200" ] # required. +# ## Elasticsearch client timeout, defaults to "5s" if not set. +# timeout = "5s" +# ## Set to true to ask Elasticsearch for a list of all cluster nodes, +# ## so it is not necessary to list all nodes in the urls config option. +# enable_sniffer = false +# ## Set to true to enable gzip compression +# enable_gzip = false +# ## Set the interval to check if the Elasticsearch nodes are available +# ## Setting to "0s" will disable the health check (not recommended in production) +# health_check_interval = "10s" +# ## HTTP basic authentication details +# # username = "telegraf" +# # password = "mypassword" +# +# ## Index Config +# ## The target index for metrics (Elasticsearch will create it if it does not exist). +# ## You can use the date specifiers below to create indexes per time frame. +# ## The metric timestamp will be used to decide the destination index name +# # %Y - year (2016) +# # %y - last two digits of year (00..99) +# # %m - month (01..12) +# # %d - day of month (e.g., 01) +# # %H - hour (00..23) +# # %V - week of the year (ISO week) (01..53) +# ## Additionally, you can specify a tag name using the notation {{tag_name}} +# ## which will be used as part of the index name. If the tag does not exist, +# ## the default tag value will be used. +# # index_name = "telegraf-{{host}}-%Y.%m.%d" +# # default_tag_value = "none" +# index_name = "telegraf-%Y.%m.%d" # required. +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Template Config +# ## Set to true if you want telegraf to manage its index template. +# ## If enabled it will create a recommended index template for telegraf indexes +# manage_template = true +# ## The template name used for telegraf indexes +# template_name = "telegraf" +# ## Set to true if you want telegraf to overwrite an existing template +# overwrite_template = false +# ## If set to true, a unique ID hash will be sent as a sha256(concat(timestamp,measurement,series-hash)) string; +# ## this enables data resends and metric point updates, avoiding duplicated metrics with different IDs +# force_document_id = false + + +# # Send metrics to command as input over stdin +# [[outputs.exec]] +# ## Command to ingest metrics via stdin. +# command = ["tee", "-a", "/dev/null"] +# +# ## Timeout for command to complete. +# # timeout = "5s" +# +# ## Data format to output.
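To make the index-name templating above concrete, a hedged sketch: one index per host per day, with "none" substituted when the host tag is missing (the values are the sample's own examples).

```toml
# Sketch of date + tag based index naming from the comments above.
[[outputs.elasticsearch]]
  urls = ["http://node1.es.example.com:9200"]
  index_name = "telegraf-{{host}}-%Y.%m.%d"  # e.g. telegraf-web01-2016.06.13
  default_tag_value = "none"
  manage_template = true
```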
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Run executable as long-running output plugin +# [[outputs.execd]] +# ## Program to run as daemon +# command = ["my-telegraf-output", "--some-flag", "value"] +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Data format to export. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send telegraf metrics to file(s) +# [[outputs.file]] +# ## Files to write to, "stdout" is a specially handled file. +# files = ["stdout", "/tmp/metrics.out"] +# +# ## Use batch serialization format instead of line based delimiting. The +# ## batch format allows for the production of non line based output formats and +# ## may more efficiently encode metric groups. +# # use_batch_format = false +# +# ## The file will be rotated after the time interval specified. When set +# ## to 0 no time based rotation is performed. +# # rotation_interval = "0d" +# +# ## The logfile will be rotated when it becomes larger than the specified +# ## size. When set to 0 no size based rotation is performed. +# # rotation_max_size = "0MB" +# +# ## Maximum number of rotated archives to keep, any older logs are deleted. +# ## If set to -1, no archives are removed. +# # rotation_max_archives = 5 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for Graphite server to send metrics to +# [[outputs.graphite]] +# ## TCP endpoint for your graphite instance. +# ## If multiple endpoints are configured, output will be load balanced. +# ## Only one of the endpoints will be written to with each iteration. +# servers = ["localhost:2003"] +# ## Prefix metrics name +# prefix = "" +# ## Graphite output template +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# template = "host.tags.measurement.field" +# +# ## Enable Graphite tags support +# # graphite_tag_support = false +# +# ## Define how metric names and tags are sanitized; options are "strict", or "compatible" +# ## strict - Default method, and backwards compatible with previous versionf of Telegraf +# ## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec +# # graphite_tag_sanitize_mode = "strict" +# +# ## Character for separating metric name and field for Graphite tags +# # graphite_separator = "." +# +# ## Graphite templates patterns +# ## 1. Template for cpu +# ## 2. Template for disk* +# ## 3. 
Default template +# # templates = [ +# # "cpu tags.measurement.host.field", +# # "disk* measurement.field", +# # "host.measurement.tags.field" +# #] +# +# ## timeout in seconds for the write connection to graphite +# timeout = 2 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Send telegraf metrics to graylog +# [[outputs.graylog]] +# ## UDP endpoint for your graylog instance. +# servers = ["127.0.0.1:12201"] +# +# ## The field to use as the GELF short_message, if unset the static string +# ## "telegraf" will be used. +# ## example: short_message_field = "message" +# # short_message_field = "" + + +# # Configurable HTTP health check resource based on metrics +# [[outputs.health]] +# ## Address and port to listen on. +# ## ex: service_address = "http://localhost:8080" +# ## service_address = "unix:///var/run/telegraf-health.sock" +# # service_address = "http://:8080" +# +# ## The maximum duration for reading the entire request. +# # read_timeout = "5s" +# ## The maximum duration for writing the entire response. +# # write_timeout = "5s" +# +# ## Username and password to accept for HTTP basic authentication. +# # basic_username = "user1" +# # basic_password = "secret" +# +# ## Allowed CA certificates for client certificates. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## TLS server certificate and private key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## One or more check sub-tables should be defined, it is also recommended to +# ## use metric filtering to limit the metrics that flow into this output. +# ## +# ## When using the default buffer sizes, this example will fail when the +# ## metric buffer is half full. +# ## +# ## namepass = ["internal_write"] +# ## tagpass = { output = ["influxdb"] } +# ## +# ## [[outputs.health.compares]] +# ## field = "buffer_size" +# ## lt = 5000.0 +# ## +# ## [[outputs.health.contains]] +# ## field = "buffer_size" + + +# # A plugin that can transmit metrics over HTTP +# [[outputs.http]] +# ## URL is the address to send metrics to +# url = "http://127.0.0.1:8080/telegraf" +# +# ## Timeout for HTTP message +# # timeout = "5s" +# +# ## HTTP method, one of: "POST" or "PUT" +# # method = "POST" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## OAuth2 Client Credentials Grant +# # client_id = "clientid" +# # client_secret = "secret" +# # token_url = "https://indentityprovider/oauth2/v1/token" +# # scopes = ["urn:opc:idm:__myscopes__"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie +# # cookie_auth_renewal = "5m" +# +# ## Data format to output. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Additional HTTP headers +# # [outputs.http.headers] +# # # Should be set manually to "application/json" for json data_format +# # Content-Type = "text/plain; charset=utf-8" +# +# ## Idle (keep-alive) connection timeout. +# ## Maximum amount of time before idle connection is closed. +# ## Zero means no limit. +# # idle_conn_timeout = 0 + + +# # Configuration for sending metrics to InfluxDB +# [[outputs.influxdb_v2]] +# ## The URLs of the InfluxDB cluster nodes. +# ## +# ## Multiple URLs can be specified for a single cluster, only ONE of the +# ## urls will be written to each interval. +# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] +# urls = ["http://127.0.0.1:8086"] +# +# ## Token for authentication. +# token = "" +# +# ## Organization is the name of the organization you wish to write to; must exist. +# organization = "" +# +# ## Destination bucket to write into. +# bucket = "" +# +# ## The value of this tag will be used to determine the bucket. If this +# ## tag is not set the 'bucket' option is used as the default. +# # bucket_tag = "" +# +# ## If true, the bucket tag will not be added to the metric. +# # exclude_bucket_tag = false +# +# ## Timeout for HTTP messages. +# # timeout = "5s" +# +# ## Additional HTTP headers +# # http_headers = {"X-Special-Header" = "Special-Value"} +# +# ## HTTP Proxy override; if unset, the standard proxy environment +# ## variables are consulted to determine which proxy, if any, should be used. +# # http_proxy = "http://corporate.proxy:3128" +# +# ## HTTP User-Agent +# # user_agent = "telegraf" +# +# ## Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "gzip" +# +# ## Enable or disable uint support for writing uints to InfluxDB 2.0. +# # influx_uint_support = false +# +# ## Optional TLS Config for use on HTTP connections. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Configuration for sending metrics to an Instrumental project +# [[outputs.instrumental]] +# ## Project API Token (required) +# api_token = "API Token" # required +# ## Prefix the metrics with a given name +# prefix = "" +# ## Stats output template (Graphite formatting) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# template = "host.tags.measurement.field" +# ## Timeout in seconds to connect +# timeout = "2s" +# ## Display Communication to Instrumental +# debug = false + + +# # Configuration for the Kafka server to send metrics to +# [[outputs.kafka]] +# ## URLs of kafka brokers +# brokers = ["localhost:9092"] +# ## Kafka topic for producer messages +# topic = "telegraf" +# +# ## The value of this tag will be used as the topic. If not set the 'topic' +# ## option is used. +# # topic_tag = "" +# +# ## If true, the 'topic_tag' will be removed from the metric.
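Since the v2 output above needs only a few settings in practice, here is a minimal hedged sketch; the token, organization, and bucket are placeholders, and the environment-variable form is one common way to avoid hardcoding the token.

```toml
# Minimal InfluxDB v2 sketch; token/org/bucket are placeholders that
# must match an existing organization and bucket.
[[outputs.influxdb_v2]]
  urls = ["http://127.0.0.1:8086"]
  token = "$INFLUX_TOKEN"   # e.g. substituted from the environment
  organization = "example-org"
  bucket = "telegraf"
```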
+# # exclude_topic_tag = false +# +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Of particular interest, lz4 compression +# ## requires at least version 0.10.0.0. +# ## ex: version = "1.1.0" +# # version = "" +# +# ## Optional topic suffix configuration. +# ## If the section is omitted, no suffix is used. +# ## The following topic suffix methods are supported: +# ## measurement - suffix is the separator + the measurement's name +# ## tags - suffix is the separator + the specified tags' values +# ## interleaved with the separator +# +# ## Suffix is "_" + the measurement name +# # [outputs.kafka.topic_suffix] +# # method = "measurement" +# # separator = "_" +# +# ## Suffix is "__" + the measurement's "foo" tag value. +# ## If there is no such tag, the suffix is an empty string +# # [outputs.kafka.topic_suffix] +# # method = "tags" +# # keys = ["foo"] +# # separator = "__" +# +# ## Suffix is "_" + the measurement's "foo" and "bar" +# ## tag values, separated by "_". If there are no such tags, +# ## their values are treated as empty strings. +# # [outputs.kafka.topic_suffix] +# # method = "tags" +# # keys = ["foo", "bar"] +# # separator = "_" +# +# ## The routing tag specifies a tag key on the metric whose value is used as +# ## the message key. The message key is used to determine which partition to +# ## send the message to. This tag is preferred over the routing_key option. +# routing_tag = "host" +# +# ## The routing key is set as the message key and used to determine which +# ## partition to send the message to. This value is only used when no +# ## routing_tag is set or as a fallback when the tag specified in routing tag +# ## is not found. +# ## +# ## If set to "random", a random value will be generated for each message. +# ## +# ## When unset, no message key is added and each message is routed to a random +# ## partition. +# ## +# ## ex: routing_key = "random" +# ## routing_key = "telegraf" +# # routing_key = "" +# +# ## Compression codec represents the various compression codecs recognized by +# ## Kafka in messages. +# ## 0 : None +# ## 1 : Gzip +# ## 2 : Snappy +# ## 3 : LZ4 +# ## 4 : ZSTD +# # compression_codec = 0 +# +# ## Idempotent Writes +# ## If enabled, exactly one copy of each message is written. +# # idempotent_writes = false +# +# ## RequiredAcks is used in Produce Requests to tell the broker how many +# ## replica acknowledgements it must see before responding +# ## 0 : the producer never waits for an acknowledgement from the broker. +# ## This option provides the lowest latency but the weakest durability +# ## guarantees (some data will be lost when a server fails). +# ## 1 : the producer gets an acknowledgement after the leader replica has +# ## received the data. This option provides better durability as the +# ## client waits until the server acknowledges the request as successful +# ## (only messages that were written to the now-dead leader but not yet +# ## replicated will be lost). +# ## -1: the producer gets an acknowledgement after all in-sync replicas have +# ## received the data. This option provides the best durability; no +# ## messages will be lost as long as at least one in-sync +# ## replica remains. +# # required_acks = -1 +# +# ## The maximum number of times to retry sending a metric before failing +# ## until the next flush. +# # max_retry = 3 +# +# ## The maximum permitted size of a message.
Should be set equal to or +# ## smaller than the broker's 'message.max.bytes'. +# # max_message_bytes = 1000000 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional SASL Config +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI (experimental) +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER (experimental) +# # sasl_access_token = "" +# +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Configuration for the AWS Kinesis output. +# [[outputs.kinesis]] +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# ## DEPRECATED: PartitionKey as used for sharding data. +# partitionkey = "PartitionKey" +# ## DEPRECATED: If set the partitionKey will be a random UUID on every put. +# ## This allows for scaling across multiple shards in a stream. +# ## This will cause issues with ordering. +# use_random_partitionkey = false +# ## The partition key can be calculated using one of several methods: +# ## +# ## Use a static value for all writes: +# # [outputs.kinesis.partition] +# # method = "static" +# # key = "howdy" +# # +# ## Use a random partition key on each write: +# # [outputs.kinesis.partition] +# # method = "random" +# # +# ## Use the measurement name as the partition key: +# # [outputs.kinesis.partition] +# # method = "measurement" +# # +# ## Use the value of a tag for all writes, if the tag is not set the empty +# ## default option will be used. When no default, defaults to "telegraf" +# # [outputs.kinesis.partition] +# # method = "tag" +# # key = "host" +# # default = "mykey" +# +# +# ## Data format to output. 
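A hedged sketch of the tag-based partitioning option above: shard by the host tag, with the stated fallback when the tag is absent (stream and key names reuse the sample placeholders).

```toml
# Sketch: partition Kinesis records by the "host" tag, falling back to
# "mykey" when the tag is absent; the stream must already exist.
[[outputs.kinesis]]
  region = "ap-southeast-2"
  streamname = "StreamName"
  data_format = "influx"
  [outputs.kinesis.partition]
    method = "tag"
    key = "host"
    default = "mykey"
```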
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" +# +# ## debug will show upstream AWS messages. +# debug = false + + +# # Configuration for Librato API to send metrics to. +# [[outputs.librato]] +# ## Librato API Docs +# ## http://dev.librato.com/v1/metrics-authentication +# ## Librato API user +# api_user = "telegraf@influxdb.com" # required. +# ## Librato API token +# api_token = "my-secret-token" # required. +# ## Debug +# # debug = false +# ## Connection timeout. +# # timeout = "5s" +# ## Output source Template (same as graphite buckets) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# ## This template is used in librato's source (not metric's name) +# template = "host" +# + + +# # Send aggregate metrics to Logz.io +# [[outputs.logzio]] +# ## Connection timeout, defaults to "5s" if not set. +# timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Logz.io account token +# token = "your logz.io token" # required +# +# ## Use your listener URL for your Logz.io account region. +# # url = "https://listener.logz.io:8071" + + +# # Send logs to Loki +# [[outputs.loki]] +# ## The domain of Loki +# domain = "https://loki.domain.tld" +# +# ## Endpoint of the write API +# # endpoint = "/loki/api/v1/push" +# +# ## Connection timeout, defaults to "5s" if not set. +# # timeout = "5s" +# +# ## Basic auth credential +# # username = "loki" +# # password = "pass" +# +# ## Additional HTTP headers +# # http_headers = {"X-Scope-OrgID" = "1"} +# +# ## If the request must be gzip encoded +# # gzip_request = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Configuration for MQTT server to send metrics to +# [[outputs.mqtt]] +# servers = ["localhost:1883"] # required. +# +# ## MQTT outputs send metrics to this topic format +# ## "<topic_prefix>/<hostname>/<pluginname>" +# ## ex: prefix/web01.example.com/mem +# topic_prefix = "telegraf" +# +# ## QoS policy for messages +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# # qos = 2 +# +# ## Username and password to connect to the MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## client ID, if not set a random ID is generated +# # client_id = "" +# +# ## Timeout for write operations. default: 5s +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## When true, metrics will be sent in one MQTT message per flush. Otherwise, +# ## metrics are written one metric per MQTT message. +# # batch = false +# +# ## When true, metrics will have the RETAIN flag set, making the broker cache entries until someone +# ## actually reads them +# # retain = false +# +# ## Defines the maximum length of time that the broker and client may not communicate. +# ## Defaults to 0 which turns the feature off. For version v2.0.12 of eclipse/mosquitto there is a +# ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. +# ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30.
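Putting the MQTT topic and keep-alive notes above together, a hedged sketch (the broker address is the sample's localhost placeholder):

```toml
# Sketch: with this prefix, metrics from web01.example.com publish to
# topics like "telegraf/web01.example.com/mem".
[[outputs.mqtt]]
  servers = ["localhost:1883"]
  topic_prefix = "telegraf"
  qos = 1
  keep_alive = 30   # non-zero, per the mosquitto v2.0.12 bug note above
```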
+# # keep_alive = 0 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send telegraf measurements to NATS +# [[outputs.nats]] +# ## URLs of NATS servers +# servers = ["nats://localhost:4222"] +# +# ## Optional client name +# # name = "" +# +# ## Optional credentials +# # username = "" +# # password = "" +# +# ## Optional NATS 2.0 and NATS NGS compatible user credentials +# # credentials = "/etc/telegraf/nats.creds" +# +# ## NATS subject for producer messages +# subject = "telegraf" +# +# ## Use Transport Layer Security +# # secure = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send metrics to New Relic metrics endpoint +# [[outputs.newrelic]] +# ## New Relic Insights API key +# insights_key = "insights api key" +# +# ## Prefix to add to the metric name for easy identification. +# # metric_prefix = "" +# +# ## Timeout for writes to the New Relic API. +# # timeout = "15s" +# +# ## HTTP Proxy override. If unset use values from the standard +# ## proxy environment variables to determine proxy, if any. +# # http_proxy = "http://corporate.proxy:3128" +# +# ## Metric URL override to enable geographic location endpoints. +# # If not set, the standard endpoint is used: +# # metric_url = "https://metric-api.newrelic.com/metric/v1" + + +# # Send telegraf measurements to NSQD +# [[outputs.nsq]] +# ## Location of nsqd instance listening on TCP +# server = "localhost:4150" +# ## NSQ topic for producer messages +# topic = "telegraf" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send OpenTelemetry metrics over gRPC +# [[outputs.opentelemetry]] +# ## Override the default (localhost:4317) OpenTelemetry gRPC service +# ## address:port +# # service_address = "localhost:4317" +# +# ## Override the default (5s) request timeout +# # timeout = "5s" +# +# ## Optional TLS Config. +# ## +# ## Root certificates for verifying server certificates encoded in PEM format. +# # tls_ca = "/etc/telegraf/ca.pem" +# ## The public and private keypairs for the client encoded in PEM format. +# ## May contain intermediate certificates. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS, but skip TLS chain and host verification. +# # insecure_skip_verify = false +# ## Send the specified TLS server name via SNI. +# # tls_server_name = "foo.example.com" +# +# ## Override the default (gzip) compression used to send data.
+# ## Supports: "gzip", "none" +# # compression = "gzip" +# +# ## Additional OpenTelemetry resource attributes +# # [outputs.opentelemetry.attributes] +# # "service.name" = "demo" +# +# ## Additional gRPC request metadata +# # [outputs.opentelemetry.headers] +# # key1 = "value1" + + +# # Configuration for OpenTSDB server to send metrics to +# [[outputs.opentsdb]] +# ## prefix for metrics keys +# prefix = "my.specific.prefix." +# +# ## DNS name of the OpenTSDB server +# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the +# ## telnet API. "http://opentsdb.example.com" will use the Http API. +# host = "opentsdb.example.com" +# +# ## Port of the OpenTSDB server +# port = 4242 +# +# ## Number of data points to send to OpenTSDB in Http requests. +# ## Not used with telnet API. +# http_batch_size = 50 +# +# ## URI Path for Http requests to OpenTSDB. +# ## Used in cases where OpenTSDB is located behind a reverse proxy. +# http_path = "/api/put" +# +# ## Debug true - Prints OpenTSDB communication +# debug = false +# +# ## Separator separates measurement name from field +# separator = "_" + + +# # Configuration for the Prometheus client to spawn +# [[outputs.prometheus_client]] +# ## Address to listen on +# listen = ":9273" +# +# ## Metric version controls the mapping from Telegraf metrics into +# ## Prometheus format. When using the prometheus input, use the same value in +# ## both plugins to ensure metrics are round-tripped without modification. +# ## +# ## example: metric_version = 1; +# ## metric_version = 2; recommended version +# # metric_version = 1 +# +# ## Use HTTP Basic Authentication. +# # basic_username = "Foo" +# # basic_password = "Bar" +# +# ## If set, the IP Ranges which are allowed to access metrics. +# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] +# # ip_range = [] +# +# ## Path to publish the metrics on. +# # path = "/metrics" +# +# ## Expiration interval for each metric. 0 == no expiration +# # expiration_interval = "60s" +# +# ## Collectors to enable, valid entries are "gocollector" and "process". +# ## If unset, both are enabled. +# # collectors_exclude = ["gocollector", "process"] +# +# ## Send string metrics as Prometheus labels. +# ## Unless set to false all string metrics will be sent as labels. +# # string_as_label = true +# +# ## If set, enable TLS with the given certificate. +# # tls_cert = "/etc/ssl/telegraf.crt" +# # tls_key = "/etc/ssl/telegraf.key" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Export metric collection time. +# # export_timestamp = false + + +# # Configuration for the Riemann server to send metrics to +# [[outputs.riemann]] +# ## The full TCP or UDP URL of the Riemann server +# url = "tcp://localhost:5555" +# +# ## Riemann event TTL, floating-point time in seconds. +# ## Defines how long that an event is considered valid for in Riemann +# # ttl = 30.0 +# +# ## Separator to use between measurement and field name in Riemann service name +# ## This does not have any effect if 'measurement_as_attribute' is set to 'true' +# separator = "/" +# +# ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name +# # measurement_as_attribute = false +# +# ## Send string metrics as Riemann event states. 
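For the Prometheus output above, a minimal hedged sketch using the recommended metric_version:

```toml
# Sketch: expose metrics for scraping at http://<host>:9273/metrics.
[[outputs.prometheus_client]]
  listen = ":9273"
  metric_version = 2   # recommended mapping, per the comment above
  path = "/metrics"
```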
+# ## Unless enabled all string metrics will be ignored +# # string_as_state = false +# +# ## A list of tag keys whose values get sent as Riemann tags. +# ## If empty, all Telegraf tag values will be sent as tags +# # tag_keys = ["telegraf","custom_tag"] +# +# ## Additional Riemann tags to send. +# # tags = ["telegraf-output"] +# +# ## Description for Riemann event +# # description_text = "metrics collected from telegraf" +# +# ## Riemann client write timeout, defaults to "5s" if not set. +# # timeout = "5s" + + +# # Configuration for the Riemann server to send metrics to +# [[outputs.riemann_legacy]] +# ## URL of server +# url = "localhost:5555" +# ## Transport protocol to use, either tcp or udp +# transport = "tcp" +# ## Separator to use between input name and field name in Riemann service name +# separator = " " + + +# # Send aggregate metrics to Sensu Monitor +# [[outputs.sensu]] +# ## BACKEND API URL is the Sensu Backend API root URL to send metrics to +# ## (protocol, host, and port only). The output plugin will automatically +# ## append the corresponding backend API path +# ## (/api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name). +# ## +# ## Backend Events API reference: +# ## https://docs.sensu.io/sensu-go/latest/api/events/ +# ## +# ## AGENT API URL is the Sensu Agent API root URL to send metrics to +# ## (protocol, host, and port only). The output plugin will automatically +# ## append the corresponding agent API path (/events). +# ## +# ## Agent API Events API reference: +# ## https://docs.sensu.io/sensu-go/latest/api/events/ +# ## +# ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output +# ## plugin will use backend_api_url. If backend_api_url and agent_api_url are +# ## not provided, the output plugin will default to using an agent_api_url of +# ## http://127.0.0.1:3031 +# ## +# # backend_api_url = "http://127.0.0.1:8080" +# # agent_api_url = "http://127.0.0.1:3031" +# +# ## API KEY is the Sensu Backend API token +# ## Generate a new API token via: +# ## +# ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities +# ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf +# ## $ sensuctl user create telegraf --group telegraf --password REDACTED +# ## $ sensuctl api-key grant telegraf +# ## +# ## For more information on Sensu RBAC profiles & API tokens, please visit: +# ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/ +# ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ +# ## +# # api_key = "${SENSU_API_KEY}" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Timeout for HTTP message +# # timeout = "5s" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Sensu Event details +# ## +# ## Below are the event details to be sent to Sensu. The main portions of the +# ## event are the check, entity, and metrics specifications.
For more information +# ## on Sensu events and their components, please visit: +# ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events +# ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks +# ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities +# ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics +# ## +# ## Check specification +# ## The check name is the name to give the Sensu check associated with the event +# ## created. This maps to check.metadata.name in the event. +# [outputs.sensu.check] +# name = "telegraf" +# +# ## Entity specification +# ## Configure the entity name and namespace, if necessary. This will be part of +# ## the entity.metadata in the event. +# ## +# ## NOTE: if the output plugin is configured to send events to a +# ## backend_api_url and entity_name is not set, the value returned by +# ## os.Hostname() will be used; if the output plugin is configured to send +# ## events to an agent_api_url, entity_name and entity_namespace are not used. +# # [outputs.sensu.entity] +# # name = "server-01" +# # namespace = "default" +# +# ## Metrics specification +# ## Configure the tags for the metrics that are sent as part of the Sensu event +# # [outputs.sensu.tags] +# # source = "telegraf" +# +# ## Configure the handler(s) for processing the provided metrics +# # [outputs.sensu.metrics] +# # handlers = ["influxdb","elasticsearch"] + + +# # Send metrics and events to SignalFx +# [[outputs.signalfx]] +# ## SignalFx Org Access Token +# access_token = "my-secret-token" +# +# ## The SignalFx realm that your organization resides in +# signalfx_realm = "us9" # Required if ingest_url is not set +# +# ## You can optionally provide a custom ingest url instead of the +# ## signalfx_realm option above if you are using a gateway or proxy +# ## instance. This option takes precedence over signalfx_realm. +# ingest_url = "https://my-custom-ingest/" +# +# ## Event typed metrics are omitted by default; +# ## if you require an event typed metric you must specify the +# ## metric name in the following list. +# included_event_names = ["plugin.metric_name"] + + +# # Generic socket writer capable of handling multiple socket types. +# [[outputs.socket_writer]] +# ## URL to connect to +# # address = "tcp://127.0.0.1:8094" +# # address = "tcp://example.com:http" +# # address = "tcp4://127.0.0.1:8094" +# # address = "tcp6://127.0.0.1:8094" +# # address = "tcp6://[2001:db8::1]:8094" +# # address = "udp://127.0.0.1:8094" +# # address = "udp4://127.0.0.1:8094" +# # address = "udp6://127.0.0.1:8094" +# # address = "unix:///tmp/telegraf.sock" +# # address = "unixgram:///tmp/telegraf.sock" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## Content encoding for packet-based connections (i.e. UDP, unixgram). +# ## Can be set to "gzip" or to "identity" to apply no encoding. +# ## +# # content_encoding = "identity" +# +# ## Data format to generate.
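A hedged sketch of the socket writer above, sending line protocol over plain UDP (any of the address schemes listed would work the same way):

```toml
# Sketch: line-protocol metrics over a plain UDP socket.
[[outputs.socket_writer]]
  address = "udp://127.0.0.1:8094"
  data_format = "influx"
```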
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" + + +# # Send metrics to SQL Database +# [[outputs.sql]] +# ## Database driver +# ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), +# ## sqlite (SQLite3), snowflake (snowflake.com) +# # driver = "" +# +# ## Data source name +# ## The format of the data source name is different for each database driver. +# ## See the plugin readme for details. +# # data_source_name = "" +# +# ## Timestamp column name +# # timestamp_column = "timestamp" +# +# ## Table creation template +# ## Available template variables: +# ## {TABLE} - table name as a quoted identifier +# ## {TABLELITERAL} - table name as a quoted string literal +# ## {COLUMNS} - column definitions (list of quoted identifiers and types) +# # table_template = "CREATE TABLE {TABLE}({COLUMNS})" +# +# ## Table existence check template +# ## Available template variables: +# ## {TABLE} - tablename as a quoted identifier +# # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1" +# +# ## Initialization SQL +# # init_sql = "" +# +# ## Metric type to SQL type conversion +# #[outputs.sql.convert] +# # integer = "INT" +# # real = "DOUBLE" +# # text = "TEXT" +# # timestamp = "TIMESTAMP" +# # defaultvalue = "TEXT" +# # unsigned = "UNSIGNED" + + +# # Configuration for Google Cloud Stackdriver to send metrics to +# [[outputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## The namespace for the metric descriptor +# namespace = "telegraf" +# +# ## Custom resource type +# # resource_type = "generic_node" +# +# ## Additional resource labels +# # [outputs.stackdriver.resource_labels] +# # node_id = "$HOSTNAME" +# # namespace = "myapp" +# # location = "eu-north0" + + +# # A plugin that can transmit metrics to Sumo Logic HTTP Source +# [[outputs.sumologic]] +# ## Unique URL generated for your HTTP Metrics Source. +# ## This is the address to send metrics to. +# # url = "https://events.sumologic.net/receiver/v1/http/" +# +# ## Data format to be used for sending metrics. +# ## This will set the "Content-Type" header accordingly. +# ## Currently supported formats: +# ## * graphite - for Content-Type of application/vnd.sumologic.graphite +# ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2 +# ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus +# ## +# ## More information can be found at: +# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics +# ## +# ## NOTE: +# ## When unset, telegraf will by default use the influx serializer which is currently unsupported +# ## in HTTP Source. +# data_format = "carbon2" +# +# ## Timeout used for HTTP request +# # timeout = "5s" +# +# ## Max HTTP request body size in bytes before compression (if applied). +# ## By default 1MB is recommended. +# ## NOTE: +# ## Bear in mind that in some serializer a metric even though serialized to multiple +# ## lines cannot be split any further so setting this very low might not work +# ## as expected. +# # max_request_body_size = 1000000 +# +# ## Additional, Sumo specific options. 
+# ## Full list can be found here: +# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers +# +# ## Desired source name. +# ## Useful if you want to override the source name configured for the source. +# # source_name = "" +# +# ## Desired host name. +# ## Useful if you want to override the source host configured for the source. +# # source_host = "" +# +# ## Desired source category. +# ## Useful if you want to override the source category configured for the source. +# # source_category = "" +# +# ## Comma-separated key=value list of dimensions to apply to every metric. +# ## Custom dimensions will allow you to query your metrics at a more granular level. +# # dimensions = "" + + +# # Configuration for Syslog server to send metrics to +# [[outputs.syslog]] +# ## URL to connect to +# ## ex: address = "tcp://127.0.0.1:8094" +# ## ex: address = "tcp4://127.0.0.1:8094" +# ## ex: address = "tcp6://127.0.0.1:8094" +# ## ex: address = "tcp6://[2001:db8::1]:8094" +# ## ex: address = "udp://127.0.0.1:8094" +# ## ex: address = "udp4://127.0.0.1:8094" +# ## ex: address = "udp6://127.0.0.1:8094" +# address = "tcp://127.0.0.1:8094" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## The framing technique with which it is expected that messages are +# ## transported (default = "octet-counting"). Whether the messages come +# ## using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must +# ## be one of "octet-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## SD-PARAMs settings +# ## Syslog messages can contain key/value pairs within zero or more +# ## structured data sections. For each unrecognized metric tag/field an +# ## SD-PARAMS is created. +# ## +# ## Example: +# ## [[outputs.syslog]] +# ## sdparam_separator = "_" +# ## default_sdid = "default@32473" +# ## sdids = ["foo@123", "bar@456"] +# ## +# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 +# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] +# +# ## SD-PARAMs separator between the sdid and tag/field key (default = "_") +# # sdparam_separator = "_" +# +# ## Default sdid used for tags/fields that don't contain a prefix defined in +# ## the explicit sdids setting below. If no default is specified, no SD-PARAMs +# ## will be used for unrecognized fields. +# # default_sdid = "default@32473" +# +# ## List of explicit prefixes to extract from tag/field keys and use as the +# ## SDID, if they match (see above example for more details): +# # sdids = ["foo@123", "bar@456"] +# +# ## Default severity value. Severity and Facility are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field +# ## with key "severity_code" is defined.
+# # default_severity_code = 5
+#
+# ## Default facility value. Facility and Severity are used to calculate the
+# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with
+# ## key "facility_code" is defined. If unset, 1 (user-level) is the default
+# # default_facility_code = 1
+#
+# ## Default APP-NAME value (RFC5424#section-6.2.5)
+# ## Used when no metric tag with key "appname" is defined.
+# ## If unset, "Telegraf" is the default
+# # default_appname = "Telegraf"
+
+
+# # Configuration for Amazon Timestream output.
+# [[outputs.timestream]]
+# ## Amazon Region
+# region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order:
+# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+# ## 2) Assumed credentials via STS if role_arn is specified
+# ## 3) explicit credentials from 'access_key' and 'secret_key'
+# ## 4) shared profile from 'profile'
+# ## 5) environment variables
+# ## 6) shared credentials file
+# ## 7) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #web_identity_token_file = ""
+# #role_session_name = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Timestream database where the metrics will be inserted.
+# ## The database must exist prior to starting Telegraf.
+# database_name = "yourDatabaseNameHere"
+#
+# ## Specifies if the plugin should describe the Timestream database upon starting
+# ## to validate that it has access (the necessary permissions, connectivity, etc.) as a safety check.
+# ## If the describe operation fails, the plugin will not start
+# ## and therefore the Telegraf agent will not start.
+# describe_database_on_start = false
+#
+# ## The mapping mode specifies how Telegraf records are represented in Timestream.
+# ## Valid values are: single-table, multi-table.
+# ## For example, consider the following data in line protocol format:
+# ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200
+# ## airquality,location=us-west no2=5,pm25=16 1465839830100400200
+# ## where weather and airquality are the measurement names, location and season are tags,
+# ## and temperature, humidity, no2, pm25 are fields.
+# ## In multi-table mode:
+# ## - first line will be ingested to table named weather
+# ## - second line will be ingested to table named airquality
+# ## - the tags will be represented as dimensions
+# ## - first table (weather) will have two records:
+# ## one with measurement name equals to temperature,
+# ## another with measurement name equals to humidity
+# ## - second table (airquality) will have two records:
+# ## one with measurement name equals to no2,
+# ## another with measurement name equals to pm25
+# ## - the Timestream tables from the example will look like this:
+# ## TABLE "weather":
+# ## time | location | season | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82
+# ## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71
+# ## TABLE "airquality":
+# ## time | location | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-west | no2 | 5
+# ## 2016-06-13 17:43:50 | us-west | pm25 | 16
+# ## In single-table mode:
+# ## - the data will be ingested to a single table, whose name will be valueOf(single_table_name)
+# ## - measurement name will be stored in a dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name)
+# ## - location and season will be represented as dimensions
+# ## - temperature, humidity, no2, pm25 will be represented as measurement names
+# ## - the Timestream table from the example will look like this:
+# ## Assuming:
+# ## - single_table_name = "my_readings"
+# ## - single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+# ## TABLE "my_readings":
+# ## time | location | season | namespace | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82
+# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71
+# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5
+# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16
+# ## In most cases, using multi-table mapping mode is recommended.
+# ## However, you can consider using single-table in situations when you have thousands of measurement names.
+# mapping_mode = "multi-table"
+#
+# ## Only valid and required for mapping_mode = "single-table"
+# ## Specifies the Timestream table where the metrics will be uploaded.
+# # single_table_name = "yourTableNameHere"
+#
+# ## Only valid and required for mapping_mode = "single-table"
+# ## Describes what will be the Timestream dimension name for the Telegraf
+# ## measurement name.
+# # single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+#
+# ## Specifies if the plugin should create the table, if the table does not exist.
+# ## The plugin writes the data without first checking whether the table exists.
+# ## When the table does not exist, the error returned from Timestream will cause
+# ## the plugin to create the table, if this parameter is set to true.
+# create_table_if_not_exists = true
+#
+# ## Only valid and required if create_table_if_not_exists = true
+# ## Specifies the Timestream table magnetic store retention period in days.
+# ## Check Timestream documentation for more details.
+# create_table_magnetic_store_retention_period_in_days = 365
+#
+# ## Only valid and required if create_table_if_not_exists = true
+# ## Specifies the Timestream table memory store retention period in hours.
+# ## Check Timestream documentation for more details.
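+# ## A hypothetical illustration of how the two retention settings interact
+# ## (values match the ones configured here): a record is first queryable from
+# ## the fast memory store for 24 hours and is then moved to the magnetic
+# ## store, where it is retained for up to 365 days.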
+# create_table_memory_store_retention_period_in_hours = 24
+#
+# ## Only valid and optional if create_table_if_not_exists = true
+# ## Specifies the Timestream table tags.
+# ## Check Timestream documentation for more details.
+# # create_table_tags = { "foo" = "bar", "environment" = "dev"}
+
+
+# # Write metrics to Warp 10
+# [[outputs.warp10]]
+# # Prefix to add to the measurement.
+# prefix = "telegraf."
+#
+# # URL of the Warp 10 server
+# warp_url = "http://localhost:8080"
+#
+# # Write token to access your app on Warp 10
+# token = "Token"
+#
+# # Warp 10 query timeout
+# # timeout = "15s"
+#
+# ## Print Warp 10 error body
+# # print_error_body = false
+#
+# ## Max string error size
+# # max_string_error_size = 511
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Configuration for Wavefront server to send metrics to
+# [[outputs.wavefront]]
+# ## URL for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy
+# ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878
+# url = "https://metrics.wavefront.com"
+#
+# ## Authentication Token for Wavefront. Only required if using Direct Ingestion
+# #token = "DUMMY_TOKEN"
+#
+# ## DNS name of the wavefront proxy server. Do not use if url is specified
+# #host = "wavefront.example.com"
+#
+# ## Port that the Wavefront proxy server listens on. Do not use if url is specified
+# #port = 2878
+#
+# ## prefix for metrics keys
+# #prefix = "my.specific.prefix."
+#
+# ## whether to use "value" for name of simple fields. default is false
+# #simple_fields = false
+#
+# ## character to use between metric and field name. default is . (dot)
+# #metric_separator = "."
+#
+# ## Convert metric name paths to use metricSeparator character
+# ## When true, all _ (underscore) characters in the final metric name will be converted. default is true
+# #convert_paths = true
+#
+# ## Use Strict rules to sanitize metric and tag names from invalid characters
+# ## When enabled forward slash (/) and comma (,) will be accepted
+# #use_strict = false
+#
+# ## Use Regex to sanitize metric and tag names from invalid characters
+# ## Regex is more thorough, but significantly slower. default is false
+# #use_regex = false
+#
+# ## point tags to use as the source name for Wavefront (if none found, host will be used)
+# #source_override = ["hostname", "address", "agent_host", "node_host"]
+#
+# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
+# #convert_bool = true
+#
+# ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any
+# ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility.
+# #truncate_tags = false
+#
+# ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics
+# ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending
+# ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in
+# ## Telegraf.
+# #immediate_flush = true
+#
+# ## Define a mapping, namespaced by metric prefix, from string values to numeric values
+# ## deprecated in 1.9; use the enum processor plugin
+# #[[outputs.wavefront.string_to_number.elasticsearch]]
+# # green = 1.0
+# # yellow = 0.5
+# # red = 0.0
+
+
+# # Generic WebSocket output writer.
+# [[outputs.websocket]]
+# ## URL is the address to send metrics to. Make sure ws or wss scheme is used.
+# url = "ws://127.0.0.1:8080/telegraf"
+#
+# ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero).
+# # connect_timeout = "30s"
+# # write_timeout = "30s"
+# # read_timeout = "30s"
+#
+# ## Optionally turn on using text data frames (binary by default).
+# # use_text_frames = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+#
+# ## Additional HTTP Upgrade headers
+# # [outputs.websocket.headers]
+# # Authorization = "Bearer <TOKEN>"
+
+
+# # Send aggregated metrics to Yandex.Cloud Monitoring
+# [[outputs.yandex_cloud_monitoring]]
+# ## Timeout for HTTP writes.
+# # timeout = "20s"
+#
+# ## Yandex.Cloud monitoring API endpoint. Normally should not be changed
+# # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write"
+#
+# ## All user metrics should be sent with "custom" service specified. Normally should not be changed
+# # service = "custom"
+
+
+###############################################################################
+#                            PROCESSOR PLUGINS                                #
+###############################################################################
+
+
+# # Attach AWS EC2 metadata to metrics
+# [[processors.aws_ec2]]
+# ## Instance identity document tags to attach to metrics.
+# ## For more information see:
+# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
+# ##
+# ## Available tags:
+# ## * accountId
+# ## * architecture
+# ## * availabilityZone
+# ## * billingProducts
+# ## * imageId
+# ## * instanceId
+# ## * instanceType
+# ## * kernelId
+# ## * pendingTime
+# ## * privateIp
+# ## * ramdiskId
+# ## * region
+# ## * version
+# imds_tags = []
+#
+# ## EC2 instance tags retrieved with DescribeTags action.
+# ## In case tag is empty upon retrieval it's omitted when tagging metrics.
+# ## Note that in order for this to work, the role attached to the EC2 instance
+# ## or the AWS credentials available from the environment must have a policy
+# ## attached that allows ec2:DescribeTags.
+# ##
+# ## For more information see:
+# ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html
+# ec2_tags = []
+#
+# ## Timeout for http requests made against the AWS EC2 metadata endpoint.
+# timeout = "10s"
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## Keeping the metrics ordered may be slightly slower.
+# ordered = false
+#
+# ## max_parallel_calls is the maximum number of AWS API calls to be in flight
+# ## at the same time.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_calls = 10
+
+
+# # Clone metrics and apply modifications.
+# [[processors.clone]]
+# ## All modifications on inputs and aggregators can be overridden:
+# # name_override = "new_name"
+# # name_prefix = "new_name_prefix"
+# # name_suffix = "new_name_suffix"
+#
+# ## Tags to be added (all values must be strings)
+# # [processors.clone.tags]
+# # additional_tag = "tag_value"
+
+
+# # Convert values to another metric value type
+# [[processors.converter]]
+# ## Tags to convert
+# ##
+# ## The table key determines the target type, and the array of key-values
+# ## select the keys to convert. The array may contain globs.
+# ## <target-type> = [<tag-key>...]
+# [processors.converter.tags]
+# measurement = []
+# string = []
+# integer = []
+# unsigned = []
+# boolean = []
+# float = []
+#
+# ## Fields to convert
+# ##
+# ## The table key determines the target type, and the array of key-values
+# ## select the keys to convert. The array may contain globs.
+# ## <target-type> = [<field-key>...]
+# [processors.converter.fields]
+# measurement = []
+# tag = []
+# string = []
+# integer = []
+# unsigned = []
+# boolean = []
+# float = []
+
+
+# # Dates measurements, tags, and fields that pass through this filter.
+# [[processors.date]]
+# ## New tag to create
+# tag_key = "month"
+#
+# ## New field to create (cannot set both field_key and tag_key)
+# # field_key = "month"
+#
+# ## Date format string, must be a representation of the Go "reference time"
+# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
+# date_format = "Jan"
+#
+# ## If destination is a field, date format can also be one of
+# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
+# # date_format = "unix"
+#
+# ## Offset duration added to the date string when writing the new tag.
+# # date_offset = "0s"
+#
+# ## Timezone to use when creating the tag or field using a reference time
+# ## string. This can be set to one of "UTC", "Local", or to a location name
+# ## in the IANA Time Zone database.
+# ## example: timezone = "America/Los_Angeles"
+# # timezone = "UTC"
+
+
+# # Filter metrics with repeating field values
+# [[processors.dedup]]
+# ## Maximum time to suppress output
+# dedup_interval = "600s"
+
+
+# # Defaults sets default value(s) for specified fields that are not set on incoming metrics.
+# [[processors.defaults]]
+# ## Ensures a set of fields always exists on your metric(s) with their
+# ## respective default value.
+# ## For any given field pair (key = default), if it's not set, a field
+# ## is set on the metric with the specified default.
+# ##
+# ## A field is considered not set if it is nil on the incoming metric;
+# ## or it is not nil but its value is an empty string or is a string
+# ## of one or more spaces.
+# ## <target-field> = <value>
+# # [processors.defaults.fields]
+# # field_1 = "bar"
+# # time_idle = 0
+# # is_error = true
+
+
+# # Map enum values according to given table.
+# [[processors.enum]]
+# [[processors.enum.mapping]]
+# ## Name of the field to map. Globs accepted.
+# field = "status"
+#
+# ## Name of the tag to map. Globs accepted.
+# # tag = "status"
+#
+# ## Destination tag or field to be used for the mapped value. By default the
+# ## source tag or field is used, overwriting the original value.
+# dest = "status_code"
+#
+# ## Default value to be used for all values not contained in the mapping
+# ## table. When unset, the unmodified value for the field will be used if no
+# ## match is found.
+# # default = 0
+#
+# ## Table of mappings
+# [processors.enum.mapping.value_mappings]
+# green = 1
+# amber = 2
+# red = 3
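+# ## A hypothetical illustration of the mapping above (not part of the stock
+# ## sample config): a metric whose status field is "green" gains the field
+# ## status_code=1, e.g.
+# ## input  => xyzzy status="green"
+# ## output => xyzzy status="green",status_code=1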
+
+
+# # Run executable as long-running processor plugin
+# [[processors.execd]]
+# ## Program to run as daemon
+# ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
+# command = ["cat"]
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+
+
+# # Performs file path manipulations on tags and fields
+# [[processors.filepath]]
+# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
+# # [[processors.filepath.basename]]
+# # tag = "path"
+# # dest = "basepath"
+#
+# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory
+# # [[processors.filepath.dirname]]
+# # field = "path"
+#
+# ## Treat the tag value as a path, converting it to the last element without its suffix
+# # [[processors.filepath.stem]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to the shortest path name equivalent
+# ## to path by purely lexical processing
+# # [[processors.filepath.clean]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to a relative path that is lexically
+# ## equivalent to the source path when joined to 'base_path'
+# # [[processors.filepath.rel]]
+# # tag = "path"
+# # base_path = "/var/log"
+#
+# ## Treat the tag value as a path, replacing each separator character in path with a '/' character.
+# ## Only has an effect on Windows
+# # [[processors.filepath.toslash]]
+# # tag = "path"
+
+
+# # Add a tag of the network interface name looked up over SNMP by interface number
+# [[processors.ifname]]
+# ## Name of tag holding the interface number
+# # tag = "ifIndex"
+#
+# ## Name of output tag where the interface name will be added
+# # dest = "ifName"
+#
+# ## Name of tag of the SNMP agent to request the interface name from
+# # agent = "agent"
+#
+# ## Timeout for each request.
+# # timeout = "5s"
+#
+# ## SNMP version; can be 1, 2, or 3.
+# # version = 2
+#
+# ## SNMP community string.
+# # community = "public"
+#
+# ## Number of retries to attempt.
+# # retries = 3
+#
+# ## The GETBULK max-repetitions parameter.
+# # max_repetitions = 10
+#
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA", or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Context Name.
+# # context_name = ""
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+#
+# ## max_parallel_lookups is the maximum number of SNMP requests to
+# ## make at the same time.
+# # max_parallel_lookups = 100
+#
+# ## ordered controls whether or not the metrics need to stay in the
+# ## same order this plugin received them in. If false, this plugin
+# ## may change the order when data is cached. If you need metrics to
+# ## stay in order set this to true. Keeping the metrics ordered may
+# ## be slightly slower.
+# # ordered = false
+#
+# ## cache_ttl is the amount of time interface names are cached for a
+# ## given agent. After this period elapses, if names are needed they
+# ## will be retrieved again.
+# # cache_ttl = "8h"
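+# ## A hypothetical illustration (not part of the stock sample config): with
+# ## the defaults above, a metric tagged ifIndex="2" and agent="127.0.0.1"
+# ## would gain ifName="eth0", assuming that agent reports "eth0" as the name
+# ## of interface 2.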
+
+
+# # Apply metric modifications using override semantics.
+# [[processors.override]]
+# ## All modifications on inputs and aggregators can be overridden:
+# # name_override = "new_name"
+# # name_prefix = "new_name_prefix"
+# # name_suffix = "new_name_suffix"
+#
+# ## Tags to be added (all values must be strings)
+# # [processors.override.tags]
+# # additional_tag = "tag_value"
+
+
+# # Parse a value in a specified field/tag(s) and add the result in a new metric
+# [[processors.parser]]
+# ## The name of the fields whose value will be parsed.
+# parse_fields = []
+#
+# ## If true, incoming metrics are not emitted.
+# drop_original = false
+#
+# ## If set to override, emitted metrics will be merged by overriding the
+# ## original metric using the newly parsed metrics.
+# merge = "override"
+#
+# ## The data format to be read from files
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Rotate a single valued metric into a multi field metric
+# [[processors.pivot]]
+# ## Tag to use for naming the new field.
+# tag_key = "name"
+# ## Field to use as the value of the new field.
+# value_key = "value"
+
+
+# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file
+# [[processors.port_name]]
+# ## Name of tag holding the port number
+# # tag = "port"
+# ## Or name of the field holding the port number
+# # field = "port"
+#
+# ## Name of output tag or field (depending on the source) where service name will be added
+# # dest = "service"
+#
+# ## Default tcp or udp
+# # default_protocol = "tcp"
+#
+# ## Tag containing the protocol (tcp or udp, case-insensitive)
+# # protocol_tag = "proto"
+#
+# ## Field containing the protocol (tcp or udp, case-insensitive)
+# # protocol_field = "proto"
+
+
+# # Print all metrics that pass through this filter.
+# [[processors.printer]]
+
+
+# # Transforms tag and field values with regex pattern
+# [[processors.regex]]
+# ## Tag and field conversions defined in a separate sub-tables
+# # [[processors.regex.tags]]
+# # ## Tag to change
+# # key = "resp_code"
+# # ## Regular expression to match on a tag value
+# # pattern = "^(\\d)\\d\\d$"
+# # ## Matches of the pattern will be replaced with this string. Use ${1}
+# # ## notation to use the text of the first submatch.
+# # replacement = "${1}xx"
+#
+# # [[processors.regex.fields]]
+# # ## Field to change
+# # key = "request"
+# # ## All the power of the Go regular expressions available here
+# # ## For example, named subgroups
+# # pattern = "^/api(?P<method>/[\\w/]+)\\S*"
+# # replacement = "${method}"
+# # ## If result_key is present, a new field will be created
+# # ## instead of changing existing field
+# # result_key = "method"
+#
+# ## Multiple conversions may be applied for one field sequentially
+# ## Let's extract one more value
+# # [[processors.regex.fields]]
+# # key = "request"
+# # pattern = ".*category=(\\w+).*"
+# # replacement = "${1}"
+# # result_key = "search_category"
+
+
+# # Rename measurements, tags, and fields that pass through this filter.
+# [[processors.rename]]
+
+
+# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
+# [[processors.reverse_dns]]
+# ## For optimal performance, you may want to limit which metrics are passed to this
+# ## processor. eg:
+# ## namepass = ["my_metric_*"]
+#
+# ## cache_ttl is how long the dns entries should stay cached for.
+# ## generally longer is better, but if you expect a large number of diverse lookups
+# ## you'll want to consider memory use.
+# cache_ttl = "24h"
+#
+# ## lookup_timeout is how long should you wait for a single dns request to respond.
+# ## this is also the maximum acceptable latency for a metric travelling through
+# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
+# ## be passed on unaltered.
+# ## multiple simultaneous resolution requests for the same IP will only make a
+# ## single rDNS request, and they will all wait for the answer for this long.
+# lookup_timeout = "3s"
+#
+# ## max_parallel_lookups is the maximum number of dns requests to be in flight
+# ## at the same time. Requests hitting cached values do not count against this
+# ## total, and neither do multiple requests for the same IP.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_lookups = 10
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## Keeping the metrics ordered may be slightly slower.
+# ordered = false
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the field "source_ip", and put the result in the field "source_name"
+# field = "source_ip"
+# dest = "source_name"
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the tag "destination_ip", and put the result in the tag
+# ## "destination_name".
+# tag = "destination_ip"
+# dest = "destination_name"
+#
+# ## If you would prefer destination_name to be a field instead, you can use a
+# ## processors.converter after this one, specifying the order attribute.
+
+
+# # Add the S2 Cell ID as a tag based on latitude and longitude fields
+# [[processors.s2geo]]
+# ## The name of the lat and lon fields containing WGS-84 latitude and
+# ## longitude in decimal degrees.
+# # lat_field = "lat"
+# # lon_field = "lon"
+#
+# ## New tag to create
+# # tag_key = "s2_cell_id"
+#
+# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
+# # cell_level = 9
+
+
+# # Process metrics using a Starlark script
+# [[processors.starlark]]
+# ## The Starlark source can be set as a string in this configuration file, or
+# ## by referencing a file containing the script. Only one source or script
+# ## should be set at once.
+# ##
+# ## Source of the Starlark script.
+# source = '''
+# def apply(metric):
+#     return metric
+# '''
+#
+# ## File containing a Starlark script.
+# # script = "/usr/local/bin/myscript.star"
+#
+# ## The constants of the Starlark script.
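+# ## As a hypothetical sketch (not part of the stock sample config), constants
+# ## declared below become plain globals inside the script, e.g.:
+# ## def apply(metric):
+# ##     if metric.fields.get("load", 0.0) > threshold:
+# ##         metric.tags["flagged"] = "true"
+# ##     return metric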
+# # [processors.starlark.constants]
+# # max_size = 10
+# # threshold = 0.75
+# # default_name = "Julia"
+# # debug_mode = true
+
+
+# # Perform string processing on tags, fields, and measurements
+# [[processors.strings]]
+# ## Convert a tag value to uppercase
+# # [[processors.strings.uppercase]]
+# # tag = "method"
+#
+# ## Convert a field value to lowercase and store in a new field
+# # [[processors.strings.lowercase]]
+# # field = "uri_stem"
+# # dest = "uri_stem_normalised"
+#
+# ## Convert a field value to titlecase
+# # [[processors.strings.titlecase]]
+# # field = "status"
+#
+# ## Trim leading and trailing whitespace using the default cutset
+# # [[processors.strings.trim]]
+# # field = "message"
+#
+# ## Trim leading characters in cutset
+# # [[processors.strings.trim_left]]
+# # field = "message"
+# # cutset = "\t"
+#
+# ## Trim trailing characters in cutset
+# # [[processors.strings.trim_right]]
+# # field = "message"
+# # cutset = "\r\n"
+#
+# ## Trim the given prefix from the field
+# # [[processors.strings.trim_prefix]]
+# # field = "my_value"
+# # prefix = "my_"
+#
+# ## Trim the given suffix from the field
+# # [[processors.strings.trim_suffix]]
+# # field = "read_count"
+# # suffix = "_count"
+#
+# ## Replace all non-overlapping instances of old with new
+# # [[processors.strings.replace]]
+# # measurement = "*"
+# # old = ":"
+# # new = "_"
+#
+# ## Trims strings based on width
+# # [[processors.strings.left]]
+# # field = "message"
+# # width = 10
+#
+# ## Decode a base64 encoded utf-8 string
+# # [[processors.strings.base64decode]]
+# # field = "message"
+#
+# ## Sanitize a string to ensure it is a valid utf-8 string
+# ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty
+# # [[processors.strings.valid_utf8]]
+# # field = "message"
+# # replacement = ""
+
+
+# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
+# [[processors.tag_limit]]
+# ## Maximum number of tags to preserve
+# limit = 10
+#
+# ## List of tags to preferentially preserve
+# keep = ["foo", "bar", "baz"]
+
+
+# # Uses a Go template to create a new tag
+# [[processors.template]]
+# ## Tag to set with the output of the template.
+# tag = "topic"
+#
+# ## Go template used to create the tag value. In order to ease TOML
+# ## escaping requirements, you may wish to use single quotes around the
+# ## template string.
+# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
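+# ## A hypothetical illustration (not part of the stock sample config): a
+# ## metric tagged hostname="web01" and level="info" would gain the tag
+# ## topic="web01.info".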
+
+
+# # Keep only the metrics whose aggregated value ranks in the top k.
+# [[processors.topk]]
+# ## How many seconds between aggregations
+# # period = 10
+#
+# ## How many top metrics to return
+# # k = 10
+#
+# ## Over which tags should the aggregation be done. Globs can be specified, in
+# ## which case any tag matching the glob will be aggregated over. If set to an
+# ## empty list, no aggregation over tags is done
+# # group_by = ['*']
+#
+# ## Over which fields the top k are calculated
+# # fields = ["value"]
+#
+# ## What aggregation to use. Options: sum, mean, min, max
+# # aggregation = "mean"
+#
+# ## Instead of the top k largest metrics, return the bottom k lowest metrics
+# # bottomk = false
+#
+# ## The plugin assigns each metric a GroupBy tag generated from its name and
+# ## tags. If this setting is different than "" the plugin will add a
+# ## tag (whose name will be the value of this setting) to each metric with
+# ## the value of the calculated GroupBy tag. Useful for debugging
+# # add_groupby_tag = ""
+#
+# ## These settings provide a way to know the position of each metric in
+# ## the top k. The 'add_rank_fields' setting lets you specify for which
+# ## fields the position is required. If the list is non empty, then a field
+# ## will be added to each and every metric for each string present in this
+# ## setting. This field will contain the ranking of the group that
+# ## the metric belonged to when aggregated over that field.
+# ## The name of the field will be set to the name of the aggregation field,
+# ## suffixed with the string '_topk_rank'
+# # add_rank_fields = []
+#
+# ## These settings provide a way to know what values the plugin is generating
+# ## when aggregating metrics. The 'add_aggregate_fields' setting lets you
+# ## specify for which fields the final aggregation value is required. If the
+# ## list is non empty, then a field will be added to each and every metric
+# ## for each field present in this setting. This field will contain
+# ## the computed aggregation for the group that the metric belonged to when
+# ## aggregated over that field.
+# ## The name of the field will be set to the name of the aggregation field,
+# ## suffixed with the string '_topk_aggregate'
+# # add_aggregate_fields = []
+
+
+# # Rotate multi field metric into several single field metrics
+# [[processors.unpivot]]
+# ## Tag to use for the name.
+# tag_key = "name"
+# ## Field to use for the name of the value.
+# value_key = "value"
+
+
+###############################################################################
+#                            AGGREGATOR PLUGINS                               #
+###############################################################################
+
+
+# # Keep the aggregate basicstats of each metric passing through.
+# [[aggregators.basicstats]]
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+#
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## Configures which basic stats to push as fields
+# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
+
+
+# # Calculates a derivative for every field.
+# [[aggregators.derivative]]
+# ## The period in which to flush the aggregator.
+# period = "30s"
+# ##
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+# ##
+# ## This aggregator will estimate a derivative for each field, which is
+# ## contained in both the first and last metric of the aggregation interval.
+# ## Without further configuration the derivative will be calculated with
+# ## respect to the time difference between these two measurements in seconds.
+# ## The formula applied is for every field:
+# ##
+# ##              value_last - value_first
+# ## derivative = --------------------------
+# ##              time_difference_in_seconds
+# ##
+# ## The resulting derivative will be named *fieldname_rate*. The suffix
+# ## "_rate" can be configured by the *suffix* parameter. When using a
+# ## derivation variable you can include its name for more clarity.
+# # suffix = "_rate"
+# ##
+# ## As an abstraction the derivative can be calculated not only by the time
+# ## difference but by the difference of a field, which is contained in the
+# ## measurement. This field is assumed to be monotonically increasing. This
+# ## feature is used by specifying a *variable*.
+# ## Make sure the specified variable is not filtered and exists in the metrics
+# ## passed to this aggregator!
+# # variable = ""
+# ##
+# ## When using a field as the derivation parameter the name of that field will
+# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*.
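+# ##
+# ## A hypothetical worked example (not part of the stock sample config): with
+# ## variable = "uptime", a period starting at requests=100, uptime=10 and
+# ## ending at requests=160, uptime=40 yields
+# ##   requests_by_uptime = (160 - 100) / (40 - 10) = 2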
+# ##
+# ## Note that the calculation is based on the actual timestamp of the
+# ## measurements. When there is only one measurement during that period, the
+# ## measurement will be rolled over to the next period. The maximum number of
+# ## such roll-overs can be configured with a default of 10.
+# # max_roll_over = 10
+# ##
+
+
+# # Report the final metric of a series
+# [[aggregators.final]]
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## The time a series can go without updates before it is considered final.
+# series_timeout = "5m"
+
+
+# # Create aggregate histograms.
+# [[aggregators.histogram]]
+# ## The period in which to flush the aggregator.
+# period = "30s"
+#
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## If true, the histogram will be reset on flush instead
+# ## of accumulating the results.
+# reset = false
+#
+# ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added.
+# ## Defaults to true.
+# cumulative = true
+#
+# ## Example config that aggregates all fields of the metric.
+# # [[aggregators.histogram.config]]
+# # ## Right borders of buckets (with +Inf implicitly added).
+# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
+# # ## The name of metric.
+# # measurement_name = "cpu"
+#
+# ## Example config that aggregates only specific fields of the metric.
+# # [[aggregators.histogram.config]]
+# # ## Right borders of buckets (with +Inf implicitly added).
+# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+# # ## The name of metric.
+# # measurement_name = "diskio"
+# # ## The concrete fields of metric
+# # fields = ["io_time", "read_time", "write_time"]
+
+
+# # Merge metrics into multifield metrics by series key
+# [[aggregators.merge]]
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = true
+
+
+# # Keep the aggregate min/max of each metric passing through.
+# [[aggregators.minmax]]
+# ## General Aggregator Arguments:
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+
+
+# # Keep the aggregate quantiles of each metric passing through.
+# [[aggregators.quantile]]
+# ## General Aggregator Arguments:
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+#
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## Quantiles to output in the range [0,1]
+# # quantiles = [0.25, 0.5, 0.75]
+#
+# ## Type of aggregation algorithm
+# ## Supported are:
+# ## "t-digest" -- approximation using centroids, can cope with large number of samples
+# ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7)
+# ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8)
+# ## NOTE: Do not use the "exact" algorithms with a large number of samples, as
+# ## they will impair performance and memory consumption!
+# # algorithm = "t-digest"
+#
+# ## Compression for approximation (t-digest). The value needs to be
+# ## greater than or equal to 1.0. Smaller values will result in better
+# ## performance but less accuracy.
+# # compression = 100.0
+
+
+# # Count the occurrence of values in fields.
+# [[aggregators.valuecounter]]
+# ## General Aggregator Arguments:
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+# ## The fields for which the values will be counted
+# fields = []
+
+
+###############################################################################
+#                            INPUT PLUGINS                                    #
+###############################################################################
+
+
+# Read metrics about cpu usage
+[[inputs.cpu]]
+ ## Whether to report per-cpu stats or not
+ percpu = true
+ ## Whether to report total system cpu stats or not
+ totalcpu = true
+ ## If true, collect raw CPU time metrics
+ collect_cpu_time = false
+ ## If true, compute and report the sum of all non-idle CPU states
+ report_active = false
+
+
+# Read metrics about disk usage by mount point
+[[inputs.disk]]
+ ## By default stats will be gathered for all mount points.
+ ## Setting mount_points will restrict the stats to only the specified mount points.
+ # mount_points = ["/"]
+
+ ## Ignore mount points by filesystem type.
+ ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
+
+
+# Read metrics about disk IO by device
+[[inputs.diskio]]
+ ## By default, telegraf will gather stats for all devices including
+ ## disk partitions.
+ ## Setting devices will restrict the stats to the specified devices.
+ # devices = ["sda", "sdb", "vd*"]
+ ## Uncomment the following line if you need disk serial numbers.
+ # skip_serial_number = false
+ #
+ ## On systems which support it, device metadata can be added in the form of
+ ## tags.
+ ## Currently only Linux is supported via udev properties. You can view
+ ## available properties for a device by running:
+ ## 'udevadm info -q property -n /dev/sda'
+ ## Note: Most, but not all, udev properties can be accessed this way. Properties
+ ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
+ # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
+ #
+ ## Using the same metadata source as device_tags, you can also customize the
+ ## name of the device via templates.
+ ## The 'name_templates' parameter is a list of templates to try and apply to
+ ## the device. The template may contain variables in the form of '$PROPERTY' or
+ ## '${PROPERTY}'. The first template which does not contain any variables not
+ ## present for the device is used as the device name tag.
+ ## The typical use case is for LVM volumes, to get the VG/LV name instead of
+ ## the near-meaningless DM-0 name.
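+ ## A hypothetical illustration (not part of the stock sample config): if udev
+ ## reports DM_VG_NAME=vg0 and DM_LV_NAME=data for dm-0, the second template
+ ## below names the device "vg0/data" instead of "dm-0".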
+ # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
+
+
+# Get kernel statistics from /proc/stat
+[[inputs.kernel]]
+ # no configuration
+
+
+# Read metrics about memory usage
+[[inputs.mem]]
+ # no configuration
+
+
+# Get the number of processes and group them by status
+[[inputs.processes]]
+ # no configuration
+
+
+# Read metrics about swap memory usage
+[[inputs.swap]]
+ # no configuration
+
+
+# Read metrics about system load & uptime
+[[inputs.system]]
+ ## Uncomment to remove deprecated metrics.
+ # fielddrop = ["uptime_format"]
+
+
+# # Gather ActiveMQ metrics
+# [[inputs.activemq]]
+# ## ActiveMQ WebConsole URL
+# url = "http://127.0.0.1:8161"
+#
+# ## Required ActiveMQ Endpoint
+# ## deprecated in 1.11; use the url option
+# # server = "127.0.0.1"
+# # port = 8161
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## Required ActiveMQ webadmin root path
+# # webadmin = "admin"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read stats from aerospike server(s)
+# [[inputs.aerospike]]
+# ## Aerospike servers to connect to (with port)
+# ## This plugin will query all namespaces the aerospike
+# ## server has configured and get stats for them.
+# servers = ["localhost:3000"]
+#
+# # username = "telegraf"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # enable_tls = false
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## If false, skip chain & host verification
+# # insecure_skip_verify = true
+#
+# # Feature Options
+# # Add namespace variable to limit the namespaces executed on
+# # Leave blank to do all
+# # disable_query_namespaces = true # default false
+# # namespaces = ["namespace1", "namespace2"]
+#
+# # Enable set level telemetry
+# # query_sets = true # default: false
+# # Add namespace set combinations to limit sets executed on
+# # Leave blank to do all sets
+# # sets = ["namespace1/set1", "namespace1/set2", "namespace3"]
+#
+# # Histograms
+# # enable_ttl_histogram = true # default: false
+# # enable_object_size_linear_histogram = true # default: false
+#
+# # By default, aerospike produces a 100 bucket histogram. This is not great
+# # for most graphing tools; this setting allows squashing it to a smaller
+# # number of buckets. To have a balanced histogram, the number of buckets
+# # chosen should divide evenly into 100.
+# # num_histogram_buckets = 100 # default: 10
+
+
+# # Query statistics from AMD Graphics cards using rocm-smi binary
+# [[inputs.amd_rocm_smi]]
+# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath
+# # bin_path = "/opt/rocm/bin/rocm-smi"
+#
+# ## Optional: timeout for GPU polling
+# # timeout = "5s"
+
+
+# # Read Apache status information (mod_status)
+# [[inputs.apache]]
+# ## An array of URLs to gather from, must be directed at the machine
+# ## readable version of the mod_status page including the auto query string.
+# ## Default is "http://localhost/server-status?auto".
+# urls = ["http://localhost/server-status?auto"]
+#
+# ## Credentials for basic HTTP authentication.
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Monitor APC UPSes connected to apcupsd +# [[inputs.apcupsd]] +# # A list of running apcupsd server to connect to. +# # If not provided will default to tcp://127.0.0.1:3551 +# servers = ["tcp://127.0.0.1:3551"] +# +# ## Timeout for dialing server. +# timeout = "5s" + + +# # Gather metrics from Apache Aurora schedulers +# [[inputs.aurora]] +# ## Schedulers are the base addresses of your Aurora Schedulers +# schedulers = ["http://127.0.0.1:8081"] +# +# ## Set of role types to collect metrics from. +# ## +# ## The scheduler roles are checked each interval by contacting the +# ## scheduler nodes; zookeeper is not contacted. +# # roles = ["leader", "follower"] +# +# ## Timeout is the max time for total network operations. +# # timeout = "5s" +# +# ## Username and password are sent using HTTP Basic Auth. +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather Azure Storage Queue metrics +# [[inputs.azure_storage_queue]] +# ## Required Azure Storage Account name +# account_name = "mystorageaccount" +# +# ## Required Azure Storage Account access key +# account_key = "storageaccountaccesskey" +# +# ## Set to false to disable peeking age of oldest message (executes faster) +# # peek_oldest_message_age = true + + +# # Read metrics of bcache from stats_total and dirty_data +# [[inputs.bcache]] +# ## Bcache sets path +# ## If not specified, then default is: +# bcachePath = "/sys/fs/bcache" +# +# ## By default, Telegraf gather stats for all bcache devices +# ## Setting devices will restrict the stats to the specified +# ## bcache devices. +# bcacheDevs = ["bcache0"] + + +# # Collects Beanstalkd server and tubes stats +# [[inputs.beanstalkd]] +# ## Server to collect data from +# server = "localhost:11300" +# +# ## List of tubes to gather stats about. +# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command +# tubes = ["notifications"] + + +# # Read metrics exposed by Beat +# [[inputs.beat]] +# ## An URL from which to read Beat-formatted JSON +# ## Default is "http://127.0.0.1:5066". +# url = "http://127.0.0.1:5066" +# +# ## Enable collection of the listed stats +# ## An empty list means collect all. Available options are currently +# ## "beat", "libbeat", "system" and "filebeat". +# # include = ["beat", "libbeat", "filebeat"] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "logstash.example.com" +# +# ## Timeout for HTTP requests +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read BIND nameserver XML statistics +# [[inputs.bind]] +# ## An array of BIND XML statistics URI to gather stats. 
+# ## Default is "http://localhost:8053/xml/v3".
+# # urls = ["http://localhost:8053/xml/v3"]
+# # gather_memory_contexts = false
+# # gather_views = false
+#
+# ## Timeout for http requests made to the bind nameserver
+# # timeout = "4s"
+
+
+# # Collect bond interface status, slave statuses and failure counts
+# [[inputs.bond]]
+# ## Sets 'proc' directory path
+# ## If not specified, then default is /proc
+# # host_proc = "/proc"
+#
+# ## By default, telegraf gathers stats for all bond interfaces
+# ## Setting interfaces will restrict the stats to the specified
+# ## bond interfaces.
+# # bond_interfaces = ["bond0"]
+
+
+# # Collect Kafka topics and consumers status from Burrow HTTP API.
+# [[inputs.burrow]]
+# ## Burrow API endpoints in format "schema://host:port".
+# ## Default is "http://localhost:8000".
+# servers = ["http://localhost:8000"]
+#
+# ## Override Burrow API prefix.
+# ## Useful when Burrow is behind reverse-proxy.
+# # api_prefix = "/v3/kafka"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Limit per-server concurrent connections.
+# ## Useful in case of large number of topics or consumer groups.
+# # concurrent_connections = 20
+#
+# ## Filter clusters, default is no filtering.
+# ## Values can be specified as glob patterns.
+# # clusters_include = []
+# # clusters_exclude = []
+#
+# ## Filter consumer groups, default is no filtering.
+# ## Values can be specified as glob patterns.
+# # groups_include = []
+# # groups_exclude = []
+#
+# ## Filter topics, default is no filtering.
+# ## Values can be specified as glob patterns.
+# # topics_include = []
+# # topics_exclude = []
+#
+# ## Credentials for basic HTTP authentication.
+# # username = ""
+# # password = ""
+#
+# ## Optional SSL config
+# # ssl_ca = "/etc/telegraf/ca.pem"
+# # ssl_cert = "/etc/telegraf/cert.pem"
+# # ssl_key = "/etc/telegraf/key.pem"
+# # insecure_skip_verify = false
+
+
+# # Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster.
+# [[inputs.ceph]]
+# ## This is the recommended interval to poll. Too frequent and you will lose
+# ## data points due to timeouts during rebalancing and recovery
+# interval = '1m'
+#
+# ## All configuration values are optional, defaults are shown below
+#
+# ## location of ceph binary
+# ceph_binary = "/usr/bin/ceph"
+#
+# ## directory in which to look for socket files
+# socket_dir = "/var/run/ceph"
+#
+# ## prefix of MON and OSD socket files, used to determine socket type
+# mon_prefix = "ceph-mon"
+# osd_prefix = "ceph-osd"
+# mds_prefix = "ceph-mds"
+# rgw_prefix = "ceph-client"
+#
+# ## suffix used to identify socket files
+# socket_suffix = "asok"
+#
+# ## Ceph user to authenticate as, ceph will search for the corresponding keyring
+# ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the
+# ## client section of ceph.conf for example:
+# ##
+# ## [client.telegraf]
+# ## keyring = /etc/ceph/client.telegraf.keyring
+# ##
+# ## Consult the ceph documentation for more detail on keyring generation.
+# ceph_user = "client.admin" +# +# ## Ceph configuration to use to locate the cluster +# ceph_config = "/etc/ceph/ceph.conf" +# +# ## Whether to gather statistics via the admin socket +# gather_admin_socket_stats = true +# +# ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config +# ## to be specified +# gather_cluster_stats = false + + +# # Read specific statistics per cgroup +# [[inputs.cgroup]] +# ## Directories in which to look for files, globs are supported. +# ## Consider restricting paths to the set of cgroups you really +# ## want to monitor if you have a large number of cgroups, to avoid +# ## any cardinality issues. +# # paths = [ +# # "/sys/fs/cgroup/memory", +# # "/sys/fs/cgroup/memory/child1", +# # "/sys/fs/cgroup/memory/child2/*", +# # ] +# ## cgroup stat fields, as file names, globs are supported. +# ## these file names are appended to each path from above. +# # files = ["memory.*usage*", "memory.limit_in_bytes"] + + +# # Get standard chrony metrics, requires chronyc executable. +# [[inputs.chrony]] +# ## If true, chronyc tries to perform a DNS lookup for the time server. +# # dns_lookup = false + + +# # Pull Metric Statistics from Amazon CloudWatch +# [[inputs.cloudwatch]] +# ## Amazon Region +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) +# # http_proxy_url = "http://localhost:8888" +# +# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all +# # metrics are made available to the 1 minute period. Some are collected at +# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. +# # Note that if a period is configured that is smaller than the minimum for a +# # particular metric, that metric will not be returned by the Cloudwatch API +# # and will not be collected by Telegraf. +# # +# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) +# period = "5m" +# +# ## Collection Delay (required - must account for metrics availability via CloudWatch API) +# delay = "5m" +# +# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid +# ## gaps or overlap in pulled data +# interval = "5m" +# +# ## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored. +# ## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours. +# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain. 
+# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old.
+# ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html
+# #recently_active = "PT3H"
+#
+# ## Configure the TTL for the internal cache of metrics.
+# # cache_ttl = "1h"
+#
+# ## Metric Statistic Namespaces (required)
+# namespaces = ["AWS/ELB"]
+# # A single metric statistic namespace that will be appended to namespaces on startup
+# # namespace = "AWS/ELB"
+#
+# ## Maximum requests per second. Note that the global default AWS rate limit is
+# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
+# ## maximum of 50.
+# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
+# # ratelimit = 25
+#
+# ## Timeout for http requests made by the cloudwatch client.
+# # timeout = "5s"
+#
+# ## Namespace-wide statistic filters. These allow fewer queries to be made to
+# ## cloudwatch.
+# # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
+# # statistic_exclude = []
+#
+# ## Metrics to Pull
+# ## Defaults to all Metrics in Namespace if nothing is provided
+# ## Refreshes Namespace available metrics every 1h
+# #[[inputs.cloudwatch.metrics]]
+# # names = ["Latency", "RequestCount"]
+# #
+# # ## Statistic filters for Metric. These allow for retrieving specific
+# # ## statistics for an individual metric.
+# # # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
+# # # statistic_exclude = []
+# #
+# # ## Dimension filters for Metric. All dimensions defined for the metric names
+# # ## must be specified in order to retrieve the metric statistics.
+# # ## 'value' has wildcard / 'glob' matching support such as 'p-*'.
+# # [[inputs.cloudwatch.metrics.dimensions]]
+# # name = "LoadBalancerName"
+# # value = "p-example"
+
+
+# # Collects conntrack stats from the configured directories and files.
+# [[inputs.conntrack]]
+# ## The following defaults would work with multiple versions of conntrack.
+# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
+# ## kernel versions, as are the directory locations.
+#
+# ## Superset of filenames to look for within the conntrack dirs.
+# ## Missing files will be ignored.
+# files = ["ip_conntrack_count","ip_conntrack_max",
+# "nf_conntrack_count","nf_conntrack_max"]
+#
+# ## Directories to search within for the conntrack files above.
+# ## Missing directories will be ignored.
+# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
+
+
+# # Gather health check statuses from services registered in Consul
+# [[inputs.consul]]
+# ## Consul server address
+# # address = "localhost:8500"
+#
+# ## URI scheme for the Consul server, one of "http", "https"
+# # scheme = "http"
+#
+# ## Metric version controls the mapping from Consul metrics into
+# ## Telegraf metrics.
+# ##
+# ## example: metric_version = 1; deprecated in 1.15
+# ## metric_version = 2; recommended version
+# # metric_version = 1
+#
+# ## ACL token used in every request
+# # token = ""
+#
+# ## HTTP Basic Authentication username and password.
+# # username = "" +# # password = "" +# +# ## Data center to query the health checks from +# # datacenter = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true +# +# ## Consul checks' tag splitting +# # When tags are formatted like "key:value" with ":" as a delimiter then +# # they will be splitted and reported as proper key:value in Telegraf +# # tag_delimiter = ":" + + +# # Read per-node and per-bucket metrics from Couchbase +# [[inputs.couchbase]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## http://couchbase-0.example.com/ +# ## http://admin:secret@couchbase-0.example.com:8091/ +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no protocol is specified, HTTP is used. +# ## If no port is specified, 8091 is used. +# servers = ["http://localhost:8091"] +# +# ## Filter bucket fields to include only here. +# # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification (defaults to false) +# ## If set to false, tls_cert and tls_key are required +# # insecure_skip_verify = false + + +# # Read CouchDB Stats from one or more servers +# [[inputs.couchdb]] +# ## Works with CouchDB stats endpoints out of the box +# ## Multiple Hosts from which to read CouchDB stats: +# hosts = ["http://localhost:8086/_stats"] +# +# ## Use HTTP Basic Authentication. +# # basic_username = "telegraf" +# # basic_password = "p@ssw0rd" + + +# # Fetch metrics from a CSGO SRCDS +# [[inputs.csgo]] +# ## Specify servers using the following format: +# ## servers = [ +# ## ["ip1:port1", "rcon_password1"], +# ## ["ip2:port2", "rcon_password2"], +# ## ] +# # +# ## If no servers are specified, no data will be collected +# servers = [] + + +# # Input plugin for DC/OS metrics +# [[inputs.dcos]] +# ## The DC/OS cluster URL. +# cluster_url = "https://dcos-ee-master-1" +# +# ## The ID of the service account. +# service_account_id = "telegraf" +# ## The private key file for the service account. +# service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem" +# +# ## Path containing login token. If set, will read on every gather. +# # token_file = "/home/dcos/.dcos/token" +# +# ## In all filter options if both include and exclude are empty all items +# ## will be collected. Arrays may contain glob patterns. +# ## +# ## Node IDs to collect metrics from. If a node is excluded, no metrics will +# ## be collected for its containers or apps. +# # node_include = [] +# # node_exclude = [] +# ## Container IDs to collect container metrics from. +# # container_include = [] +# # container_exclude = [] +# ## Container IDs to collect app metrics from. +# # app_include = [] +# # app_exclude = [] +# +# ## Maximum concurrent connections to the cluster. +# # max_connections = 10 +# ## Maximum time to receive a response from cluster. 
+# # response_timeout = "20s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true +# +# ## Recommended filtering to reduce series cardinality. +# # [inputs.dcos.tagdrop] +# # path = ["/var/lib/mesos/slave/slaves/*"] + + +# # Read metrics from one or many disque servers +# [[inputs.disque]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port and password. +# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost"] + + +# # Provide a native collection for dmsetup based statistics for dm-cache +# [[inputs.dmcache]] +# ## Whether to report per-device stats or not +# per_device = true + + +# # Query given DNS server and gives statistics +# [[inputs.dns_query]] +# ## servers to query +# servers = ["8.8.8.8"] +# +# ## Network is the network protocol name. +# # network = "udp" +# +# ## Domains or subdomains to query. +# # domains = ["."] +# +# ## Query record type. +# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. +# # record_type = "A" +# +# ## Dns server port. +# # port = 53 +# +# ## Query timeout in seconds. +# # timeout = 2 + + +# # Read metrics about docker containers +# [[inputs.docker]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# endpoint = "unix:///var/run/docker.sock" +# +# ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) +# gather_services = false +# +# ## Only collect metrics for these containers, collect all if empty +# container_names = [] +# +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# container_name_include = [] +# container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## Timeout for docker list, info, and stats commands +# timeout = "5s" +# +# ## Whether to report for each container per-device blkio (8:0, 8:1...), +# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. +# ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting +# ## is honored. +# perdevice = true +# +# ## Specifies for which classes a per-device metric should be issued +# ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) +# ## Please note that this setting has no effect if 'perdevice' is set to 'true' +# # perdevice_include = ["cpu"] +# +# ## Whether to report for each container total blkio and network stats or not. 
+# ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
+# ## Default value is 'false' for backwards compatibility; please set it to 'true' so that the 'total_include'
+# ## setting is honored.
+# total = false
+#
+# ## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
+# ## Possible values are 'cpu', 'blkio' and 'network'
+# ## Total 'cpu' is reported directly by the Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
+# ## Please note that this setting has no effect if 'total' is set to 'false'
+# # total_include = ["cpu", "blkio", "network"]
+#
+# ## Which environment variables should we use as a tag
+# # tag_env = ["JAVA_HOME", "HEAP_SIZE"]
+#
+# ## docker labels to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all labels as tags
+# docker_label_include = []
+# docker_label_exclude = []
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read statistics from one or many dovecot servers
+# [[inputs.dovecot]]
+# ## specify dovecot servers via an address:port list
+# ## e.g.
+# ## localhost:24242
+# ##
+# ## If no servers are specified, then localhost is used as the host.
+# servers = ["localhost:24242"]
+#
+# ## Type is one of "user", "domain", "ip", or "global"
+# type = "global"
+#
+# ## Wildcard matches like "*.com". An empty string "" is same as "*"
+# ## If type = "ip" filters should be <IP/network>
+# filters = [""]
+
+
+# # Reads metrics from DPDK applications using v2 telemetry interface.
+# [[inputs.dpdk]]
+# ## Path to DPDK telemetry socket. This shall point to v2 version of DPDK telemetry interface.
+# # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2"
+#
+# ## Duration that defines how long the connected socket client will wait for a response before terminating connection.
+# ## This includes both writing to and reading from socket. Since it's local socket access
+# ## to a fast packet processing application, the timeout should be sufficient for most users.
+# ## Setting the value to 0 disables the timeout (not recommended)
+# # socket_access_timeout = "200ms"
+#
+# ## Enables telemetry data collection for selected device types.
+# ## Adding "ethdev" enables collection of telemetry from DPDK NICs (stats, xstats, link_status).
+# ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices (xstats).
+# # device_types = ["ethdev"]
+#
+# ## List of custom, application-specific telemetry commands to query
+# ## The list of available commands depends on the application deployed. Applications can register their own commands
+# ## via telemetry library API http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands
+# ## e.g. for the L3 Forwarding with Power Management sample application this could be:
+# ## additional_commands = ["/l3fwd-power/stats"]
+# # additional_commands = []
+#
+# ## Allows turning off collecting data for individual "ethdev" commands.
+# ## Remove "/ethdev/link_status" from the list to start getting link status metrics.
+# [inputs.dpdk.ethdev]
+# exclude_commands = ["/ethdev/link_status"]
+#
+# ## When running multiple instances of the plugin it's recommended to add a unique tag to each instance to identify
+# ## metrics exposed by an instance of the DPDK application.
+# ## This is useful when multiple DPDK apps run on a single host.
+# ## [inputs.dpdk.tags]
+# ## dpdk_instance = "my-fwd-app"
+
+
+# # Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints.
+# [[inputs.ecs]]
+# ## ECS metadata url.
+# ## Metadata v2 API is used if set explicitly. Otherwise,
+# ## v3 metadata endpoint API is used if available.
+# # endpoint_url = ""
+#
+# ## Containers to include and exclude. Globs accepted.
+# ## Note that an empty array for both will include all containers
+# # container_name_include = []
+# # container_name_exclude = []
+#
+# ## Container states to include and exclude. Globs accepted.
+# ## When empty only containers in the "RUNNING" state will be captured.
+# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING",
+# ## "RESOURCES_PROVISIONED", "STOPPED".
+# # container_status_include = []
+# # container_status_exclude = []
+#
+# ## ecs labels to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all labels as tags
+# ecs_label_include = [ "com.amazonaws.ecs.*" ]
+# ecs_label_exclude = []
+#
+# ## Timeout for queries.
+# # timeout = "5s"
+
+
+# # Read stats from one or more Elasticsearch servers or clusters
+# [[inputs.elasticsearch]]
+# ## specify a list of one or more Elasticsearch servers
+# # you can add username and password to your url to use basic authentication:
+# # servers = ["http://user:pass@localhost:9200"]
+# servers = ["http://localhost:9200"]
+#
+# ## Timeout for HTTP requests to the Elasticsearch server(s)
+# http_timeout = "5s"
+#
+# ## When local is true (the default), the node will read only its own stats.
+# ## Set local to false when you want to read the node stats from all nodes
+# ## of the cluster.
+# local = true
+#
+# ## Set cluster_health to true when you want to also obtain cluster health stats
+# cluster_health = false
+#
+# ## Adjust cluster_health_level when you want to also obtain detailed health stats
+# ## The options are
+# ## - indices (default)
+# ## - cluster
+# # cluster_health_level = "indices"
+#
+# ## Set cluster_stats to true when you want to also obtain cluster stats.
+# cluster_stats = false
+#
+# ## Only gather cluster_stats from the master node. This requires local = true
+# cluster_stats_only_from_master = true
+#
+# ## Indices to collect; can be one or more index names or _all
+# ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date.
+# indices_include = ["_all"]
+#
+# ## One of "shards", "cluster", "indices"
+# indices_level = "shards"
+#
+# ## node_stats is a list of sub-stats that you want to have gathered. Valid options
+# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
+# ## "breaker". By default, all stats are gathered.
+# # node_stats = ["jvm", "http"]
+#
+# ## HTTP Basic Authentication username and password.
+# # username = ""
+# # password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix.
+# ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and sort them
+# ## by the date or number after the wildcard.
+# ## Metrics are then gathered for only the 'num_most_recent_indices' most recent indices.
+# # num_most_recent_indices = 0
+
+
+# # Derive metrics from aggregating Elasticsearch query results
+# [[inputs.elasticsearch_query]]
+# ## The full HTTP endpoint URL for your Elasticsearch instance
+# ## Multiple urls can be specified as part of the same cluster;
+# ## this means that only ONE of the urls will be queried each interval.
+# urls = [ "http://node1.es.example.com:9200" ] # required.
+#
+# ## Elasticsearch client timeout, defaults to "5s".
+# # timeout = "5s"
+#
+# ## Set to true to ask Elasticsearch for a list of all cluster nodes,
+# ## thus it is not necessary to list all nodes in the urls config option
+# # enable_sniffer = false
+#
+# ## Set the interval to check if the Elasticsearch nodes are available
+# ## This option is only used if enable_sniffer is also set (0s to disable it)
+# # health_check_interval = "10s"
+#
+# ## HTTP basic authentication details (eg. when using x-pack)
+# # username = "telegraf"
+# # password = "mypassword"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# [[inputs.elasticsearch_query.aggregation]]
+# ## measurement name for the results of the aggregation query
+# measurement_name = "measurement"
+#
+# ## Elasticsearch indexes to query (accept wildcards).
+# index = "index-*"
+#
+# ## The date/time field in the Elasticsearch index (mandatory).
+# date_field = "@timestamp"
+#
+# ## If the field used for the date/time field in Elasticsearch is also using
+# ## a custom date/time format it may be required to provide the format to
+# ## correctly parse the field.
+# ##
+# ## If using one of the built in elasticsearch formats this is not required.
+# # date_field_custom_format = ""
+#
+# ## Time window to query (eg. "1m" to query documents from last minute).
+# ## Normally should be set to the same as the collection interval
+# query_period = "1m"
+#
+# ## Lucene query to filter results
+# # filter_query = "*"
+#
+# ## Fields to aggregate values (must be numeric fields)
+# # metric_fields = ["metric"]
+#
+# ## Aggregation function to use on the metric fields
+# ## Must be set if 'metric_fields' is set
+# ## Valid values are: avg, sum, min, max
+# # metric_function = "avg"
+#
+# ## Fields to be used as tags
+# ## Must be text, non-analyzed fields. Metric aggregations are performed per tag
+# # tags = ["field.keyword", "field2.keyword"]
+#
+# ## Set to true to include documents even when the tag(s) above are missing
+# # include_missing_tag = false
+#
+# ## String value of the tag when the tag does not exist
+# ## Used when include_missing_tag is true
+# # missing_tag_value = "null"
+
+
+# # Returns ethtool statistics for given interfaces
+# [[inputs.ethtool]]
+# ## List of interfaces to pull metrics for
+# # interface_include = ["eth0"]
+#
+# ## List of interfaces to ignore when pulling metrics.
+# # interface_exclude = ["eth1"]
+#
+# ## Some drivers declare statistics with extra whitespace, different spacing,
+# ## and mixed cases. This list, when enabled, can be used to clean the keys.
+# ## Here are the current possible normalizations:
+# ## * snakecase: converts fooBarBaz to foo_bar_baz
+# ## * trim: removes leading and trailing whitespace
+# ## * lower: changes all capitalized letters to lowercase
+# ## * underscore: replaces spaces with underscores
+# # normalize_keys = ["snakecase", "trim", "lower", "underscore"]
+
+
+# # Read metrics from one or more commands that can output to stdout
+# [[inputs.exec]]
+# ## Commands array
+# commands = [
+# "/tmp/test.sh",
+# "/usr/bin/mycollector --foo=bar",
+# "/tmp/collect_*.sh"
+# ]
+#
+# ## Timeout for each command to complete.
+# timeout = "5s"
+#
+# ## measurement name suffix (for separating different commands)
+# name_suffix = "_mycollector"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read metrics from fail2ban.
+# [[inputs.fail2ban]]
+# ## Use sudo to run fail2ban-client
+# use_sudo = false
+
+
+# # Read devices value(s) from a Fibaro controller
+# [[inputs.fibaro]]
+# ## Required Fibaro controller address/hostname.
+# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
+# url = "http://<controller>:80"
+#
+# ## Required credentials to access the API (http://<controller>/api)
+# username = "<username>"
+# password = "<password>"
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+
+
+# # Parse a complete file each interval
+# [[inputs.file]]
+# ## Files to parse each interval. Accept standard unix glob matching rules,
+# ## as well as ** to match recursive files and directories.
+# files = ["/tmp/metrics.out"]
+#
+# ## Name a tag containing the name of the file the data was parsed from. Leave empty
+# ## to disable.
+# # file_tag = ""
+#
+# ## Character encoding to use when interpreting the file contents. Invalid
+# ## characters are replaced using the unicode replacement character. When set
+# ## to the empty string the data is not decoded to text.
+# ## ex: character_encoding = "utf-8"
+# ## character_encoding = "utf-16le"
+# ## character_encoding = "utf-16be"
+# ## character_encoding = ""
+# # character_encoding = ""
+#
+# ## The dataformat to be read from files
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Count files in a directory
+# [[inputs.filecount]]
+# ## Directory to gather stats about.
+# ## deprecated in 1.9; use the directories option
+# # directory = "/var/cache/apt/archives"
+#
+# ## Directories to gather stats about.
+# ## This accepts standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## /var/log/** -> recursively find all directories in /var/log and count files in each directory
+# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory
+# ## /var/log -> count all files in /var/log and all of its subdirectories
+# directories = ["/var/cache/apt/archives"]
+#
+# ## Only count files that match the name pattern. Defaults to "*".
+# name = "*.deb"
+#
+# ## Count files in subdirectories. Defaults to true.
+# recursive = false
+#
+# ## Only count regular files. Defaults to true.
+# regular_only = true
+#
+# ## Follow all symlinks while walking the directory tree. Defaults to false.
+# follow_symlinks = false
+#
+# ## Only count files that are at least this size. If size is
+# ## a negative number, only count files that are smaller than the
+# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
+# ## Without quotes and units, interpreted as size in bytes.
+# size = "0B"
+#
+# ## Only count files that have not been touched for at least this
+# ## duration. If mtime is negative, only count files that have been
+# ## touched in this duration. Defaults to "0s".
+# mtime = "0s"
+
+
+# # Read stats about given file(s)
+# [[inputs.filestat]]
+# ## Files to gather stats about.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## "/var/log/**.log" -> recursively find all .log files in /var/log
+# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
+# ## "/var/log/apache.log" -> just gather stats on the apache log file
+# ##
+# ## See https://github.com/gobwas/glob for more examples
+# ##
+# files = ["/var/log/**.log"]
+#
+# ## If true, read the entire file and calculate an md5 checksum.
+# md5 = false
+
+
+# # Read real time temps from fireboard.io servers
+# [[inputs.fireboard]]
+# ## Specify auth token for your account
+# auth_token = "invalidAuthToken"
+# ## You can override the fireboard server URL if necessary
+# # url = "https://fireboard.io/api/v1/devices.json"
+# ## You can set a different http_timeout if you need to
+# ## You should set a string using a number and time indicator
+# ## for example "12s" for 12 seconds.
+# # http_timeout = "4s"
+
+
+# # Read metrics exposed by fluentd in_monitor plugin
+# [[inputs.fluentd]]
+# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
+# ##
+# ## Endpoint:
+# ## - only one URI is allowed
+# ## - https is not supported
+# endpoint = "http://localhost:24220/api/plugins.json"
+#
+# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
+# exclude = [
+# "monitor_agent",
+# "dummy",
+# ]
+
+
+# # Gather repository information from GitHub hosted repositories.
+# [[inputs.github]]
+# ## List of repositories to monitor.
+# repositories = [
+# "influxdata/telegraf",
+# "influxdata/influxdb"
+# ]
+#
+# ## Github API access token. Unauthenticated requests are limited to 60 per hour.
+# # access_token = ""
+#
+# ## Github API enterprise url. Github Enterprise accounts must specify their base url.
+# # enterprise_base_url = ""
+#
+# ## Timeout for HTTP requests.
+# # http_timeout = "5s"
+#
+# ## List of additional fields to query.
+# ## NOTE: Getting those fields might involve issuing additional API calls, so please
+# ## make sure you do not exceed the rate limit of GitHub.
+# ##
+# ## Available fields are:
+# ## - pull-requests -- number of open and closed pull requests (2 API calls per repository)
+# # additional_fields = []
+
+
+# # Read flattened metrics from one or more GrayLog HTTP endpoints
+# [[inputs.graylog]]
+# ## API endpoint, currently supported API:
+# ##
+# ## - multiple (Ex http://<host>:12900/system/metrics/multiple)
+# ## - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
+# ##
+# ## For namespace endpoint, the metrics array will be ignored for that call.
+# ## Endpoint can contain namespace and multiple type calls.
+# ##
+# ## Please check http://[graylog-server-ip]:12900/api-browser for the full list
+# ## of endpoints
+# servers = [
+# "http://[graylog-server-ip]:12900/system/metrics/multiple",
+# ]
+#
+# ## Metrics list
+# ## List of metrics can be found on Graylog webservice documentation.
+# ## Or by hitting the web service api at:
+# ## http://[graylog-host]:12900/system/metrics
+# metrics = [
+# "jvm.cl.loaded",
+# "jvm.memory.pools.Metaspace.committed"
+# ]
+#
+# ## Username and password
+# username = ""
+# password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics of haproxy, via socket or csv stats page
+# [[inputs.haproxy]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
+# ## Make sure you specify the complete path to the stats endpoint
+# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
+#
+# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
+# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## You can also use local socket with standard wildcard globbing.
+# ## Server address not starting with 'http' will be treated as a possible
+# ## socket, so both examples below are valid.
+# # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
+#
+# ## By default, some of the fields are renamed from what haproxy calls them.
+# ## Setting this option to true results in the plugin keeping the original
+# ## field names.
+# # keep_field_names = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Monitor disks' temperatures using hddtemp
+# [[inputs.hddtemp]]
+# ## By default, telegraf gathers temps data from all disks detected by
+# ## hddtemp.
+# ##
+# ## Only collect temps from the selected disks.
+# ##
+# ## A * as the device name will return the temperature values of all disks.
+# ##
+# # address = "127.0.0.1:7634"
+# # devices = ["sda", "*"]
+
+
+# # Read formatted metrics from one or more HTTP endpoints
+# [[inputs.http]]
+# ## One or more URLs from which to read formatted metrics
+# urls = [
+# "http://localhost/metrics"
+# ]
+#
+# ## HTTP method
+# # method = "GET"
+#
+# ## Optional HTTP headers
+# # headers = {"X-Special-Header" = "Special-Value"}
+#
+# ## Optional file with Bearer token
+# ## file content is added as an Authorization header
+# # bearer_token = "/path/to/file"
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## HTTP entity-body to send with POST/PUT requests.
+# # body = ""
+#
+# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## HTTP Proxy support
+# # http_proxy_url = ""
+#
+# ## OAuth2 Client Credentials Grant
+# # client_id = "clientid"
+# # client_secret = "secret"
+# # token_url = "https://identityprovider/oauth2/v1/token"
+# # scopes = ["urn:opc:idm:__myscopes__"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional Cookie authentication
+# # cookie_auth_url = "https://localhost/authMe"
+# # cookie_auth_method = "POST"
+# # cookie_auth_username = "username"
+# # cookie_auth_password = "pa$$word"
+# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
+# ## if cookie_auth_renewal is not set or set to "0", auth happens once and the cookie is never renewed
+# # cookie_auth_renewal = "5m"
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## List of success status codes
+# # success_status_codes = [200]
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
+
+
+# # HTTP/HTTPS request given an address, a method and a timeout
+# [[inputs.http_response]]
+# ## Deprecated in 1.12, use 'urls'
+# ## Server address (default http://localhost)
+# # address = "http://localhost"
+#
+# ## List of urls to query.
+# # urls = ["http://localhost"]
+#
+# ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
+# # http_proxy = "http://localhost:8888"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## HTTP Request Method
+# # method = "GET"
+#
+# ## Whether to follow redirects from the server (defaults to false)
+# # follow_redirects = false
+#
+# ## Optional file with Bearer token
+# ## file content is added as an Authorization header
+# # bearer_token = "/path/to/file"
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional HTTP Request Body
+# # body = '''
+# # {'fake':'data'}
+# # '''
+#
+# ## Optional name of the field that will contain the body of the response.
+# ## By default it is set to an empty string indicating that the body's content won't be added
+# # response_body_field = ''
+#
+# ## Maximum allowed HTTP response body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# ## If the response body size exceeds this limit a "body_read_error" will be raised
+# # response_body_max_size = "32MiB"
+#
+# ## Optional substring or regex match in body of the response (case sensitive)
+# # response_string_match = "\"service_status\": \"up\""
+# # response_string_match = "ok"
+# # response_string_match = "\".*_status\".?:.?\"up\""
+#
+# ## Expected response status code.
+# ## The status code of the response is compared to this value. If they match, the field
+# ## "response_status_code_match" will be 1, otherwise it will be 0. If the
+# ## expected status code is 0, the check is disabled and the field won't be added.
+# # response_status_code = 0
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## HTTP Request Headers (all values must be strings)
+# # [inputs.http_response.headers]
+# # Host = "github.com"
+#
+# ## Optional setting to map response http headers into tags
+# ## If the http header is not present on the request, no corresponding tag will be added
+# ## If multiple instances of the http header are present, only the first value will be used
+# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
+#
+# ## Interface to use when dialing an address
+# # interface = "eth0"
+
+
+# # Read flattened metrics from one or more JSON HTTP endpoints
+# [[inputs.httpjson]]
+# ## NOTE: This plugin only reads numerical measurements; strings and booleans
+# ## will be ignored.
+#
+# ## Name for the service being polled. Will be appended to the name of the
+# ## measurement e.g. httpjson_webserver_stats
+# ##
+# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
+# name = "webserver_stats"
+#
+# ## URL of each server in the service's cluster
+# servers = [
+# "http://localhost:9999/stats/",
+# "http://localhost:9998/stats/",
+# ]
+# ## Set response_timeout (default 5 seconds)
+# response_timeout = "5s"
+#
+# ## HTTP method to use: GET or POST (case-sensitive)
+# method = "GET"
+#
+# ## List of tag names to extract from top-level of JSON server response
+# # tag_keys = [
+# # "my_tag_1",
+# # "my_tag_2"
+# # ]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## HTTP parameters (all values must be strings). For "GET" requests, data
+# ## will be included in the query. For "POST" requests, data will be included
+# ## in the request body as "x-www-form-urlencoded".
+# # [inputs.httpjson.parameters]
+# # event_type = "cpu_spike"
+# # threshold = "0.75"
+#
+# ## HTTP Headers (all values must be strings)
+# # [inputs.httpjson.headers]
+# # X-Auth-Token = "my-xauth-token"
+# # apiVersion = "v1"
+
+
+# # Gather Icinga2 status
+# [[inputs.icinga2]]
+# ## Required Icinga2 server address
+# # server = "https://localhost:5665"
+#
+# ## Required Icinga2 object type ("services" or "hosts")
+# # object_type = "services"
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+
+
+# # Gets counters from all InfiniBand cards and ports installed
+# [[inputs.infiniband]]
+# # no configuration
+
+
+# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
+# [[inputs.influxdb]]
+# ## Works with InfluxDB debug endpoints out of the box,
+# ## but other services can use this format too.
+# ## See the influxdb plugin's README for more details.
+#
+# ## Multiple URLs from which to read InfluxDB-formatted JSON
+# ## Default is "http://localhost:8086/debug/vars".
+# urls = [
+# "http://localhost:8086/debug/vars"
+# ]
+#
+# ## Username and password to send using HTTP Basic Authentication.
+# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## http request & header timeout +# timeout = "5s" + + +# # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and Core metrics like temperature, power and utilization. +# [[inputs.intel_powerstat]] +# ## All global metrics are always collected by Intel PowerStat plugin. +# ## User can choose which per-CPU metrics are monitored by the plugin in cpu_metrics array. +# ## Empty array means no per-CPU specific metrics will be collected by the plugin - in this case only platform level +# ## telemetry will be exposed by Intel PowerStat plugin. +# ## Supported options: +# ## "cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles" +# # cpu_metrics = [] + + +# # Collect statistics about itself +# [[inputs.internal]] +# ## If true, collect telegraf memory stats. +# # collect_memstats = true + + +# # Monitors internet speed using speedtest.net service +# [[inputs.internet_speed]] +# ## Sets if runs file download test +# ## Default: false +# enable_file_download = false + + +# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. +# [[inputs.interrupts]] +# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is +# ## stored as a field. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. +# # cpu_as_tag = false +# +# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. +# # [inputs.interrupts.tagdrop] +# # irq = [ "NET_RX", "TASKLET" ] + + +# # Read metrics from the bare metal servers via IPMI +# [[inputs.ipmi_sensor]] +# ## optionally specify the path to the ipmitool executable +# # path = "/usr/bin/ipmitool" +# ## +# ## Setting 'use_sudo' to true will make use of sudo to run ipmitool. +# ## Sudo must be configured to allow the telegraf user to run ipmitool +# ## without a password. +# # use_sudo = false +# ## +# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR +# # privilege = "ADMINISTRATOR" +# ## +# ## optionally specify one or more servers via a url matching +# ## [username[:password]@][protocol[(address)]] +# ## e.g. +# ## root:passwd@lan(127.0.0.1) +# ## +# ## if no servers are specified, local machine sensor stats will be queried +# ## +# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] +# +# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid +# ## gaps or overlap in pulled data +# interval = "30s" +# +# ## Timeout for the ipmitool command to complete +# timeout = "20s" +# +# ## Schema Version: (Optional, defaults to version 1) +# metric_version = 2 +# +# ## Optionally provide the hex key for the IMPI connection. 
+# # hex_key = "" +# +# ## If ipmitool should use a cache +# ## for me ipmitool runs about 2 to 10 times faster with cache enabled on HP G10 servers (when using ubuntu20.04) +# ## the cache file may not work well for you if some sensors come up late +# # use_cache = false +# +# ## Path to the ipmitools cache file (defaults to OS temp dir) +# ## The provided path must exist and must be writable +# # cache_path = "" + + +# # Gather packets and bytes counters from Linux ipsets +# [[inputs.ipset]] +# ## By default, we only show sets which have already matched at least 1 packet. +# ## set include_unmatched_sets = true to gather them all. +# include_unmatched_sets = false +# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save") +# use_sudo = false +# ## The default timeout of 1s for ipset execution can be overridden here: +# # timeout = "1s" + + +# # Gather packets and bytes throughput from iptables +# [[inputs.iptables]] +# ## iptables require root access on most systems. +# ## Setting 'use_sudo' to true will make use of sudo to run iptables. +# ## Users must configure sudo to allow telegraf user to run iptables with no password. +# ## iptables can be restricted to only list command "iptables -nvL". +# use_sudo = false +# ## Setting 'use_lock' to true runs iptables with the "-w" option. +# ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvl") +# use_lock = false +# ## Define an alternate executable, such as "ip6tables". Default is "iptables". +# # binary = "ip6tables" +# ## defines the table to monitor: +# table = "filter" +# ## defines the chains to monitor. +# ## NOTE: iptables rules without a comment will not be monitored. +# ## Read the plugin documentation for more information. +# chains = [ "INPUT" ] + + +# # Collect virtual and real server stats from Linux IPVS +# [[inputs.ipvs]] +# # no configuration + + +# # Read jobs and cluster metrics from Jenkins instances +# [[inputs.jenkins]] +# ## The Jenkins URL in the format "schema://host:port" +# url = "http://my-jenkins-instance:8080" +# # username = "admin" +# # password = "admin" +# +# ## Set response_timeout +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Max Job Build Age filter +# ## Default 1 hour, ignore builds older than max_build_age +# # max_build_age = "1h" +# +# ## Optional Sub Job Depth filter +# ## Jenkins can have unlimited layer of sub jobs +# ## This config will limit the layers of pulling, default value 0 means +# ## unlimited pulling until no more sub jobs +# # max_subjob_depth = 0 +# +# ## Optional Sub Job Per Layer +# ## In workflow-multibranch-plugin, each branch will be created as a sub job. +# ## This config will limit to call only the lasted branches in each layer, +# ## empty will use default value 10 +# # max_subjob_per_layer = 10 +# +# ## Jobs to include or exclude from gathering +# ## When using both lists, job_exclude has priority. +# ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"] +# # job_include = [ "*" ] +# # job_exclude = [ ] +# +# ## Nodes to include or exclude from gathering +# ## When using both lists, node_exclude has priority. 
+# # node_include = [ "*" ]
+# # node_exclude = [ ]
+#
+# ## Worker pool for jenkins plugin only
+# ## Leaving this field empty will use the default value 5
+# # max_connections = 5
+
+
+# # Read JMX metrics through Jolokia
+# [[inputs.jolokia]]
+# # DEPRECATED: the jolokia plugin has been deprecated in favor of the
+# # jolokia2 plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
+#
+# ## This is the context root used to compose the jolokia url
+# ## NOTE that Jolokia requires a trailing slash at the end of the context root
+# ## NOTE that your jolokia security policy must allow for POST requests.
+# context = "/jolokia/"
+#
+# ## This specifies the mode used
+# # mode = "proxy"
+# #
+# ## When in proxy mode this section is used to specify further
+# ## proxy address configurations.
+# ## Remember to change host address to fit your environment.
+# # [inputs.jolokia.proxy]
+# # host = "127.0.0.1"
+# # port = "8080"
+#
+# ## Optional http timeouts
+# ##
+# ## response_header_timeout, if non-zero, specifies the amount of time to wait
+# ## for a server's response headers after fully writing the request.
+# # response_header_timeout = "3s"
+# ##
+# ## client_timeout specifies a time limit for requests made by this client.
+# ## Includes connection time, any redirects, and reading the response body.
+# # client_timeout = "4s"
+#
+# ## Attribute delimiter
+# ##
+# ## When multiple attributes are returned for a single
+# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
+# ## name, and the attribute name, separated by the given delimiter.
+# # delimiter = "_"
+#
+# ## List of servers exposing jolokia read service
+# [[inputs.jolokia.servers]]
+# name = "as-server-01"
+# host = "127.0.0.1"
+# port = "8080"
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## List of metrics collected on above servers
+# ## Each metric consists of a name, a jmx path and either
+# ## a pass or drop slice attribute.
+# ## This collects all heap memory usage metrics.
+# [[inputs.jolokia.metrics]]
+# name = "heap_memory_usage"
+# mbean = "java.lang:type=Memory"
+# attribute = "HeapMemoryUsage"
+#
+# ## This collects thread count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "thread_count"
+# mbean = "java.lang:type=Threading"
+# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
+#
+# ## This collects loaded/unloaded class count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "class_count"
+# mbean = "java.lang:type=ClassLoading"
+# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
+
+
+# # Read JMX metrics from a Jolokia REST agent endpoint
+# [[inputs.jolokia2_agent]]
+# # default_tag_prefix = ""
+# # default_field_prefix = ""
+# # default_field_separator = "."
+#
+# # Add agent URLs to query
+# urls = ["http://localhost:8080/jolokia"]
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional TLS config
+# # tls_ca = "/var/private/ca.pem"
+# # tls_cert = "/var/private/client.pem"
+# # tls_key = "/var/private/client-key.pem"
+# # insecure_skip_verify = false
+#
+# ## Add metrics to read
+# [[inputs.jolokia2_agent.metric]]
+# name = "java_runtime"
+# mbean = "java.lang:type=Runtime"
+# paths = ["Uptime"]
+
+
+# # Read JMX metrics from a Jolokia REST proxy endpoint
+# [[inputs.jolokia2_proxy]]
+# # default_tag_prefix = ""
+# # default_field_prefix = ""
+# # default_field_separator = "."
+#
+# ## Proxy agent
+# url = "http://localhost:8080/jolokia"
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional TLS config
+# # tls_ca = "/var/private/ca.pem"
+# # tls_cert = "/var/private/client.pem"
+# # tls_key = "/var/private/client-key.pem"
+# # insecure_skip_verify = false
+#
+# ## Add proxy targets to query
+# # default_target_username = ""
+# # default_target_password = ""
+# [[inputs.jolokia2_proxy.target]]
+# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
+# # username = ""
+# # password = ""
+#
+# ## Add metrics to read
+# [[inputs.jolokia2_proxy.metric]]
+# name = "java_runtime"
+# mbean = "java.lang:type=Runtime"
+# paths = ["Uptime"]
+
+
+# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
+# [[inputs.kapacitor]]
+# ## Multiple URLs from which to read Kapacitor-formatted JSON
+# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
+# urls = [
+# "http://localhost:9092/kapacitor/v1/debug/vars"
+# ]
+#
+# ## Time limit for http requests
+# timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Get kernel statistics from /proc/vmstat
+# [[inputs.kernel_vmstat]]
+# # no configuration
+
+
+# # Read status information from one or more Kibana servers
+# [[inputs.kibana]]
+# ## Specify a list of one or more Kibana servers
+# servers = ["http://localhost:5601"]
+#
+# ## Timeout for HTTP requests
+# timeout = "5s"
+#
+# ## HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from the Kubernetes api
+# [[inputs.kube_inventory]]
+# ## URL for the Kubernetes API
+# url = "https://127.0.0.1"
+#
+# ## Namespace to use. Set to "" to use all namespaces.
+# # namespace = "default"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /run/secrets/kubernetes.io/serviceaccount/token
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional Resources to exclude from gathering
+# ## Leave blank to try to gather everything available.
+# ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes",
+# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets"
+# # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## Optional Resources to include when gathering
+# ## Overrides resource_exclude if both set.
+# # resource_include = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## selectors to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all selectors as tags
+# ## selector_exclude overrides selector_include if both set.
+# # selector_include = [] +# # selector_exclude = ["*"] +# +# ## Optional TLS Config +# # tls_ca = "/path/to/cafile" +# # tls_cert = "/path/to/certfile" +# # tls_key = "/path/to/keyfile" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from the kubernetes kubelet api +# [[inputs.kubernetes]] +# ## URL for the kubelet +# url = "http://127.0.0.1:10255" +# +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# ## If both of these are empty, we'll use the default serviceaccount: +# ## at: /run/secrets/kubernetes.io/serviceaccount/token +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" +# +# ## Pod labels to be added as tags. An empty array for both include and +# ## exclude will include all labels. +# # label_include = [] +# # label_exclude = ["*"] +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from a LeoFS Server via SNMP +# [[inputs.leofs]] +# ## An array of URLs of the form: +# ## host [ ":" port] +# servers = ["127.0.0.1:4020"] + + +# # Provides Linux sysctl fs metrics +# [[inputs.linux_sysctl_fs]] +# # no configuration + + +# # Read metrics exposed by Logstash +# [[inputs.logstash]] +# ## The URL of the exposed Logstash API endpoint. +# url = "http://127.0.0.1:9600" +# +# ## Use Logstash 5 single pipeline API, set to true when monitoring +# ## Logstash 5. +# # single_pipeline = false +# +# ## Enable optional collection components. Can contain +# ## "pipelines", "process", and "jvm". +# # collect = ["pipelines", "process", "jvm"] +# +# ## Timeout for HTTP requests. +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials. +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Use TLS but skip chain & host verification. +# # insecure_skip_verify = false +# +# ## Optional HTTP headers. +# # [inputs.logstash.headers] +# # "X-Special-Header" = "Special-Value" + + +# # Read metrics from local Lustre service on OST, MDS +# [[inputs.lustre2]] +# ## An array of /proc globs to search for Lustre stats +# ## If not specified, the default will work on Lustre 2.5.x +# ## +# # ost_procfiles = [ +# # "/proc/fs/lustre/obdfilter/*/stats", +# # "/proc/fs/lustre/osd-ldiskfs/*/stats", +# # "/proc/fs/lustre/obdfilter/*/job_stats", +# # ] +# # mds_procfiles = [ +# # "/proc/fs/lustre/mdt/*/md_stats", +# # "/proc/fs/lustre/mdt/*/job_stats", +# # ] + + +# # Gathers metrics from the /3.0/reports MailChimp API +# [[inputs.mailchimp]] +# ## MailChimp API key +# ## get from https://admin.mailchimp.com/account/api/ +# api_key = "" # required +# ## Reports for campaigns sent more than days_old ago will not be collected. +# ## 0 means collect all. +# days_old = 0 +# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old +# # campaign_id = "" + + +# # Retrieves information on a specific host in a MarkLogic Cluster +# [[inputs.marklogic]] +# ## Base URL of the MarkLogic HTTP Server. +# url = "http://localhost:8002" +# +# ## List of specific hostnames to retrieve information. At least (1) required. 
+# # hosts = ["hostname1", "hostname2"] +# +# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges +# # username = "myuser" +# # password = "mypassword" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or many mcrouter servers +# [[inputs.mcrouter]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc. +# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" + + +# # Get md array statistics from /proc/mdstat +# [[inputs.mdstat]] +# ## Sets file path +# ## If not specified, then default is /proc/mdstat +# # file_name = "/proc/mdstat" + + +# # Read metrics from one or many memcached servers +# [[inputs.memcached]] +# ## An array of address to gather stats about. Specify an ip on hostname +# ## with optional port. ie localhost, 10.0.0.1:11211, etc. +# servers = ["localhost:11211"] +# # unix_sockets = ["/var/run/memcached.sock"] + + +# # Telegraf plugin for gathering metrics from N Mesos masters +# [[inputs.mesos]] +# ## Timeout, in ms. +# timeout = 100 +# +# ## A list of Mesos masters. +# masters = ["http://localhost:5050"] +# +# ## Master metrics groups to be collected, by default, all enabled. +# master_collections = [ +# "resources", +# "master", +# "system", +# "agents", +# "frameworks", +# "framework_offers", +# "tasks", +# "messages", +# "evqueue", +# "registrar", +# "allocator", +# ] +# +# ## A list of Mesos slaves, default is [] +# # slaves = [] +# +# ## Slave metrics groups to be collected, by default, all enabled. +# # slave_collections = [ +# # "resources", +# # "agent", +# # "system", +# # "executors", +# # "tasks", +# # "messages", +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collects scores from a Minecraft server's scoreboard using the RCON protocol +# [[inputs.minecraft]] +# ## Address of the Minecraft server. +# # server = "localhost" +# +# ## Server RCON Port. +# # port = "25575" +# +# ## Server RCON Password. +# password = "" +# +# ## Uncomment to remove deprecated metric components. +# # tagdrop = ["server"] + + +# # Retrieve data from MODBUS slave devices +# [[inputs.modbus]] +# ## Connection Configuration +# ## +# ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or +# ## via serial line communication in binary (RTU) or readable (ASCII) encoding +# ## +# ## Device name +# name = "Device" +# +# ## Slave ID - addresses a MODBUS device on the bus +# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] +# slave_id = 1 +# +# ## Timeout for each request +# timeout = "1s" +# +# ## Maximum number of retries and the time to wait between retries +# ## when a slave-device is busy. 
+# # busy_retries = 0 +# # busy_retries_wait = "100ms" +# +# # TCP - connect via Modbus/TCP +# controller = "tcp://localhost:502" +# +# ## Serial (RS485; RS232) +# # controller = "file:///dev/ttyUSB0" +# # baud_rate = 9600 +# # data_bits = 8 +# # parity = "N" +# # stop_bits = 1 +# +# ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" +# ## default behaviour is "TCP" if the controller is TCP +# ## For Serial you can choose between "RTU" and "ASCII" +# # transmission_mode = "RTU" +# +# ## Measurements +# ## +# +# ## Digital Variables, Discrete Inputs and Coils +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## address - variable address +# +# discrete_inputs = [ +# { name = "start", address = [0]}, +# { name = "stop", address = [1]}, +# { name = "reset", address = [2]}, +# { name = "emergency_stop", address = [3]}, +# ] +# coils = [ +# { name = "motor1_run", address = [0]}, +# { name = "motor1_jog", address = [1]}, +# { name = "motor1_stop", address = [2]}, +# ] +# +# ## Analog Variables, Input Registers and Holding Registers +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## byte_order - the ordering of bytes +# ## |---AB, ABCD - Big Endian +# ## |---BA, DCBA - Little Endian +# ## |---BADC - Mid-Big Endian +# ## |---CDAB - Mid-Little Endian +# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, +# ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) +# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) +# ## scale - the final numeric variable representation +# ## address - variable address +# +# holding_registers = [ +# { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, +# { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]}, +# { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]}, +# { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]}, +# { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]}, +# { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]}, +# ] +# input_registers = [ +# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, +# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, +# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, +# ] + + +# # Read metrics from one or many MongoDB servers +# [[inputs.mongodb]] +# ## An array of URLs of the form: +# ## "mongodb://" [user ":" pass "@"] host [ ":" port] +# ## For example: +# ## mongodb://user:auth_key@10.10.3.30:27017, +# ## mongodb://10.10.3.33:18832, +# servers = ["mongodb://127.0.0.1:27017?connect=direct"] +# +# ## When true, collect cluster status +# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which +# ## may have an impact on performance. +# # gather_cluster_status = true +# +# ## When true, collect per database stats +# # gather_perdb_stats = false +# +# ## When true, collect per collection stats +# # gather_col_stats = false +# +# ## When true, collect usage statistics for each collection +# ## (insert, update, queries, remove, getmore, commands etc...). 
+# # gather_top_stat = false
+#
+# ## List of dbs where collection stats are collected
+# ## If empty, all dbs are included
+# # col_stats_dbs = ["local"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics and status information about processes managed by Monit
+# [[inputs.monit]]
+# ## Monit HTTPD address
+# address = "http://127.0.0.1:2812"
+#
+# ## Username and Password for Monit
+# # username = ""
+# # password = ""
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Aggregates the contents of multiple files into a single point
+# [[inputs.multifile]]
+# ## Base directory where telegraf will look for files.
+# ## Omit this option to use absolute paths.
+# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
+#
+# ## If true, Telegraf discards all data when a single file can't be read.
+# ## Otherwise, Telegraf omits the field generated from this file.
+# # fail_early = true
+#
+# ## Files to parse each interval.
+# [[inputs.multifile.file]]
+# file = "in_pressure_input"
+# dest = "pressure"
+# conversion = "float"
+# [[inputs.multifile.file]]
+# file = "in_temp_input"
+# dest = "temperature"
+# conversion = "float(3)"
+# [[inputs.multifile.file]]
+# file = "in_humidityrelative_input"
+# dest = "humidityrelative"
+# conversion = "float(3)"
+
+
+# # Read metrics from one or many mysql servers
+# [[inputs.mysql]]
+# ## specify servers via a url matching:
+# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
+# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
+# ## e.g.
+# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
+# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
+# #
+# ## If no servers are specified, then localhost is used as the host.
+# servers = ["tcp(127.0.0.1:3306)/"]
+#
+# ## Selects the metric output format.
+# ##
+# ## This option exists to maintain backwards compatibility, if you have
+# ## existing metrics do not set or change this value until you are ready to
+# ## migrate to the new format.
+# ##
+# ## If you do not have existing metrics from this plugin, set to the latest
+# ## version.
+# ##
+# ## Telegraf >=1.6: metric_version = 2
+# ## <1.6: metric_version = 1 (or unset)
+# metric_version = 2
+#
+# ## if the list is empty, then metrics are gathered from all database tables
+# # table_schema_databases = []
+#
+# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided in the list above
+# # gather_table_schema = false
+#
+# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
+# # gather_process_list = false
+#
+# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
+# # gather_user_statistics = false
+#
+# ## gather auto_increment columns and max values from information schema
+# # gather_info_schema_auto_inc = false
+#
+# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
+# # gather_innodb_metrics = false
+#
+# ## gather metrics from SHOW SLAVE STATUS command output
+# # gather_slave_status = false
+#
+# ## gather metrics from all channels from SHOW SLAVE STATUS command output
+# # gather_all_slave_channels = false
+#
+# ## use MariaDB dialect for all channels SHOW SLAVE STATUS
+# # mariadb_dialect = false
+#
+# ## gather metrics from SHOW BINARY LOGS command output
+# # gather_binary_logs = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES
+# # gather_global_variables = true
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
+# # gather_table_io_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
+# # gather_table_lock_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
+# # gather_index_io_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
+# # gather_event_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
+# # gather_file_events_stats = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
+# # gather_perf_events_statements = false
+#
+# ## the limits for metrics from perf_events_statements
+# # perf_events_statements_digest_text_limit = 120
+# # perf_events_statements_limit = 250
+# # perf_events_statements_time_limit = 86400
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
+# # gather_perf_sum_per_acc_per_event = false
+#
+# ## list of events to be gathered for gather_perf_sum_per_acc_per_event
+# ## if the list is empty, all events will be gathered
+# # perf_summary_events = []
+#
+# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
+# ## example: interval_slow = "30m"
+# # interval_slow = ""
+#
+# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Provides metrics about the state of a NATS server
+# [[inputs.nats]]
+# ## The address of the monitoring endpoint of the NATS server
+# server = "http://localhost:8222"
+#
+# ## Maximum time to receive response
+# # response_timeout = "5s"
+
+
+# # Neptune Apex data collector
+# [[inputs.neptune_apex]]
+# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex.
+# ## Measurements will be logged under "apex".
+#
+# ## The base URL of the local Apex(es). If you specify more than one server, they will
+# ## be differentiated by the "source" tag.
+# servers = [ +# "http://apex.local", +# ] +# +# ## The response_timeout specifies how long to wait for a reply from the Apex. +# #response_timeout = "5s" + + +# # Read metrics about network interface usage +# [[inputs.net]] +# ## By default, telegraf gathers stats from any up interface (excluding loopback) +# ## Setting interfaces will tell it to gather these explicit interfaces, +# ## regardless of status. +# ## +# # interfaces = ["eth0"] +# ## +# ## On linux systems telegraf also collects protocol stats. +# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. +# ## +# # ignore_protocol_stats = false +# ## + + +# # Collect response time of a TCP or UDP connection +# [[inputs.net_response]] +# ## Protocol, must be "tcp" or "udp" +# ## NOTE: because the "udp" protocol does not respond to requests, it requires +# ## a send/expect string pair (see below). +# protocol = "tcp" +# ## Server address (default localhost) +# address = "localhost:80" +# +# ## Set timeout +# # timeout = "1s" +# +# ## Set read timeout (only used if expecting a response) +# # read_timeout = "1s" +# +# ## The following options are required for UDP checks. For TCP, they are +# ## optional. The plugin will send the given string to the server and then +# ## expect to receive the given 'expect' string back. +# ## string sent to the server +# # send = "ssh" +# ## expected string in answer +# # expect = "ssh" +# +# ## Uncomment to remove deprecated fields +# # fielddrop = ["result_type", "string_found"] + + +# # Read TCP metrics such as established, time wait and sockets counts. +# [[inputs.netstat]] +# # no configuration + + +# # Read per-mount NFS client metrics from /proc/self/mountstats +# [[inputs.nfsclient]] +# ## Read more low-level metrics (optional, defaults to false) +# # fullstat = false +# +# ## List of mounts to explicitly include or exclude (optional) +# ## The pattern (Go regexp) is matched against the mount point (not the +# ## device being mounted). If include_mounts is set, all mounts are ignored +# ## unless present in the list. If a mount is listed in both include_mounts +# ## and exclude_mounts, it is excluded. Go regexp patterns can be used. +# # include_mounts = [] +# # exclude_mounts = [] +# +# ## List of operations to include or exclude from collecting. This applies +# ## only when fullstat=true. Semantics are similar to {include,exclude}_mounts: +# ## the default is to collect everything; when include_operations is set, only +# ## those OPs are collected; when exclude_operations is set, all are collected +# ## except those listed. If include and exclude are set, the OP is excluded. +# ## See /proc/self/mountstats for a list of valid operations; note that +# ## NFSv3 and NFSv4 have different lists. While it is not possible to +# ## have different include/exclude lists for NFSv3/4, unused elements +# ## in the list should be okay. It is possible to have different lists +# ## for different mountpoints: use multiple [[inputs.nfsclient]] stanzas, +# ## with their own lists. See "include_mounts" above, and be careful of +# ## duplicate metrics. +# # include_operations = [] +# # exclude_operations = [] + + +# # Read Nginx's basic status information (ngx_http_stub_status_module) +# [[inputs.nginx]] +# # An array of Nginx stub_status URI to gather stats.
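+# # e.g. several servers can be listed at once; the hostnames here are illustrative placeholders: +# # urls = ["http://nginx-1.example/server_status", "http://nginx-2.example/server_status"]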
+# urls = ["http://localhost/server_status"] +# +# ## Optional TLS Config +# tls_ca = "/etc/telegraf/ca.pem" +# tls_cert = "/etc/telegraf/cert.cer" +# tls_key = "/etc/telegraf/key.key" +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = false +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Read Nginx Plus' full status information (ngx_http_status_module) +# [[inputs.nginx_plus]] +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx Plus Api documentation +# [[inputs.nginx_plus_api]] +# ## An array of API URI to gather stats. +# urls = ["http://localhost/api"] +# +# # Nginx API version, default: 3 +# # api_version = 3 +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-sts) +# [[inputs.nginx_sts]] +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) +# [[inputs.nginx_upstream_check]] +# ## An URL where Nginx Upstream check module is enabled +# ## It should be set to return a JSON formatted response +# url = "http://127.0.0.1/status?format=json" +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "check.example.com" +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-vts) +# [[inputs.nginx_vts]] +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # A plugin to collect stats from the NSD authoritative DNS name server +# [[inputs.nsd]] +# ## Address of server to connect to, optionally ':port'. Defaults to the +# ## address in the nsd config file. 
+# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the nsd-control binary can be overridden with: +# # binary = "/usr/sbin/nsd-control" +# +# ## The default location of the nsd config file can be overridden with: +# # config_file = "/etc/nsd/nsd.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" + + +# # Read NSQ topic and channel statistics. +# [[inputs.nsq]] +# ## An array of NSQD HTTP API endpoints +# endpoints = ["http://localhost:4151"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collect kernel snmp counters and network interface statistics +# [[inputs.nstat]] +# ## file paths for proc files. If empty default paths will be used: +# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 +# ## These can also be overridden with env variables, see README. +# proc_net_netstat = "/proc/net/netstat" +# proc_net_snmp = "/proc/net/snmp" +# proc_net_snmp6 = "/proc/net/snmp6" +# ## dump metrics with 0 values too +# dump_zeros = true + + +# # Get standard NTP query metrics, requires ntpq executable. +# [[inputs.ntpq]] +# ## If false, set the -n ntpq flag. Can reduce metric gather time. +# dns_lookup = true + + +# # Pulls statistics from nvidia GPUs attached to the host +# [[inputs.nvidia_smi]] +# ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" +# ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), +# ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned +# # bin_path = "/usr/bin/nvidia-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + +# # Retrieve data from OPCUA devices +# [[inputs.opcua]] +# ## Metric name +# # name = "opcua" +# # +# ## OPC UA Endpoint URL +# # endpoint = "opc.tcp://localhost:4840" +# # +# ## Maximum time allowed to establish a connect to the endpoint. +# # connect_timeout = "10s" +# # +# ## Maximum time allowed for a request over the estabilished connection. +# # request_timeout = "5s" +# # +# ## Security policy, one of "None", "Basic128Rsa15", "Basic256", +# ## "Basic256Sha256", or "auto" +# # security_policy = "auto" +# # +# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto" +# # security_mode = "auto" +# # +# ## Path to cert.pem. Required when security mode or policy isn't "None". +# ## If cert path is not supplied, self-signed cert and key will be generated. +# # certificate = "/etc/telegraf/cert.pem" +# # +# ## Path to private key.pem. Required when security mode or policy isn't "None". +# ## If key path is not supplied, self-signed cert and key will be generated. +# # private_key = "/etc/telegraf/key.pem" +# # +# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To +# ## authenticate using a specific ID, select 'Certificate' or 'UserName' +# # auth_method = "Anonymous" +# # +# ## Username. Required for auth_method = "UserName" +# # username = "" +# # +# ## Password. 
Required for auth_method = "UserName" +# # password = "" +# # +# ## Node ID configuration +# ## name - field name to use in the output +# ## namespace - OPC UA namespace of the node (integer value 0 thru 3) +# ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) +# ## identifier - OPC UA ID (tag as shown in opcua browser) +# ## Example: +# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262"} +# # nodes = [ +# # {name="", namespace="", identifier_type="", identifier=""}, +# # {name="", namespace="", identifier_type="", identifier=""}, +# #] +# # +# ## Node Group +# ## Sets defaults for OPC UA namespace and ID type so they aren't required in +# ## every node. A group can also have a metric name that overrides the main +# ## plugin metric name. +# ## +# ## Multiple node groups are allowed +# #[[inputs.opcua.group]] +# ## Group Metric name. Overrides the top level name. If unset, the +# ## top level name is used. +# # name = +# # +# ## Group default namespace. If a node in the group doesn't set its +# ## namespace, this is used. +# # namespace = +# # +# ## Group default identifier type. If a node in the group doesn't set its +# ## identifier type, this is used. +# # identifier_type = +# # +# ## Node ID Configuration. Array of nodes with the same settings as above. +# # nodes = [ +# # {name="", namespace="", identifier_type="", identifier=""}, +# # {name="", namespace="", identifier_type="", identifier=""}, +# #] + + +# # OpenLDAP cn=Monitor plugin +# [[inputs.openldap]] +# host = "localhost" +# port = 389 +# +# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption. +# # note that port will likely need to be changed to 636 for ldaps +# # valid options: "" | "starttls" | "ldaps" +# tls = "" +# +# # skip peer certificate verification. Default is false. +# insecure_skip_verify = false +# +# # Path to PEM-encoded Root certificate to use to verify server certificate +# tls_ca = "/etc/ssl/certs.pem" +# +# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed. +# bind_dn = "" +# bind_password = "" +# +# # Reverse metric names so they sort more naturally. Recommended. +# # This defaults to false if unset, but is set to true when generating a new config +# reverse_metric_names = true + + +# # Get standard NTP query metrics from OpenNTPD. +# [[inputs.openntpd]] +# ## Run ntpctl binary with sudo. +# # use_sudo = false +# +# ## Location of the ntpctl binary. +# # binary = "/usr/sbin/ntpctl" +# +# ## Maximum time the ntpctl binary is allowed to run. +# # timeout = "5ms" + + +# # A plugin to collect stats from OpenSMTPD, a free implementation of the server-side SMTP protocol +# [[inputs.opensmtpd]] +# ## If running as a restricted user you can prepend sudo for additional access: +# #use_sudo = false +# +# ## The default location of the smtpctl binary can be overridden with: +# binary = "/usr/sbin/smtpctl" +# +# ## The default timeout of 1000ms can be overridden with (in milliseconds): +# timeout = 1000 + + +# # Read current weather and forecasts data from openweathermap.org +# [[inputs.openweathermap]] +# ## OpenWeatherMap API key. +# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +# +# ## City IDs to collect weather data from. +# city_id = ["5391959"] +# +# ## Language of the description field.
Can be one of "ar", "bg", +# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu", +# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru", +# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw" +# # lang = "en" +# +# ## APIs to fetch; can contain "weather" or "forecast". +# fetch = ["weather", "forecast"] +# +# ## OpenWeatherMap base URL +# # base_url = "https://api.openweathermap.org/" +# +# ## Timeout for HTTP response. +# # response_timeout = "5s" +# +# ## Preferred unit system for temperature and wind speed. Can be one of +# ## "metric", "imperial", or "standard". +# # units = "metric" +# +# ## Query interval; OpenWeatherMap updates their weather data every 10 +# ## minutes. +# interval = "10m" + + +# # Read metrics of passenger using passenger-status +# [[inputs.passenger]] +# ## Path of passenger-status. +# ## +# ## Plugin gather metric via parsing XML output of passenger-status +# ## More information about the tool: +# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html +# ## +# ## If no path is specified, then the plugin simply execute passenger-status +# ## hopefully it can be found in your PATH +# command = "passenger-status -v --show=xml" + + +# # Gather counters from PF +# [[inputs.pf]] +# ## PF require root access on most systems. +# ## Setting 'use_sudo' to true will make use of sudo to run pfctl. +# ## Users must configure sudo to allow telegraf user to run pfctl with no password. +# ## pfctl can be restricted to only list command "pfctl -s info". +# use_sudo = false + + +# # Read metrics of phpfpm, via HTTP status page or socket +# [[inputs.phpfpm]] +# ## An array of addresses to gather stats about. Specify an ip or hostname +# ## with optional port and path +# ## +# ## Plugin can be configured in three modes (either can be used): +# ## - http: the URL must start with http:// or https://, ie: +# ## "http://localhost/status" +# ## "http://192.168.130.1/status?full" +# ## +# ## - unixsocket: path to fpm socket, ie: +# ## "/var/run/php5-fpm.sock" +# ## or using a custom fpm status path: +# ## "/var/run/php5-fpm.sock:fpm-custom-status-path" +# ## +# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: +# ## "fcgi://10.0.0.12:9000/status" +# ## "cgi://10.0.10.12:9001/status" +# ## +# ## Example of multiple gathering from local socket and remote host +# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] +# urls = ["http://localhost/status"] +# +# ## Duration allowed to complete HTTP requests. +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Ping given url(s) and return statistics +# [[inputs.ping]] +# ## Hosts to send ping packets to. +# urls = ["example.org"] +# +# ## Method used for sending pings, can be either "exec" or "native". When set +# ## to "exec" the systems ping command will be executed. When set to "native" +# ## the plugin will send pings directly. +# ## +# ## While the default is "exec" for backwards compatibility, new deployments +# ## are encouraged to use the "native" method for improved compatibility and +# ## performance. +# # method = "exec" +# +# ## Number of ping packets to send per interval. Corresponds to the "-c" +# ## option of the ping command. +# # count = 1 +# +# ## Time to wait between sending ping packets in seconds. 
Operates like the +# ## "-i" option of the ping command. +# # ping_interval = 1.0 +# +# ## If set, the time to wait for a ping response in seconds. Operates like +# ## the "-W" option of the ping command. +# # timeout = 1.0 +# +# ## If set, the total ping deadline, in seconds. Operates like the -w option +# ## of the ping command. +# # deadline = 10 +# +# ## Interface or source address to send ping from. Operates like the -I or -S +# ## option of the ping command. +# # interface = "" +# +# ## Percentiles to calculate. This only works with the native method. +# # percentiles = [50, 95, 99] +# +# ## Specify the ping executable binary. +# # binary = "ping" +# +# ## Arguments for ping command. When arguments is not empty, the command from +# ## the binary option will be used and other options (ping_interval, timeout, +# ## etc) will be ignored. +# # arguments = ["-c", "3"] +# +# ## Use only IPv6 addresses when resolving a hostname. +# # ipv6 = false +# +# ## Number of data bytes to be sent. Corresponds to the "-s" +# ## option of the ping command. This only works with the native method. +# # size = 56 + + +# # Measure postfix queue statistics +# [[inputs.postfix]] +# ## Postfix queue directory. If not provided, telegraf will try to use +# ## 'postconf -h queue_directory' to determine it. +# # queue_directory = "/var/spool/postfix" + + +# # Read metrics from one or many PowerDNS servers +# [[inputs.powerdns]] +# ## An array of sockets to gather stats about. +# ## Specify a path to unix socket. +# unix_sockets = ["/var/run/pdns.controlsocket"] + + +# # Read metrics from one or many PowerDNS Recursor servers +# [[inputs.powerdns_recursor]] +# ## Path to the Recursor control socket. +# unix_sockets = ["/var/run/pdns_recursor.controlsocket"] +# +# ## Directory to create receive socket. This default is likely not writable, +# ## please reference the full plugin documentation for a recommended setup. +# # socket_dir = "/var/run/" +# ## Socket permissions for the receive socket. +# # socket_mode = "0666" + + +# # Monitor process cpu and memory usage +# [[inputs.procstat]] +# ## PID file to monitor process +# pid_file = "/var/run/nginx.pid" +# ## executable name (ie, pgrep <exe>) +# # exe = "nginx" +# ## pattern as argument for pgrep (ie, pgrep -f <pattern>) +# # pattern = "nginx" +# ## user as argument for pgrep (ie, pgrep -u <user>) +# # user = "nginx" +# ## Systemd unit name, supports globs when include_systemd_children is set to true +# # systemd_unit = "nginx.service" +# # include_systemd_children = false +# ## CGroup name or path, supports globs +# # cgroup = "systemd/system.slice/nginx.service" +# +# ## Windows service name +# # win_service = "" +# +# ## override for process_name +# ## This is optional; default is sourced from /proc/<pid>/status +# # process_name = "bar" +# +# ## Field name prefix +# # prefix = "" +# +# ## When true add the full cmdline as a tag. +# # cmdline_tag = false +# +# ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. +# # mode = "irix" +# +# ## Add the PID as a tag instead of as a field. When collecting multiple +# ## processes with otherwise matching tags this setting should be enabled to +# ## ensure each process has a unique identity. +# ## +# ## Enabling this option may result in a large number of series, especially +# ## when processes have a short lifetime. +# # pid_tag = false +# +# ## Method to use when finding process IDs. Can be one of 'pgrep', or +# ## 'native'.
The pgrep finder calls the pgrep executable in the PATH while +# ## the native finder performs the search directly in a manner dependent on the +# ## platform. Default is 'pgrep' +# # pid_finder = "pgrep" + + +# # Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2). +# [[inputs.proxmox]] +# ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. +# base_url = "https://localhost:8006/api2/json" +# api_token = "USER@REALM!TOKENID=UUID" +# ## Node name, defaults to OS hostname +# # node_name = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = false +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Reads last_run_summary.yaml file and converts to measurements +# [[inputs.puppetagent]] +# ## Location of puppet last run summary file +# location = "/var/lib/puppet/state/last_run_summary.yaml" + + +# # Reads metrics from RabbitMQ servers via the Management Plugin +# [[inputs.rabbitmq]] +# ## Management Plugin url. (default: http://localhost:15672) +# # url = "http://localhost:15672" +# ## Tag added to rabbitmq_overview series; deprecated: use tags +# # name = "rmq-server-1" +# ## Credentials +# # username = "guest" +# # password = "guest" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional request timeouts +# ## +# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# +# ## A list of nodes to gather as the rabbitmq_node measurement. If not +# ## specified, metrics for all nodes are gathered. +# # nodes = ["rabbit@node1", "rabbit@node2"] +# +# ## A list of queues to gather as the rabbitmq_queue measurement. If not +# ## specified, metrics for all queues are gathered. +# # queues = ["telegraf"] +# +# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not +# ## specified, metrics for all exchanges are gathered. +# # exchanges = ["telegraf"] +# +# ## Metrics to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all metrics +# ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" +# # metric_include = [] +# # metric_exclude = [] +# +# ## Queues to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all queues +# queue_name_include = [] +# queue_name_exclude = [] +# +# ## Federation upstreams to include and exclude when gathering the rabbitmq_federation measurement. +# ## If neither are specified, metrics for all federation upstreams are gathered. +# ## Federation link metrics will only be gathered for queues and exchanges +# ## whose non-federation metrics will be collected (e.g. a queue excluded +# ## by the 'queue_name_exclude' option will also be excluded from federation). +# ## Globs accepted.
+# # federation_upstream_include = ["dataCentre-*"] +# # federation_upstream_exclude = [] + + +# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) +# [[inputs.raindrops]] +# ## An array of raindrops middleware URI to gather stats. +# urls = ["http://localhost:8080/_raindrops"] + + +# # Reads metrics from RavenDB servers via the Monitoring Endpoints +# [[inputs.ravendb]] +# ## Node URL and port that RavenDB is listening on +# url = "https://localhost:8080" +# +# ## RavenDB X509 client certificate setup +# # tls_cert = "/etc/telegraf/raven.crt" +# # tls_key = "/etc/telegraf/raven.key" +# +# ## Optional request timeout +# ## +# ## Timeout specifies the amount of time to wait +# ## for a server's response headers after fully writing the request and +# ## the time limit for requests made by this client +# # timeout = "5s" +# +# ## List of statistics which are collected +# # At least one is required +# # Allowed values: server, databases, indexes, collections +# # +# # stats_include = ["server", "databases", "indexes", "collections"] +# +# ## List of databases where database stats are collected +# ## If empty, all databases are included +# # db_stats_dbs = [] +# +# ## List of databases where index stats are collected +# ## If empty, indexes from all databases are included +# # index_stats_dbs = [] +# +# ## List of databases where collection stats are collected +# ## If empty, collections from all databases are included +# # collection_stats_dbs = [] + + +# # Read CPU, fan, power supply and voltage metrics of hardware servers through Redfish APIs +# [[inputs.redfish]] +# ## Server url +# address = "https://127.0.0.1:5000" +# +# ## Username, Password for hardware server +# username = "root" +# password = "password123456" +# +# ## ComputerSystemId +# computer_system_id="2M220100SL" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or many redis servers +# [[inputs.redis]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## tcp://localhost:6379 +# ## tcp://:password@192.168.99.100 +# ## unix:///var/run/redis.sock +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 6379 is used +# servers = ["tcp://localhost:6379"] +# +# ## Optional. Specify redis commands to retrieve values +# # [[inputs.redis.commands]] +# # # The command to run where each argument is a separate element +# # command = ["get", "sample-key"] +# # # The field to store the result in +# # field = "sample-key-value" +# # # The type of the result +# # # Can be "string", "integer", or "float" +# # type = "string" +# +# ## specify server password +# # password = "s#cr@t%" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Read metrics from one or many RethinkDB servers +# [[inputs.rethinkdb]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port and password. ie, +# ## rethinkdb://user:auth_key@10.10.3.30:28105, +# ## rethinkdb://10.10.3.33:18832, +# ## 10.0.0.1:10000, etc.
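+# ## e.g. a mixed list of plain addresses and URIs is possible (illustrative): +# ## servers = ["10.0.0.1:28015", "rethinkdb://10.10.3.33:18832"]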
+# servers = ["127.0.0.1:28015"] +# ## +# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization, +# ## protocol have to be named "rethinkdb2" - it will use 1_0 H. +# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"] +# ## +# ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol +# ## have to be named "rethinkdb". +# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] + + +# # Read metrics one or many Riak servers +# [[inputs.riak]] +# # Specify a list of one or more riak http servers +# servers = ["http://localhost:8098"] + + +# # Read API usage and limits for a Salesforce organisation +# [[inputs.salesforce]] +# ## specify your credentials +# ## +# username = "your_username" +# password = "your_password" +# ## +# ## (optional) security token +# # security_token = "your_security_token" +# ## +# ## (optional) environment type (sandbox or production) +# ## default is: production +# ## +# # environment = "production" +# ## +# ## (optional) API version (default: "39.0") +# ## +# # version = "39.0" + + +# # Monitor sensors, requires lm-sensors package +# [[inputs.sensors]] +# ## Remove numbers from field names. +# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. +# # remove_numbers = true +# +# ## Timeout is the maximum amount of time that the sensors command can run. +# # timeout = "5s" + + +# # Read metrics from storage devices supporting S.M.A.R.T. +# [[inputs.smart]] +# ## Optionally specify the path to the smartctl executable +# # path_smartctl = "/usr/bin/smartctl" +# +# ## Optionally specify the path to the nvme-cli executable +# # path_nvme = "/usr/bin/nvme" +# +# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case +# ## ["auto-on"] - automatically find and enable additional vendor specific disk info +# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info +# # enable_extensions = ["auto-on"] +# +# ## On most platforms used cli utilities requires root access. +# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli. +# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli +# ## without a password. +# # use_sudo = false +# +# ## Skip checking disks in this power mode. Defaults to +# ## "standby" to not wake up disks that have stopped rotating. +# ## See --nocheck in the man pages for smartctl. +# ## smartctl version 5.41 and 5.42 have faulty detection of +# ## power mode and might require changing this value to +# ## "never" depending on your disks. +# # nocheck = "standby" +# +# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed +# ## information from each drive into the 'smart_attribute' measurement. +# # attributes = false +# +# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed. +# # excludes = [ "/dev/pass6" ] +# +# ## Optionally specify devices and device type, if unset +# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done +# ## and all found will be included except for the excluded in excludes. +# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"] +# +# ## Timeout for the cli command to complete. +# # timeout = "30s" + + +# # Retrieves SNMP values from remote agents +# [[inputs.snmp]] +# ## Agent addresses to retrieve values from. +# ## format: agents = [":"] +# ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. 
+# ## default is udp +# ## port: optional +# ## example: agents = ["udp://127.0.0.1:161"] +# ## agents = ["tcp://127.0.0.1:161"] +# ## agents = ["udp4://v4only-snmp-agent"] +# agents = ["udp://127.0.0.1:161"] +# +# ## Timeout for each request. +# # timeout = "5s" +# +# ## SNMP version; can be 1, 2, or 3. +# # version = 2 +# +# ## Agent host tag; the tag used to reference the source host +# # agent_host_tag = "agent_host" +# +# ## SNMP community string. +# # community = "public" +# +# ## Number of retries to attempt. +# # retries = 3 +# +# ## The GETBULK max-repetitions parameter. +# # max_repetitions = 10 +# +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Context Name. +# # context_name = "" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" +# +# ## Add fields and tables defining the variables you wish to collect. This +# ## example collects the system uptime and interface variables. Reference the +# ## full plugin documentation for configuration details. + + +# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. +# [[inputs.snmp_legacy]] +# ## Use 'oids.txt' file to translate oids to names +# ## To generate 'oids.txt' you need to run: +# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt +# ## Or if you have another MIB folder with custom MIBs +# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt +# snmptranslate_file = "/tmp/oids.txt" +# [[inputs.snmp.host]] +# address = "192.168.2.2:161" +# # SNMP community +# community = "public" # default public +# # SNMP version (1, 2 or 3) +# # Version 3 not supported yet +# version = 2 # default 2 +# # SNMP response timeout +# timeout = 2.0 # default 2.0 +# # SNMP request retries +# retries = 2 # default 2 +# # Which get/bulk do you want to collect for this host +# collect = ["mybulk", "sysservices", "sysdescr"] +# # Simple list of OIDs to get, in addition to "collect" +# get_oids = [] +# +# [[inputs.snmp.host]] +# address = "192.168.2.3:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# collect = ["mybulk"] +# get_oids = [ +# "ifNumber", +# ".1.3.6.1.2.1.1.3.0", +# ] +# +# [[inputs.snmp.get]] +# name = "ifnumber" +# oid = "ifNumber" +# +# [[inputs.snmp.get]] +# name = "interface_speed" +# oid = "ifSpeed" +# instance = "0" +# +# [[inputs.snmp.get]] +# name = "sysuptime" +# oid = ".1.3.6.1.2.1.1.3.0" +# unit = "second" +# +# [[inputs.snmp.bulk]] +# name = "mybulk" +# max_repetition = 127 +# oid = ".1.3.6.1.2.1.1" +# +# [[inputs.snmp.bulk]] +# name = "ifoutoctets" +# max_repetition = 127 +# oid = "ifOutOctets" +# +# [[inputs.snmp.host]] +# address = "192.168.2.13:161" +# #address = "127.0.0.1:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# #collect = ["mybulk", "sysservices", "sysdescr", "systype"] +# collect = ["sysuptime" ] +# [[inputs.snmp.host.table]] +# name = "iftable3" +# include_instances = ["enp5s0", "eth1"] +# +# # SNMP TABLEs +# # table with neither mapping nor subtables +# [[inputs.snmp.table]] +# name = "iftable1" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# +# # table without
mapping but with subtables +# [[inputs.snmp.table]] +# name = "iftable2" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# sub_tables = [".1.3.6.1.2.1.2.2.1.13"] +# +# # table with mapping but without subtables +# [[inputs.snmp.table]] +# name = "iftable3" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty, get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty, get all subtables +# +# # table with both mapping and subtables +# [[inputs.snmp.table]] +# name = "iftable4" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty, get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty, get all subtables +# # sub_tables do not have to be "real" subtables +# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + + +# # Read stats from one or more Solr servers or cores +# [[inputs.solr]] +# ## specify a list of one or more Solr servers +# servers = ["http://localhost:8983"] +# +# ## specify a list of one or more Solr cores (default - all) +# # cores = ["main"] +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" + + +# # Gather timeseries from Google Cloud Platform v3 monitoring API +# [[inputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## Include timeseries that start with the given metric type. +# metric_type_prefix_include = [ +# "compute.googleapis.com/", +# ] +# +# ## Exclude timeseries that start with the given metric type. +# # metric_type_prefix_exclude = [] +# +# ## Many metrics are updated once per minute; it is recommended to override +# ## the agent level interval with a value of 1m or greater. +# interval = "1m" +# +# ## Maximum number of API calls to make per second. The quota for accounts +# ## varies; it can be viewed on the API dashboard: +# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits +# # rate_limit = 14 +# +# ## The delay and window options control the number of points selected on +# ## each gather. When set, metrics are gathered between: +# ## start: now() - delay - window +# ## end: now() - delay +# # +# ## Collection delay; if set too low metrics may not yet be available. +# # delay = "5m" +# # +# ## If unset, the window will start at 1m and be updated dynamically to span +# ## the time between calls (approximately the length of the plugin interval). +# # window = "1m" +# +# ## TTL for cached list of metric types. This is the maximum amount of time +# ## it may take to discover new metrics. +# # cache_ttl = "1h" +# +# ## If true, raw bucket counts are collected for distribution value types. +# ## For a more lightweight collection, you may wish to disable and use +# ## distribution_aggregation_aligners instead. +# # gather_raw_distribution_buckets = true +# +# ## Aggregate functions to be used for metrics whose value type is +# ## distribution. These aggregate values are recorded in addition to raw +# ## bucket counts, if they are enabled. +# ## +# ## For a list of aligner strings see: +# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner +# # distribution_aggregation_aligners = [ +# # "ALIGN_PERCENTILE_99", +# # "ALIGN_PERCENTILE_95", +# # "ALIGN_PERCENTILE_50", +# # ] +# +# ## Filters can be added to reduce the number of time series matched. All +# ## functions are supported: starts_with, ends_with, has_substring, and +# ## one_of. Only the '=' operator is supported.
+# ## +# ## The logical operators when combining filters are defined statically using +# ## the following values: +# ## filter ::= <resource_labels> {AND <metric_labels>} +# ## resource_labels ::= <resource_labels> {OR <resource_label>} +# ## metric_labels ::= <metric_labels> {OR <metric_label>} +# ## +# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters +# # +# ## Resource labels refine the time series selection with the following expression: +# ## resource.labels.<key> = <value> +# # [[inputs.stackdriver.filter.resource_labels]] +# # key = "instance_name" +# # value = 'starts_with("localhost")' +# # +# ## Metric labels refine the time series selection with the following expression: +# ## metric.labels.<key> = <value> +# # [[inputs.stackdriver.filter.metric_labels]] +# # key = "device_name" +# # value = 'one_of("sda", "sdb")' + + +# # Get synproxy counter statistics from procfs +# [[inputs.synproxy]] +# # no configuration + + +# # Sysstat metrics collector +# [[inputs.sysstat]] +# ## Path to the sadc command. +# # +# ## Common Defaults: +# ## Debian/Ubuntu: /usr/lib/sysstat/sadc +# ## Arch: /usr/lib/sa/sadc +# ## RHEL/CentOS: /usr/lib64/sa/sadc +# sadc_path = "/usr/lib/sa/sadc" # required +# +# ## Path to the sadf command, if it is not in PATH +# # sadf_path = "/usr/bin/sadf" +# +# ## Activities is a list of activities that are passed as arguments to the +# ## sadc collector utility (e.g. DISK, SNMP, etc.) +# ## The more activities that are added, the more data is collected. +# # activities = ["DISK"] +# +# ## Group metrics to measurements. +# ## +# ## If group is false, each metric is prefixed with a description +# ## and is itself a measurement. +# ## +# ## If group is true, corresponding metrics are grouped into a single measurement. +# # group = true +# +# ## Options for the sadf command. The values on the left represent the sadf +# ## options and the values on the right their description (which are used for +# ## grouping and prefixing metrics). +# ## +# ## Run 'sar -h' or 'man sar' to find out the supported options for your +# ## sysstat version. +# [inputs.sysstat.options] +# -C = "cpu" +# -B = "paging" +# -b = "io" +# -d = "disk" # requires DISK activity +# "-n ALL" = "network" +# "-P ALL" = "per_cpu" +# -q = "queue" +# -R = "mem" +# -r = "mem_util" +# -S = "swap_util" +# -u = "cpu_util" +# -v = "inode" +# -W = "swap" +# -w = "task" +# # -H = "hugepages" # only available for newer linux distributions +# # "-I ALL" = "interrupts" # requires INT activity +# +# ## Device tags can be used to add additional tags for devices. +# ## For example the configuration below adds a tag vg with value rootvg for +# ## all metrics with sda devices. +# # [[inputs.sysstat.device_tags.sda]] +# # vg = "rootvg" + + +# # Gather systemd units state +# [[inputs.systemd_units]] +# ## Set timeout for systemctl execution +# # timeout = "1s" +# # +# ## Filter for a specific unit type, default is "service", other possible +# ## values are "socket", "target", "device", "mount", "automount", "swap", +# ## "timer", "path", "slice" and "scope": +# # unittype = "service" +# # +# ## Filter for a specific pattern, default is "" (i.e. all), other possible +# ## values are valid patterns for systemctl, e.g.
"a*" for all units with +# ## names starting with "a" +# # pattern = "" +# ## pattern = "telegraf* influxdb*" +# ## pattern = "a*" + + +# # Reads metrics from a Teamspeak 3 Server via ServerQuery +# [[inputs.teamspeak]] +# ## Server address for Teamspeak 3 ServerQuery +# # server = "127.0.0.1:10011" +# ## Username for ServerQuery +# username = "serverqueryuser" +# ## Password for ServerQuery +# password = "secret" +# ## Array of virtual servers +# # virtual_servers = [1] + + +# # Read metrics about temperature +# [[inputs.temp]] +# # no configuration + + +# # Read Tengine's basic status information (ngx_http_reqstat_module) +# [[inputs.tengine]] +# # An array of Tengine reqstat module URI to gather stats. +# urls = ["http://127.0.0.1/us"] +# +# # HTTP response timeout (default: 5s) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.cer" +# # tls_key = "/etc/telegraf/key.key" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather metrics from the Tomcat server status page. +# [[inputs.tomcat]] +# ## URL of the Tomcat server status +# # url = "http://127.0.0.1:8080/manager/status/all?XML=true" +# +# ## HTTP Basic Auth Credentials +# # username = "tomcat" +# # password = "s3cret" +# +# ## Request timeout +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Inserts sine and cosine waves for demonstration purposes +# [[inputs.trig]] +# ## Set the amplitude +# amplitude = 10.0 + + +# # Read Twemproxy stats data +# [[inputs.twemproxy]] +# ## Twemproxy stats address and port (no scheme) +# addr = "localhost:22222" +# ## Monitor pool name +# pools = ["redis_pool", "mc_pool"] + + +# # A plugin to collect stats from the Unbound DNS resolver +# [[inputs.unbound]] +# ## Address of server to connect to, read from unbound conf default, optionally ':port' +# ## Will lookup IP if given a hostname +# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the unbound-control binary can be overridden with: +# # binary = "/usr/sbin/unbound-control" +# +# ## The default location of the unbound config file can be overridden with: +# # config_file = "/etc/unbound/unbound.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" +# +# ## When set to true, thread metrics are tagged with the thread id. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. +# thread_as_tag = false + + +# # Read uWSGI metrics. +# [[inputs.uwsgi]] +# ## List with urls of uWSGI Stats servers. 
URL must match pattern: +# ## scheme://address[:port] +# ## +# ## For example: +# ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] +# servers = ["tcp://127.0.0.1:1717"] +# +# ## General connection timeout +# # timeout = "5s" + + +# # A plugin to collect stats from Varnish HTTP Cache +# [[inputs.varnish]] +# ## If running as a restricted user you can prepend sudo for additional access: +# #use_sudo = false +# +# ## The default location of the varnishstat binary can be overridden with: +# binary = "/usr/bin/varnishstat" +# +# ## By default, telegraf gathers stats for 3 metric points. +# ## Setting stats will override the defaults shown below. +# ## Glob matching can be used, ie, stats = ["MAIN.*"] +# ## stats may also be set to ["*"], which will collect all stats +# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] +# +# ## Optional name for the varnish instance (or working directory) to query +# ## Usually appended after -n in the varnish cli +# # instance_name = instanceName +# +# ## Timeout for varnishstat command +# # timeout = "1s" + + +# # Collect Wireguard server interface and peer statistics +# [[inputs.wireguard]] +# ## Optional list of Wireguard device/interface names to query. +# ## If omitted, all Wireguard interfaces are queried. +# # devices = ["wg0"] + + +# # Monitor wifi signal strength and quality +# [[inputs.wireless]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" + + +# # Reads metrics from an SSL certificate +# [[inputs.x509_cert]] +# ## List certificate sources +# ## Prefix your entry with 'file://' if you intend to use relative paths +# sources = ["tcp://example.org:443", "https://influxdata.com:443", +# "udp://127.0.0.1:4433", "/etc/ssl/certs/ssl-cert-snakeoil.pem", +# "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] +# +# ## Timeout for SSL connection +# # timeout = "5s" +# +# ## Pass a different name into the TLS request (Server Name Indication) +# ## example: server_name = "myhost.example.org" +# # server_name = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets +# [[inputs.zfs]] +# ## ZFS kstat path. Ignored on FreeBSD +# ## If not specified, then default is: +# # kstatPath = "/proc/spl/kstat/zfs" +# +# ## By default, telegraf gathers all zfs stats +# ## If not specified, then default is: +# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] +# ## For Linux, the default is: +# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats", +# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# ## By default, don't gather zpool stats +# # poolMetrics = false +# ## By default, don't gather zdataset stats +# # datasetMetrics = false + + +# # Reads 'mntr' stats from one or many zookeeper servers +# [[inputs.zookeeper]] +# ## An array of addresses to gather stats about. Specify an ip or hostname +# ## with port. ie localhost:2181, 10.0.0.1:2181, etc. +# +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 2181 is used +# servers = [":2181"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
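+# ## e.g. a larger value such as timeout = "30s" may suit many servers polled over slow links (illustrative value):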
+# # timeout = "5s" +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true + + +############################################################################### +# SERVICE INPUT PLUGINS # +############################################################################### + + +# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +# [[inputs.KNXListener]] +# ## Type of KNX-IP interface. +# ## Can be either "tunnel" or "router". +# # service_type = "tunnel" +# +# ## Address of the KNX-IP interface. +# service_address = "localhost:3671" +# +# ## Measurement definition(s) +# # [[inputs.knx_listener.measurement]] +# # ## Name of the measurement +# # name = "temperature" +# # ## Datapoint-Type (DPT) of the KNX messages +# # dpt = "9.001" +# # ## List of Group-Addresses (GAs) assigned to the measurement +# # addresses = ["5/5/1"] +# +# # [[inputs.knx_listener.measurement]] +# # name = "illumination" +# # dpt = "9.004" +# # addresses = ["5/5/3"] + + +# # Pull Metric Statistics from Aliyun CMS +# [[inputs.aliyuncms]] +# ## Aliyun Credentials +# ## Credentials are loaded in the following order +# ## 1) Ram RoleArn credential +# ## 2) AccessKey STS token credential +# ## 3) AccessKey credential +# ## 4) Ecs Ram Role credential +# ## 5) RSA keypair credential +# ## 6) Environment variables credential +# ## 7) Instance metadata credential +# +# # access_key_id = "" +# # access_key_secret = "" +# # access_key_sts_token = "" +# # role_arn = "" +# # role_session_name = "" +# # private_key = "" +# # public_key_id = "" +# # role_name = "" +# +# ## Specify the ali cloud region list to be queried for metrics and objects discovery +# ## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here +# ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm +# ## Default supported regions are: +# ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen, +# ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, +# ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1 +# ## +# ## From discovery perspective it set the scope for object discovery, the discovered info can be used to enrich +# ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then +# ## it will be reported on the start - for example for 'acs_cdn' project: +# ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) +# ## Currently, discovery supported for the following projects: +# ## - acs_ecs_dashboard +# ## - acs_rds_dashboard +# ## - acs_slb_dashboard +# ## - acs_vpc_eip +# regions = ["cn-hongkong"] +# +# # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all +# # metrics are made available to the 1 minute period. Some are collected at +# # 3 minute, 5 minute, or larger intervals. 
+# # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv +# # Note that if a period is configured that is smaller than the minimum for a +# # particular metric, that metric will not be returned by the Aliyun OpenAPI +# # and will not be collected by Telegraf. +# # +# ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) +# period = "5m" +# +# ## Collection Delay (required - must account for metrics availability via AliyunCMS API) +# delay = "1m" +# +# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid +# ## gaps or overlap in pulled data +# interval = "5m" +# +# ## Metric Statistic Project (required) +# project = "acs_slb_dashboard" +# +# ## Maximum requests per second, default value is 200 +# ratelimit = 200 +# +# ## How often the discovery API call is executed (default 1m) +# #discovery_interval = "1m" +# +# ## Metrics to Pull (Required) +# [[inputs.aliyuncms.metrics]] +# ## Metrics names to be requested, +# ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq +# names = ["InstanceActiveConnection", "InstanceNewConnection"] +# +# ## Dimension filters for Metric (these are optional). +# ## This allows getting an additional metric dimension. If a dimension is not specified it can be returned, or +# ## the data can be aggregated - it depends on the particular metric; you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq +# ## +# ## Note that by default the dimension filter includes the list of discovered objects in scope (if discovery is enabled). +# ## Values specified here are added to the list of discovered objects. +# ## You can specify either a single dimension: +# #dimensions = '{"instanceId": "p-example"}' +# +# ## Or you can specify several dimensions at once: +# #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' +# +# ## Enrichment tags, can be added from discovery (if supported) +# ## Notation is <tag_name>:<JMES query path> +# ## To figure out which fields are available, consult the Describe API per project. +# ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO +# #tag_query_path = [ +# # "address:Address", +# # "name:LoadBalancerName", +# # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" +# # ] +# ## The following tags are added by default: regionId (if discovery enabled), userId, instanceId. +# +# ## Allow metrics without discovery data, if discovery is enabled. If set to true, metrics without discovery +# ## data are emitted, otherwise they are dropped. This can be of help when debugging dimension filters, or with partial coverage +# ## of discovery scope vs monitoring scope +# #allow_dps_without_discovery = false + + +# # AMQP consumer plugin +# [[inputs.amqp_consumer]] +# ## Broker to consume from. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# +# ## Brokers to consume from. If multiple brokers are specified a random broker +# ## will be selected anytime a connection is established. This can be +# ## helpful for load balancing when not using a dedicated load balancer. +# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Name of the exchange to declare. If unset, no exchange will be declared.
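+# ## e.g. a dedicated exchange can be declared instead of the default below (illustrative name): exchange = "telegraf-metrics"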
+# exchange = "telegraf" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". +# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_property" = "timestamp"} +# +# ## AMQP queue name. +# queue = "telegraf" +# +# ## AMQP queue durability can be "transient" or "durable". +# queue_durability = "durable" +# +# ## If true, queue will be passively declared. +# # queue_passive = false +# +# ## A binding between the exchange and queue using this binding key is +# ## created. If unset, no binding is created. +# binding_key = "#" +# +# ## Maximum number of messages server should give to the worker. +# # prefetch_count = 50 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Auth method. PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read Cassandra metrics through Jolokia +# [[inputs.cassandra]] +# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the +# ## jolokia2 plugin instead. +# ## +# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 +# +# context = "/jolokia/read" +# ## List of cassandra servers exposing jolokia read service +# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] +# ## List of metrics collected on above servers +# ## Each metric consists of a jmx path. +# ## This will collect all heap memory usage metrics from the jvm and +# ## ReadLatency metrics for all keyspaces and tables. +# ## "type=Table" in the query works with Cassandra3.0. Older versions might +# ## need to use "type=ColumnFamily" +# metrics = [ +# "/java.lang:type=Memory/HeapMemoryUsage", +# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" +# ] + + +# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms +# [[inputs.cisco_telemetry_mdt]] +# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when +# ## using the grpc transport. 
+# transport = "grpc"
+#
+# ## Address and port to host telemetry listener
+# service_address = ":57000"
+#
+# ## Enable TLS; grpc transport only.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Enable TLS client authentication and define allowed CA certificates; grpc
+# ## transport only.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags
+# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"]
+#
+# ## Define aliases to map telemetry encoding paths to simple measurement names
+# [inputs.cisco_telemetry_mdt.aliases]
+# ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
+# ## Define property transformations; please refer to the README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for model details.
+# [inputs.cisco_telemetry_mdt.dmes]
+# ModTs = "ignore"
+# CreateTs = "ignore"
+
+
+# # Read metrics from one or many ClickHouse servers
+# [[inputs.clickhouse]]
+# ## Username for authorization on ClickHouse server
+# ## example: username = "default"
+# username = "default"
+#
+# ## Password for authorization on ClickHouse server
+# ## example: password = "super_secret"
+#
+# ## HTTP(s) timeout while getting metrics values
+# ## The timeout includes connection time, any redirects, and reading the response body.
+# ## example: timeout = 1s
+# # timeout = 5s
+#
+# ## List of servers for metrics scraping
+# ## metrics scrape via HTTP(s) clickhouse interface
+# ## https://clickhouse.tech/docs/en/interfaces/http/
+# ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
+# servers = ["http://127.0.0.1:8123"]
+#
+# ## If "auto_discovery" is "true", the plugin tries to connect to all servers available in the cluster
+# ## using the same "user:password" described in the "username" and "password" parameters,
+# ## and gets the server hostname list from the "system.clusters" table
+# ## see
+# ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
+# ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
+# ## example: auto_discovery = false
+# # auto_discovery = true
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter is present, a "WHERE cluster IN (...)" filter will be applied
+# ## please use only full cluster names here; regexp and glob filters are not allowed
+# ## for "/etc/clickhouse-server/config.d/remote.xml"
+# ## <yandex>
+# ##  <remote_servers>
+# ##    <my-own-cluster>
+# ##        <shard>
+# ##          <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
+# ##          <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
+# ##        </shard>
+# ##        <shard>
+# ##          <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
+# ##          <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
+# ##        </shard>
+# ##    </my-own-cluster>
+# ##  </remote_servers>
+# ##
+# ## </yandex>
+# ##
+# ## example: cluster_include = ["my-own-cluster"]
+# # cluster_include = []
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter is present, a "WHERE cluster NOT IN (...)" filter will be applied
+# ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
+# # cluster_exclude = []
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from Google PubSub
+# [[inputs.cloud_pubsub]]
+# ## Required. Name of Google Cloud Platform (GCP) Project that owns
+# ## the given PubSub subscription.
+# project = "my-project"
+#
+# ## Required. Name of PubSub subscription to ingest metrics from.
+# subscription = "my-subscription"
+#
+# ## Required. Data format to consume.
+# ## Each data format has its own unique set of configuration options.
+# ## Read more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
+# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
+# ## Application Default Credentials, which is preferred.
+# # credentials_file = "path/to/my/creds.json"
+#
+# ## Optional. Number of seconds to wait before attempting to restart the
+# ## PubSub subscription receiver after an unexpected error.
+# ## If the streaming pull for a PubSub Subscription fails (receiver),
+# ## the agent attempts to restart receiving messages after this many seconds.
+# # retry_delay_seconds = 5
+#
+# ## Optional. Maximum byte length of a message to consume.
+# ## Larger messages are dropped with an error. If less than 0 or unspecified,
+# ## treated as no limit.
+# # max_message_len = 1000000
+#
+# ## Optional. Maximum messages to read from PubSub that have not been written
+# ## to an output. Defaults to 1000.
+# ## For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message contains 10 metrics and the output
+# ## metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## The following are optional Subscription ReceiveSettings in PubSub.
+# ## Read more about these values:
+# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings
+#
+# ## Optional. Maximum number of seconds for which a PubSub subscription
+# ## should auto-extend the PubSub ACK deadline for each message. If less than
+# ## 0, auto-extension is disabled.
+# # max_extension = 0
+#
+# ## Optional. Maximum number of unprocessed messages in PubSub
+# ## (unacknowledged but not yet expired in PubSub).
+# ## A value of 0 is treated as the default PubSub value.
+# ## Negative values will be treated as unlimited.
+# # max_outstanding_messages = 0
+#
+# ## Optional. Maximum size in bytes of unprocessed messages in PubSub
+# ## (unacknowledged but not yet expired in PubSub).
+# ## A value of 0 is treated as the default PubSub value.
+# ## Negative values will be treated as unlimited.
+# # max_outstanding_bytes = 0
+#
+# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn
+# ## to pull messages from PubSub concurrently. This limit applies to each
+# ## subscription separately and is treated as the PubSub default if less than
+# ## 1. Note this setting does not limit the number of messages that can be
+# ## processed concurrently (use "max_outstanding_messages" instead).
+# # max_receiver_go_routines = 0
+#
+# ## Optional. If true, Telegraf will attempt to base64 decode the
+# ## PubSub message data before parsing
+# # base64_data = false
+
+
+# # Google Cloud Pub/Sub Push HTTP listener
+# [[inputs.cloud_pubsub_push]]
+# ## Address and port to host HTTP listener on
+# service_address = ":8080"
+#
+# ## Application secret to verify messages originate from Cloud Pub/Sub
+# # token = ""
+#
+# ## Path to listen to.
+# # path = "/"
+#
+# ## Maximum duration before timing out read of the request
+# # read_timeout = "10s"
+# ## Maximum duration before timing out write of the response. This should be set to a value
+# ## large enough that you can send at least 'metric_batch_size' number of messages within the
+# ## duration.
+# # write_timeout = "10s"
+#
+# ## Maximum allowed http request body size in bytes.
+# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+# # max_body_size = "500MB"
+#
+# ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.
+# # add_meta = false
+#
+# ## Optional. Maximum messages to read from PubSub that have not been written
+# ## to an output. Defaults to 1000.
+# ## For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message contains 10 metrics and the output
+# ## metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Ingests files in a directory and then moves them to a target directory.
+# [[inputs.directory_monitor]]
+# ## The directory to monitor and read files from.
+# directory = ""
+# #
+# ## The directory to move finished files to.
+# finished_directory = ""
+# #
+# ## The directory to move files to upon file error.
+# ## If not provided, erroring files will stay in the monitored directory.
+# # error_directory = ""
+# #
+# ## The amount of time a file is allowed to sit in the directory before it is picked up.
+# ## This can generally be low, but if a very large file is written to the directory and copying is slow,
+# ## set this higher so that the plugin waits until the file is fully copied to the directory.
+# # directory_duration_threshold = "50ms"
+# #
+# ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested.
+# # files_to_monitor = ["^.*\.csv"]
+# #
+# ## A list of files to ignore, if necessary. Supports regex.
+# # files_to_ignore = [".DS_Store"]
+# #
+# ## Maximum lines of the file to process that have not yet been written by the
+# ## output. For best throughput set to the size of the output's metric_buffer_limit.
+# ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics.
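+# ## (Illustrative note, not from the upstream sample: with the agent default
+# ## metric_buffer_limit of 10000, the default below already matches it; if
+# ## you raise metric_buffer_limit, raise max_buffered_metrics with it.)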
+# # max_buffered_metrics = 10000
+# #
+# ## The maximum number of file paths to queue up for processing at once, before waiting until files are processed to find more files.
+# ## Lowering this value slightly reduces memory use at a potential cost in throughput; lower it only if necessary.
+# # file_queue_size = 100000
+# #
+# ## The data format to be read from the files.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# ## NOTE: We currently only support parsing newline-delimited JSON. See the format here: https://github.com/ndjson/ndjson-spec
+# data_format = "influx"
+
+
+# # Read logging output from the Docker engine
+# [[inputs.docker_log]]
+# ## Docker Endpoint
+# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
+# ## To use environment variables (i.e., docker-machine), set endpoint = "ENV"
+# # endpoint = "unix:///var/run/docker.sock"
+#
+# ## When true, container logs are read from the beginning; otherwise
+# ## reading begins at the end of the log.
+# # from_beginning = false
+#
+# ## Timeout for Docker API calls.
+# # timeout = "5s"
+#
+# ## Containers to include and exclude. Globs accepted.
+# ## Note that an empty array for both will include all containers
+# # container_name_include = []
+# # container_name_exclude = []
+#
+# ## Container states to include and exclude. Globs accepted.
+# ## When empty only containers in the "running" state will be captured.
+# # container_state_include = []
+# # container_state_exclude = []
+#
+# ## docker labels to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all labels as tags
+# # docker_label_include = []
+# # docker_label_exclude = []
+#
+# ## Set the source tag for the metrics to the container ID hostname, i.e. the first 12 chars of the container ID
+# source_tag = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Azure Event Hubs service input plugin
+# [[inputs.eventhub_consumer]]
+# ## The default behavior is to create a new Event Hub client from environment variables.
+# ## This requires one of the following sets of environment variables to be set:
+# ##
+# ## 1) Expected Environment Variables:
+# ## - "EVENTHUB_CONNECTION_STRING"
+# ##
+# ## 2) Expected Environment Variables:
+# ## - "EVENTHUB_NAMESPACE"
+# ## - "EVENTHUB_NAME"
+# ## - "EVENTHUB_KEY_NAME"
+# ## - "EVENTHUB_KEY_VALUE"
+#
+# ## 3) Expected Environment Variables:
+# ## - "EVENTHUB_NAMESPACE"
+# ## - "EVENTHUB_NAME"
+# ## - "AZURE_TENANT_ID"
+# ## - "AZURE_CLIENT_ID"
+# ## - "AZURE_CLIENT_SECRET"
+#
+# ## Uncommenting the option below will create an Event Hub client based solely on the connection string.
+# ## This can either be the associated environment variable or hard coded directly.
+# ## If this option is uncommented, environment variables will be ignored.
+# ## Connection string should contain EventHubName (EntityPath)
+# # connection_string = ""
+#
+# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister
+# # persistence_dir = ""
+#
+# ## Change the default consumer group
+# # consumer_group = ""
+#
+# ## By default the event hub receives all messages present on the broker; alternative modes can be set below.
+# ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). +# ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). +# # from_timestamp = +# # latest = true +# +# ## Set a custom prefetch count for the receiver(s) +# # prefetch_count = 1000 +# +# ## Add an epoch to the receiver(s) +# # epoch = 0 +# +# ## Change to set a custom user agent, "telegraf" is used by default +# # user_agent = "telegraf" +# +# ## To consume from a specific partition, set the partition_ids option. +# ## An empty array will result in receiving from all partitions. +# # partition_ids = ["0","1"] +# +# ## Max undelivered messages +# # max_undelivered_messages = 1000 +# +# ## Set either option below to true to use a system property as timestamp. +# ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. +# ## It is recommended to use this setting when the data itself has no timestamp. +# # enqueued_time_as_ts = true +# # iot_hub_enqueued_time_as_ts = true +# +# ## Tags or fields to create from keys present in the application property bag. +# ## These could for example be set by message enrichments in Azure IoT Hub. +# # application_property_tags = [] +# # application_property_fields = [] +# +# ## Tag or field name to use for metadata +# ## By default all metadata is disabled +# # sequence_number_field = "SequenceNumber" +# # enqueued_time_field = "EnqueuedTime" +# # offset_field = "Offset" +# # partition_id_tag = "PartitionID" +# # partition_key_tag = "PartitionKey" +# # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" +# # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" +# # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" +# # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" +# # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Run executable as long-running input plugin +# [[inputs.execd]] +# ## Program to run as daemon +# command = ["telegraf-smartctl", "-d", "/dev/sda"] +# +# ## Define how the process is signaled on each collection interval. +# ## Valid values are: +# ## "none" : Do not signal anything. +# ## The process must output metrics by itself. +# ## "STDIN" : Send a newline on STDIN. +# ## "SIGHUP" : Send a HUP signal. Not available on Windows. +# ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. +# ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. +# signal = "none" +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # gNMI telemetry input plugin +# [[inputs.gnmi]] +# ## Address and port of the gNMI GRPC server +# addresses = ["10.49.234.114:57777"] +# +# ## define credentials +# username = "cisco" +# password = "cisco" +# +# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") +# # encoding = "proto" +# +# ## redial in case of failures after +# redial = "10s" +# +# ## enable client-side TLS and define CA to authenticate the device +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # insecure_skip_verify = true +# +# ## define client-side TLS certificate & key to authenticate to the device +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## gNMI subscription prefix (optional, can usually be left empty) +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# # origin = "" +# # prefix = "" +# # target = "" +# +# ## Define additional aliases to map telemetry encoding paths to simple measurement names +# #[inputs.gnmi.aliases] +# # ifcounters = "openconfig:/interfaces/interface/state/counters" +# +# [[inputs.gnmi.subscription]] +# ## Name of the measurement that will be emitted +# name = "ifcounters" +# +# ## Origin and path of the subscription +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# ## +# ## origin usually refers to a (YANG) data model implemented by the device +# ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) +# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr +# origin = "openconfig-interfaces" +# path = "/interfaces/interface/state/counters" +# +# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval +# subscription_mode = "sample" +# sample_interval = "10s" +# +# ## Suppress redundant transmissions when measured values are unchanged +# # suppress_redundant = false +# +# ## If suppression is enabled, send updates at least every X seconds anyway +# # heartbeat_interval = "60s" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.http_listener]] +# ## Address and port to host InfluxDB listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. 
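+# ## (Illustrative note, not from the upstream sample: e.g. with
+# ## retention_policy_tag = "rp", a client write to /write?db=mydb&rp=oneweek
+# ## produces metrics carrying the tag rp=oneweek.)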
+# # retention_policy_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" + + +# # Generic HTTP write listener +# [[inputs.http_listener_v2]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Path to listen to. +# ## This option is deprecated and only available for backward-compatibility. Please use paths instead. +# # path = "" +# +# ## Paths to listen to. +# # paths = ["/telegraf"] +# +# ## Save path as http_listener_v2_path tag if set to true +# # path_tag = false +# +# ## HTTP methods to accept. +# # methods = ["POST", "PUT"] +# +# ## maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Part of the request to consume. Available options are "body" and +# ## "query". +# # data_source = "body" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" +# +# ## Optional setting to map http headers into tags +# ## If the http header is not present on the request, no corresponding tag will be added +# ## If multiple instances of the http header are present, only the first value will be used +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.influxdb_listener]] +# ## Address and port to host InfluxDB listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. 
+# # retention_policy_tag = ""
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# tls_cert = "/etc/telegraf/cert.pem"
+# tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional username and password to accept for HTTP basic authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # basic_username = "foobar"
+# # basic_password = "barfoo"
+
+
+# # Accept metrics over InfluxDB 2.x HTTP API
+# [[inputs.influxdb_v2_listener]]
+# ## Address and port to host InfluxDB listener on
+# ## (Double check the port. Could be 9999 if using OSS Beta)
+# service_address = ":8086"
+#
+# ## Maximum allowed HTTP request body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# # max_body_size = "32MiB"
+#
+# ## Optional tag to determine the bucket.
+# ## If the write has a bucket in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## The default value of nothing means it will be off and the database will not be recorded.
+# # bucket_tag = ""
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional token to accept for HTTP authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # token = "some-long-shared-secret-token"
+
+
+# # Intel Resource Director Technology plugin
+# [[inputs.intel_rdt]]
+# ## Optionally set sampling interval to Nx100ms.
+# ## This value is propagated to pqos tool. Interval format is defined by pqos itself.
+# ## If not provided, or provided as 0, it will be set to 10 (10x100ms = 1s).
+# # sampling_interval = "10"
+#
+# ## Optionally specify the path to pqos executable.
+# ## If not provided, auto discovery will be performed.
+# # pqos_path = "/usr/local/bin/pqos"
+#
+# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated.
+# ## If not provided, default value is false.
+# # shortened_metrics = false
+#
+# ## Specify the list of groups of CPU core(s) to be provided as pqos input.
+# ## Mandatory if processes aren't set and forbidden if processes are specified.
+# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"]
+# # cores = ["0-3"]
+#
+# ## Specify the list of processes for which metrics will be collected.
+# ## Mandatory if cores aren't set and forbidden if cores are specified.
+# ## e.g. ["qemu", "pmd"]
+# # processes = ["process"]
+#
+# ## Specify if the pqos process should be called with sudo.
+# ## Mandatory if the telegraf process does not run as root.
+# # use_sudo = false
+
+
+# # Read JTI OpenConfig Telemetry from listed sensors
+# [[inputs.jti_openconfig_telemetry]]
+# ## List of device addresses to collect telemetry from
+# servers = ["localhost:1883"]
+#
+# ## Authentication details. Username and password are required if the device expects
+# ## authentication. Client ID must be unique when connecting from multiple instances
+# ## of telegraf to the same device
+# username = "user"
+# password = "pass"
+# client_id = "telegraf"
+#
+# ## Frequency to get data
+# sample_frequency = "1000ms"
+#
+# ## Sensors to subscribe for
+# ## An identifier for each sensor can be provided in the path by separating with a space
+# ## Otherwise the sensor path will be used as the identifier
+# ## When an identifier is used, we can provide a list of space separated sensors.
+# ## A single subscription will be created with all these sensors and data will
+# ## be saved to a measurement with this identifier name
+# sensors = [
+# "/interfaces/",
+# "collection /components/ /lldp",
+# ]
+#
+# ## We allow specifying sensor group level reporting rate. To do this, specify the
+# ## reporting rate in Duration at the beginning of sensor paths / collection
+# ## name. For entries without reporting rate, we use configured sample frequency
+# sensors = [
+# "1000ms customReporting /interfaces /lldp",
+# "2000ms collection /components",
+# "/interfaces",
+# ]
+#
+# ## Optional TLS Config
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
+# ## Failed streams/calls will not be retried if 0 is provided
+# retry_delay = "1000ms"
+#
+# ## To treat all string values as tags, set this to true
+# str_as_tags = false
+
+
+# # Read metrics from Kafka topics
+# [[inputs.kafka_consumer]]
+# ## Kafka brokers.
+# brokers = ["localhost:9092"]
+#
+# ## Topics to consume.
+# topics = ["telegraf"]
+#
+# ## When set, this tag will be added to all metrics with the topic as the value.
+# # topic_tag = ""
+#
+# ## Optional Client id
+# # client_id = "Telegraf"
+#
+# ## Set the minimal supported Kafka version. Setting this enables the use of new
+# ## Kafka features and APIs. Must be 0.10.2.0 or greater.
+# ## ex: version = "1.1.0"
+# # version = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## SASL authentication credentials. These settings should typically be used
+# ## with TLS encryption enabled
+# # sasl_username = "kafka"
+# # sasl_password = "secret"
+#
+# ## Optional SASL:
+# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI
+# ## (defaults to PLAIN)
+# # sasl_mechanism = ""
+#
+# ## used if sasl_mechanism is GSSAPI (experimental)
+# # sasl_gssapi_service_name = ""
+# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH
+# # sasl_gssapi_auth_type = "KRB5_USER_AUTH"
+# # sasl_gssapi_kerberos_config_path = "/"
+# # sasl_gssapi_realm = "realm"
+# # sasl_gssapi_key_tab_path = ""
+# # sasl_gssapi_disable_pafxfast = false
+#
+# ## used if sasl_mechanism is OAUTHBEARER (experimental)
+# # sasl_access_token = ""
+#
+# ## SASL protocol version. When connecting to Azure EventHub set to 0.
+# # sasl_version = 1
+#
+# ## Name of the consumer group.
+# # consumer_group = "telegraf_metrics_consumers"
+#
+# ## Compression codec represents the various compression codecs recognized by
+# ## Kafka in messages.
+# ## 0 : None
+# ## 1 : Gzip
+# ## 2 : Snappy
+# ## 3 : LZ4
+# ## 4 : ZSTD
+# # compression_codec = 0
+#
+# ## Initial offset position; one of "oldest" or "newest".
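+# ## (Illustrative note, not from the upstream sample: this only takes effect
+# ## when the consumer group has no committed offset yet; "oldest" replays the
+# ## whole retained topic on first start, "newest" starts with new messages.)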
+# # offset = "oldest" +# +# ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky". +# # balance_strategy = "range" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 1000000 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from Kafka topic(s) +# [[inputs.kafka_consumer_legacy]] +# ## topic(s) to consume +# topics = ["telegraf"] +# +# ## an array of Zookeeper connection strings +# zookeeper_peers = ["localhost:2181"] +# +# ## Zookeeper Chroot +# zookeeper_chroot = "" +# +# ## the name of the consumer group +# consumer_group = "telegraf_metrics_consumers" +# +# ## Offset (must be either "oldest" or "newest") +# offset = "oldest" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 65536 + + +# # Configuration for the AWS Kinesis input. +# [[inputs.kinesis_consumer]] +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# +# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) +# # shard_iterator_type = "TRIM_HORIZON" +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. 
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ##
+# ## The content encoding of the data from kinesis
+# ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip"
+# ## as AWS compresses cloudwatch log data before it is sent to kinesis (AWS
+# ## also base64 encodes the gzipped bytes before pushing to the stream; the base64
+# ## decoding is done automatically by the golang sdk as data is read from kinesis)
+# ##
+# # content_encoding = "identity"
+#
+# ## Optional
+# ## Configuration for a dynamodb checkpoint
+# [inputs.kinesis_consumer.checkpoint_dynamodb]
+# ## unique name for this consumer
+# app_name = "default"
+# table_name = "default"
+
+
+# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface.
+# [[inputs.knx_listener]]
+# ## Type of KNX-IP interface.
+# ## Can be either "tunnel" or "router".
+# # service_type = "tunnel"
+#
+# ## Address of the KNX-IP interface.
+# service_address = "localhost:3671"
+#
+# ## Measurement definition(s)
+# # [[inputs.knx_listener.measurement]]
+# # ## Name of the measurement
+# # name = "temperature"
+# # ## Datapoint-Type (DPT) of the KNX messages
+# # dpt = "9.001"
+# # ## List of Group-Addresses (GAs) assigned to the measurement
+# # addresses = ["5/5/1"]
+#
+# # [[inputs.knx_listener.measurement]]
+# # name = "illumination"
+# # dpt = "9.004"
+# # addresses = ["5/5/3"]
+
+
+# # Read metrics off Arista LANZ, via socket
+# [[inputs.lanz]]
+# ## URL to Arista LANZ endpoint
+# servers = [
+# "tcp://127.0.0.1:50001"
+# ]
+
+
+# # Stream and parse log file(s).
+# [[inputs.logparser]]
+# ## Log files to parse.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". i.e.:
+# ## /var/log/**.log -> recursively find all .log files in /var/log
+# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
+# ## /var/log/apache.log -> only tail the apache log file
+# files = ["/var/log/apache/access.log"]
+#
+# ## Read files that currently exist from the beginning. Files that are created
+# ## while telegraf is running (and that match the "files" globs) will always
+# ## be read from the beginning.
+# from_beginning = false
+#
+# ## Method used to watch for file updates. Can be either "inotify" or "poll".
+# # watch_method = "inotify"
+#
+# ## Parse logstash-style "grok" patterns:
+# [inputs.logparser.grok]
+# ## This is a list of patterns to check the given log file(s) for.
+# ## Note that adding patterns here increases processing time. The most
+# ## efficient configuration is to have one pattern per logparser.
+# ## Other common built-in patterns are:
+# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
+# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
+# patterns = ["%{COMBINED_LOG_FORMAT}"]
+#
+# ## Name of the output measurement.
+# measurement = "apache_access_log"
+#
+# ## Full path(s) to custom pattern files.
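+# ## (Illustrative note, not from the upstream sample: a pattern file holds
+# ## one definition per line, a NAME followed by a regular expression, e.g. a
+# ## hypothetical  POSTFIX_QUEUEID [0-9A-F]{10,11}  which could then be used
+# ## in 'patterns' as %{POSTFIX_QUEUEID:queue_id}.)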
+# custom_pattern_files = []
+#
+# ## Custom patterns can also be defined here. Put one pattern per line.
+# custom_patterns = '''
+# '''
+#
+# ## Timezone allows you to provide an override for timestamps that
+# ## don't already include an offset
+# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
+# ##
+# ## Default: "" which renders UTC
+# ## Options are as follows:
+# ## 1. Local -- interpret based on machine localtime
+# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
+# # timezone = "Canada/Eastern"
+#
+# ## When set to "disable", the timestamp will not be incremented if there is a
+# ## duplicate.
+# # unique_timestamp = "auto"
+
+
+# # Read metrics from MQTT topic(s)
+# [[inputs.mqtt_consumer]]
+# ## Broker URLs for the MQTT server or cluster. To connect to multiple
+# ## clusters or standalone servers, use a separate plugin instance.
+# ## example: servers = ["tcp://localhost:1883"]
+# ## servers = ["ssl://localhost:1883"]
+# ## servers = ["ws://localhost:1883"]
+# servers = ["tcp://127.0.0.1:1883"]
+#
+# ## Topics that will be subscribed to.
+# topics = [
+# "telegraf/host01/cpu",
+# "telegraf/+/mem",
+# "sensors/#",
+# ]
+#
+# ## The message topic will be stored in a tag specified by this value. If set
+# ## to the empty string no topic tag will be created.
+# # topic_tag = "topic"
+#
+# ## QoS policy for messages
+# ## 0 = at most once
+# ## 1 = at least once
+# ## 2 = exactly once
+# ##
+# ## When using a QoS of 1 or 2, you should enable persistent_session to allow
+# ## resuming unacknowledged messages.
+# # qos = 0
+#
+# ## Connection timeout for initial connection in seconds
+# # connection_timeout = "30s"
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Persistent session disables clearing of the client session on connection.
+# ## In order for this option to work you must also set client_id to identify
+# ## the client. To receive messages that arrived while the client is offline,
+# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
+# ## publishing.
+# # persistent_session = false
+#
+# ## If unset, a random client ID will be generated.
+# # client_id = ""
+#
+# ## Username and password to connect to the MQTT server.
+# # username = "telegraf"
+# # password = "metricsmetricsmetricsmetrics"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from NATS subject(s) +# [[inputs.nats_consumer]] +# ## urls of NATS servers +# servers = ["nats://localhost:4222"] +# +# ## subject(s) to consume +# subjects = ["telegraf"] +# +# ## name a queue group +# queue_group = "telegraf_consumers" +# +# ## Optional credentials +# # username = "" +# # password = "" +# +# ## Optional NATS 2.0 and NATS NGS compatible user credentials +# # credentials = "/etc/telegraf/nats.creds" +# +# ## Use Transport Layer Security +# # secure = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Sets the limits for pending msgs and bytes for each subscription +# ## These shouldn't need to be adjusted except in very high throughput scenarios +# # pending_message_limit = 65536 +# # pending_bytes_limit = 67108864 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read NSQ topic for metrics. +# [[inputs.nsq_consumer]] +# ## Server option still works but is deprecated, we just prepend it to the nsqd array. +# # server = "localhost:4150" +# +# ## An array representing the NSQD TCP HTTP Endpoints +# nsqd = ["localhost:4150"] +# +# ## An array representing the NSQLookupd HTTP Endpoints +# nsqlookupd = ["localhost:4161"] +# topic = "telegraf" +# channel = "consumer" +# max_in_flight = 100 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Receive OpenTelemetry traces, metrics, and logs over gRPC +# [[inputs.opentelemetry]] +# ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service +# ## address:port +# # service_address = "0.0.0.0:4317" +# +# ## Override the default (5s) new connection timeout +# # timeout = "5s" +# +# ## Override the default (prometheus-v1) metrics schema. +# ## Supports: "prometheus-v1", "prometheus-v2" +# ## For more information about the alternatives, read the Prometheus input +# ## plugin notes. +# # metrics_schema = "prometheus-v1" +# +# ## Optional TLS Config. +# ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md +# ## +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# ## Add service certificate and key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Read metrics from one or many pgbouncer servers +# [[inputs.pgbouncer]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# address = "host=localhost user=pgbouncer sslmode=disable" + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# ## +# address = "host=localhost user=postgres sslmode=disable" +# ## A custom name for the database that will be used as the "server" tag in the +# ## measurement output. If not specified, a default one generated from +# ## the connection address is used. +# # outputaddress = "db01" +# +# ## connection configuration. +# ## maxlifetime - specify the maximum lifetime of a connection. +# ## default is forever (0s) +# max_lifetime = "0s" +# +# ## A list of databases to explicitly ignore. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'databases' option. +# # ignored_databases = ["postgres", "template0", "template1"] +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'ignored_databases' option. +# # databases = ["app_production", "testing"] + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql_extensible]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... 
dbname=app_production
+# #
+# ## All connection parameters are optional.
+# ## Without the dbname parameter, the driver will default to a database
+# ## with the same name as the user. This dbname is just for instantiating a
+# ## connection with the server and doesn't restrict the databases we are trying
+# ## to grab metrics for.
+# #
+# address = "host=localhost user=postgres sslmode=disable"
+#
+# ## connection configuration.
+# ## maxlifetime - specify the maximum lifetime of a connection.
+# ## default is forever (0s)
+# max_lifetime = "0s"
+#
+# ## A list of databases to pull metrics about. If not specified, metrics for all
+# ## databases are gathered.
+# ## databases = ["app_production", "testing"]
+# #
+# ## A custom name for the database that will be used as the "server" tag in the
+# ## measurement output. If not specified, a default one generated from
+# ## the connection address is used.
+# # outputaddress = "db01"
+# #
+# ## Define the toml config where the sql queries are stored
+# ## New queries can be added. If withdbname is set to true and no
+# ## databases are defined in the 'databases' field, the sql query is ended by an
+# ## 'is not null' clause in order to make the query succeed.
+# ## Example :
+# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" becomes
+# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
+# ## because the databases variable was set to ['postgres', 'pgbench'] and
+# ## withdbname was true. Be careful: if withdbname is set to false, you
+# ## don't have to define the where clause (i.e. with the dbname). The tagvalue
+# ## field is used to define custom tags (separated by commas).
+# ## The optional "measurement" value can be used to override the default
+# ## output measurement name ("postgresql").
+# ##
+# ## The script option can be used to specify the .sql file path.
+# ## If the script and sqlquery options are specified at the same time, sqlquery will be used
+# ##
+# ## the tagvalue field is used to define custom tags (separated by commas).
+# ## the query is expected to return columns which match the names of the
+# ## defined tags. The values in these columns must be of a string-type,
+# ## a number-type or a blob-type.
+# ##
+# ## The timestamp field is used to override the data point's timestamp value. By
+# ## default, all rows are inserted with the current time. By setting a timestamp column,
+# ## the row will be inserted with that column's value.
+# ##
+# ## Structure :
+# ## [[inputs.postgresql_extensible.query]]
+# ## sqlquery string
+# ## version string
+# ## withdbname boolean
+# ## tagvalue string (comma separated)
+# ## measurement string
+# ## timestamp string
+# [[inputs.postgresql_extensible.query]]
+# sqlquery="SELECT * FROM pg_stat_database"
+# version=901
+# withdbname=false
+# tagvalue=""
+# measurement=""
+# [[inputs.postgresql_extensible.query]]
+# sqlquery="SELECT * FROM pg_stat_bgwriter"
+# version=901
+# withdbname=false
+# tagvalue="postgresql.stats"
+
+
+# # Read metrics from one or many prometheus clients
+# [[inputs.prometheus]]
+# ## An array of urls to scrape metrics from.
+# urls = ["http://localhost:9100/metrics"]
+#
+# ## Metric version controls the mapping from Prometheus metrics into
+# ## Telegraf metrics. When using the prometheus_client output, use the same
+# ## value in both plugins to ensure metrics are round-tripped without
+# ## modification.
+# ##
+# ## example: metric_version = 1;
+# ## metric_version = 2; recommended version
+# # metric_version = 1
+#
+# ## URL tag name (tag containing the scraped URL; optional, default is "url")
+# # url_tag = "url"
+#
+# ## An array of Kubernetes services to scrape metrics from.
+# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
+#
+# ## Kubernetes config file to create client from.
+# # kube_config = "/path/to/kubernetes.config"
+#
+# ## Scrape Kubernetes pods for the following prometheus annotations:
+# ## - prometheus.io/scrape: Enable scraping for this pod
+# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
+# ## set this to 'https' & most likely set the tls config.
+# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
+# ## - prometheus.io/port: If port is not 9102 use this annotation
+# # monitor_kubernetes_pods = true
+# ## Get the list of pods to scrape with either the scope of
+# ## - cluster: the kubernetes watch api (default, no need to specify)
+# ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP.
+# # pod_scrape_scope = "cluster"
+# ## Only for node scrape scope: node IP of the node that telegraf is running on.
+# ## Either this config or the environment variable NODE_IP must be set.
+# # node_ip = "10.180.1.1"
+# ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping.
+# ## Default is 60 seconds.
+# # pod_scrape_interval = 60
+# ## Restricts Kubernetes monitoring to a single namespace
+# ## ex: monitor_kubernetes_pods_namespace = "default"
+# # monitor_kubernetes_pods_namespace = ""
+# # label selector to target pods which have the label
+# # kubernetes_label_selector = "env=dev,app=nginx"
+# # field selector to target pods
+# # eg. To scrape pods on a specific node
+# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
+#
+# ## Scrape Services available in Consul Catalog
+# # [inputs.prometheus.consul]
+# # enabled = true
+# # agent = "http://localhost:8500"
+# # query_interval = "5m"
+#
+# # [[inputs.prometheus.consul.query]]
+# # name = "a service name"
+# # tag = "a service tag"
+# # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}'
+# # [inputs.prometheus.consul.query.tags]
+# # host = "{{.Node}}"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## HTTP Basic Authentication username and password. ('bearer_token' and
+# ## 'bearer_token_string' take priority)
+# # username = ""
+# # password = ""
+#
+# ## Specify timeout duration for slower prometheus clients (default is 3s)
+# # response_timeout = "3s"
+#
+# ## Optional TLS Config
+# # tls_ca = /path/to/cafile
+# # tls_cert = /path/to/certfile
+# # tls_key = /path/to/keyfile
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required).
+# [[inputs.ras]]
+# ## Optional path to RASDaemon sqlite3 database.
+# ## Default: /var/lib/rasdaemon/ras-mc_event.db
+# # db_path = ""
+
+
+# # Riemann protobuf listener.
+# [[inputs.riemann_listener]]
+# ## URL to listen on.
+# ## Default is "tcp://:5555" +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" +# +# ## Maximum number of concurrent connections. +# ## 0 (default) is unlimited. +# # max_connections = 1024 +# ## Read timeout. +# ## 0 (default) is unlimited. +# # read_timeout = "30s" +# ## Optional TLS configuration. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Enables client authentication if set. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# ## Maximum socket buffer size (in bytes when no unit specified). +# # read_buffer_size = "64KiB" +# ## Period between keep alive probes. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" + + +# # SFlow V5 Protocol Listener +# [[inputs.sflow]] +# ## Address to listen for sFlow packets. +# ## example: service_address = "udp://:6343" +# ## service_address = "udp4://:6343" +# ## service_address = "udp6://:6343" +# service_address = "udp://:6343" +# +# ## Set the size of the operating system's receive buffer. +# ## example: read_buffer_size = "64KiB" +# # read_buffer_size = "" + + +# # Receive SNMP traps +# [[inputs.snmp_trap]] +# ## Transport, local address, and port to listen on. Transport must +# ## be "udp://". Omit local address to listen on all interfaces. +# ## example: "udp://127.0.0.1:1234" +# ## +# ## Special permissions may be required to listen on a port less than +# ## 1024. See README.md for details +# ## +# # service_address = "udp://:162" +# ## +# ## Path to mib files +# # path = ["/usr/share/snmp/mibs"] +# ## +# ## Timeout running snmptranslate command +# # timeout = "5s" +# ## Snmp version, defaults to 2c +# # version = "2c" +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA" or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" + + +# # Generic socket listener capable of handling multiple socket types. +# [[inputs.socket_listener]] +# ## URL to listen on +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" +# # service_address = "udp://:8094" +# # service_address = "udp4://:8094" +# # service_address = "udp6://:8094" +# # service_address = "unix:///tmp/telegraf.sock" +# # service_address = "unixgram:///tmp/telegraf.sock" +# +# ## Change the file mode bits on unix sockets. These permissions may not be +# ## respected by some platforms, to safely restrict write permissions it is best +# ## to place the socket into a directory that has previously been created +# ## with the desired permissions. +# ## ex: socket_mode = "777" +# # socket_mode = "" +# +# ## Maximum number of concurrent connections. +# ## Only applies to stream sockets (e.g. TCP). +# ## 0 (default) is unlimited. +# # max_connections = 1024 +# +# ## Read timeout. +# ## Only applies to stream sockets (e.g. TCP). 
+
+
+# # Read metrics from SQL queries
+# [[inputs.sql]]
+#   ## Database Driver
+#   ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for
+#   ## a list of supported drivers.
+#   driver = "mysql"
+#
+#   ## Data source name for connecting
+#   ## The syntax and supported options depend on the selected driver.
+#   dsn = "username:password@mysqlserver:3307/dbname?param=value"
+#
+#   ## Timeout for any operation
+#   ## Note that the timeout for queries is per query, not per gather.
+#   # timeout = "5s"
+#
+#   ## Connection time limits
+#   ## By default the maximum idle time and maximum lifetime of a connection is unlimited, i.e. the connections
+#   ## will not be closed automatically. If you specify a positive time, the connections will be closed after
+#   ## idling or existing for at least that amount of time, respectively.
+#   # connection_max_idle_time = "0s"
+#   # connection_max_life_time = "0s"
+#
+#   ## Connection count limits
+#   ## By default the number of open connections is not limited and the number of maximum idle connections
+#   ## will be inferred from the number of queries specified. If you specify a positive number for any of the
+#   ## two options, connections will be closed when reaching the specified limit. The number of idle connections
+#   ## will be clipped to the maximum number of connections limit if any.
+#   # connection_max_open = 0
+#   # connection_max_idle = auto
+#
+#   [[inputs.sql.query]]
+#     ## Query to perform on the server
+#     query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0"
+#     ## Alternatively to specifying the query directly you can select a file here containing the SQL query.
+#     ## Only one of 'query' and 'query_script' can be specified!
+#     # query_script = "/path/to/sql/script.sql"
+#
+#     ## Name of the measurement
+#     ## In case both 'measurement' and 'measurement_column' are given, the latter takes precedence.
+#     # measurement = "sql"
+#
+#     ## Column name containing the name of the measurement
+#     ## If given, this will take precedence over the 'measurement' setting. In case a query result
+#     ## does not contain the specified column, we fall back to the 'measurement' setting.
+#     # measurement_column = ""
+#
+#     ## Column name containing the time of the measurement
+#     ## If omitted, the time of the query will be used.
+#     # time_column = ""
+#
+#     ## Format of the time contained in 'time_column'
+#     ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format.
+#     ## See https://golang.org/pkg/time/#Time.Format for details.
+#     # time_format = "unix"
+#
+#     ## Column names containing tags
+#     ## An empty include list will reject all columns and an empty exclude list will not exclude any column.
+#     ## I.e. by default no columns will be returned as tags and the tags are empty.
+#     # tag_columns_include = []
+#     # tag_columns_exclude = []
+#
+#     ## Column names containing fields (explicit types)
+#     ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over
+#     ## the automatic (driver-based) conversion below.
+#     ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined.
+#     # field_columns_float = []
+#     # field_columns_int = []
+#     # field_columns_uint = []
+#     # field_columns_bool = []
+#     # field_columns_string = []
+#
+#     ## Column names containing fields (automatic types)
+#     ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty
+#     ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields.
+#     ## NOTE: We rely on the database driver to perform automatic datatype conversion.
+#     # field_columns_include = []
+#     # field_columns_exclude = []
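[Illustrative aside, not part of the upstream sample: a self-contained sketch of
the row-to-metric mapping this plugin performs, using Python's sqlite3 in place
of a real MySQL DSN. Table and column names are made up for the example.]

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE Scoreboard (user TEXT, state TEXT, latency REAL, score INT)")
con.execute("INSERT INTO Scoreboard VALUES ('alice', 'active', 0.12, 7)")

tag_columns = {"user", "state"}       # stands in for tag_columns_include
cur = con.execute("SELECT user,state,latency,score FROM Scoreboard WHERE score > 0")
names = [d[0] for d in cur.description]
for row in cur:
    record = dict(zip(names, row))
    tags = {k: v for k, v in record.items() if k in tag_columns}
    fields = {k: v for k, v in record.items() if k not in tag_columns}
    print("measurement=sql", "tags=", tags, "fields=", fields)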
+
+
+# # Read metrics from Microsoft SQL Server
+# [[inputs.sqlserver]]
+#   ## Specify instances to monitor with a list of connection strings.
+#   ## All connection parameters are optional.
+#   ## By default, the host is localhost, listening on default port, TCP 1433.
+#   ##   for Windows, the user is the currently running AD user (SSO).
+#   ##   See https://github.com/denisenkom/go-mssqldb for detailed connection
+#   ##   parameters, in particular, tls connections can be created like so:
+#   ##   "encrypt=true;certificate=;hostNameInCertificate="
+#   servers = [
+#     "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
+#   ]
+#
+#   ## Authentication method
+#   ## valid methods: "connection_string", "AAD"
+#   # auth_method = "connection_string"
+#
+#   ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2
+#   ## In the config file, the sql server plugin section should be repeated, each with a set of servers for a specific database_type.
+#   ## Possible values for database_type are "AzureSQLDB", "AzureSQLManagedInstance" or "SQLServer"
+#
+#   ## Queries enabled by default for database_type = "AzureSQLDB" are -
+#   ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties,
+#   ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers
+#
+#   # database_type = "AzureSQLDB"
+#
+#   ## A list of queries to include. If not specified, all the above listed queries are used.
+#   # include_query = []
+#
+#   ## A list of queries to explicitly ignore.
+#   # exclude_query = []
+#
+#   ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are -
+#   ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats,
+#   ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers
+#
+#   # database_type = "AzureSQLManagedInstance"
+#
+#   # include_query = []
+#
+#   # exclude_query = []
+#
+#   ## Queries enabled by default for database_type = "SQLServer" are -
+#   ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks,
+#   ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu
+#
+#   database_type = "SQLServer"
+#
+#   include_query = []
+#
+#   ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default
+#   exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"]
+#
+#   ## The following are legacy settings; use them only if you are using the earlier flavor of queries. It is recommended
+#   ## to use the new mechanism of identifying the database_type and thereby use its corresponding queries.
+#
+#   ## Optional parameter, setting this to 2 will use a new version
+#   ## of the collection queries that break compatibility with the original
+#   ## dashboards.
+#   ## Version 2 is compatible with SQL Server 2012 and later versions, and also with Azure SQL DB
+#   # query_version = 2
+#
+#   ## If you are using AzureDB, setting this to true will gather resource utilization metrics
+#   # azuredb = false
+
+
+# # Statsd UDP/TCP Server
+# [[inputs.statsd]]
+#   ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
+#   protocol = "udp"
+#
+#   ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
+#   max_tcp_connections = 250
+#
+#   ## Enable TCP keep alive probes (default=false)
+#   tcp_keep_alive = false
+#
+#   ## Specifies the keep-alive period for an active network connection.
+#   ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
+#   ## Defaults to the OS configuration.
+#   # tcp_keep_alive_period = "2h"
+#
+#   ## Address and port to host UDP listener on
+#   service_address = ":8125"
+#
+#   ## The following configuration options control when telegraf clears its cache
+#   ## of previous values. If set to false, then telegraf will only clear its
+#   ## cache when the daemon is restarted.
+#   ## Reset gauges every interval (default=true)
+#   delete_gauges = true
+#   ## Reset counters every interval (default=true)
+#   delete_counters = true
+#   ## Reset sets every interval (default=true)
+#   delete_sets = true
+#   ## Reset timings & histograms every interval (default=true)
+#   delete_timings = true
+#
+#   ## Percentiles to calculate for timing & histogram stats
+#   percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0]
+#
+#   ## separator to use between elements of a statsd metric
+#   metric_separator = "_"
+#
+#   ## Parses tags in the datadog statsd format
+#   ## http://docs.datadoghq.com/guides/dogstatsd/
+#   parse_data_dog_tags = false
+#
+#   ## Parses datadog extensions to the statsd format
+#   datadog_extensions = false
+#
+#   ## Parses distributions metric as specified in the datadog statsd format
+#   ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition
+#   datadog_distributions = false
+#
+#   ## Statsd data translation templates, more info can be read here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
+#   # templates = [
+#   #     "cpu.* measurement*"
+#   # ]
+#
+#   ## Number of UDP messages allowed to queue up, once filled,
+#   ## the statsd server will start dropping packets
+#   allowed_pending_messages = 10000
+#
+#   ## Number of timing/histogram values to track per-measurement in the
+#   ## calculation of percentiles. Raising this limit increases the accuracy
+#   ## of percentiles but also increases the memory usage and cpu time.
+#   percentile_limit = 1000
+#
+#   ## Max duration (TTL) for each metric to stay cached/reported without being updated.
+#   # max_ttl = "1000h"
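[Illustrative aside, not part of the upstream sample: a minimal Python client
sending a counter and a timing to the statsd listener on :8125 configured above.
The metric names are assumptions for illustration.]

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# counter: +1 on app.requests; timing: 320 ms on app.latency
for packet in (b"app.requests:1|c", b"app.latency:320|ms"):
    sock.sendto(packet, ("127.0.0.1", 8125))
sock.close()
# With metric_separator = "_", "app.requests" is stored as "app_requests".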
+
+
+# # Suricata stats and alerts plugin
+# [[inputs.suricata]]
+#   ## Data sink for Suricata stats and alerts logs
+#   # This is expected to be a filename of a
+#   # unix socket to be created for listening.
+#   source = "/var/run/suricata-stats.sock"
+#
+#   # Delimiter for flattening field keys, e.g. subitem "alert" of "detect"
+#   # becomes "detect_alert" when delimiter is "_".
+#   delimiter = "_"
+#
+#   ## Detect alert logs
+#   # alerts = false
+
+
+# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
+# [[inputs.syslog]]
+#   ## Specify an ip or hostname with port - e.g., tcp://localhost:6514, tcp://10.0.0.1:6514
+#   ## Protocol, address and port to host the syslog receiver.
+#   ## If no host is specified, then localhost is used.
+#   ## If no port is specified, 6514 is used (RFC5425#section-4.1).
+#   server = "tcp://:6514"
+#
+#   ## TLS Config
+#   # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
+#   # tls_cert = "/etc/telegraf/cert.pem"
+#   # tls_key = "/etc/telegraf/key.pem"
+#
+#   ## Period between keep alive probes.
+#   ## 0 disables keep alive probes.
+#   ## Defaults to the OS configuration.
+#   ## Only applies to stream sockets (e.g. TCP).
+#   # keep_alive_period = "5m"
+#
+#   ## Maximum number of concurrent connections (default = 0).
+#   ## 0 means unlimited.
+#   ## Only applies to stream sockets (e.g. TCP).
+#   # max_connections = 1024
+#
+#   ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
+#   ## 0 means unlimited.
+#   # read_timeout = "5s"
+#
+#   ## The framing technique with which it is expected that messages are transported (default = "octet-counting"),
+#   ## i.e. whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1)
+#   ## or the non-transparent framing technique (RFC6587#section-3.4.2).
+#   ## Must be one of "octet-counting", "non-transparent".
+#   # framing = "octet-counting"
+#
+#   ## The trailer to be expected in case of non-transparent framing (default = "LF").
+#   ## Must be one of "LF", or "NUL".
+#   # trailer = "LF"
+#
+#   ## Whether to parse in best effort mode or not (default = false).
+#   ## By default best effort parsing is off.
+#   # best_effort = false
+#
+#   ## The RFC standard to use for message parsing
+#   ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support)
+#   ## Must be one of "RFC5424", or "RFC3164".
+#   # syslog_standard = "RFC5424"
+#
+#   ## Character to prepend to SD-PARAMs (default = "_").
+#   ## A syslog message can contain multiple parameters and multiple identifiers within the structured data section.
+#   ## E.g., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
+#   ## For each combination a field is created.
+#   ## Its name is created by concatenating identifier, sdparam_separator, and parameter name.
+#   # sdparam_separator = "_"
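[Illustrative aside, not part of the upstream sample: a minimal Python sender
demonstrating the octet-counting framing expected by the syslog input above --
each frame is the message's byte length, a space, then the RFC5424 message.
The hostname and app name in the message are assumptions.]

import socket

msg = b"<165>1 2021-03-01T00:00:00Z demo-host demo-app - - - hello from a framing test"
frame = str(len(msg)).encode() + b" " + msg   # e.g. b"79 <165>1 ..."
with socket.create_connection(("127.0.0.1", 6514)) as sock:
    sock.sendall(frame)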
+
+
+# # Parse the new lines appended to a file
+# [[inputs.tail]]
+#   ## File names or a pattern to tail.
+#   ## These accept standard unix glob matching rules, but with the addition of
+#   ## ** as a "super asterisk". ie:
+#   ##   "/var/log/**.log"     -> recursively find all .log files in /var/log
+#   ##   "/var/log/*/*.log"    -> find all .log files with a parent dir in /var/log
+#   ##   "/var/log/apache.log" -> just tail the apache log file
+#   ##   "/var/log/log[!1-2]*" -> tail files without 1-2
+#   ##   "/var/log/log[^1-2]*" -> identical behavior as above
+#   ## See https://github.com/gobwas/glob for more examples
+#   ##
+#   files = ["/var/mymetrics.out"]
+#
+#   ## Read file from beginning.
+#   # from_beginning = false
+#
+#   ## Whether file is a named pipe
+#   # pipe = false
+#
+#   ## Method used to watch for file updates. Can be either "inotify" or "poll".
+#   # watch_method = "inotify"
+#
+#   ## Maximum lines of the file to process that have not yet been written by the
+#   ## output. For best throughput set based on the number of metrics on each
+#   ## line and the size of the output's metric_batch_size.
+#   # max_undelivered_lines = 1000
+#
+#   ## Character encoding to use when interpreting the file contents. Invalid
+#   ## characters are replaced using the unicode replacement character. When set
+#   ## to the empty string the data is not decoded to text.
+#   ##   ex: character_encoding = "utf-8"
+#   ##       character_encoding = "utf-16le"
+#   ##       character_encoding = "utf-16be"
+#   ##       character_encoding = ""
+#   # character_encoding = ""
+#
+#   ## Data format to consume.
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+#   data_format = "influx"
+#
+#   ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string.
+#   # path_tag = "path"
+#
+#   ## multiline parser/codec
+#   ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html
+#   # [inputs.tail.multiline]
+#     ## The pattern should be a regexp which matches what you believe to be an
+#     ## indicator that the field is part of an event consisting of multiple lines of log data.
+#     # pattern = "^\s"
+#
+#     ## This field must be either "previous" or "next".
+#     ## If a line matches the pattern, "previous" indicates that it belongs to the previous line,
+#     ## whereas "next" indicates that the line belongs to the next one.
+#     # match_which_line = "previous"
+#
+#     ## The invert_match field can be true or false (defaults to false).
+#     ## If true, a message not matching the pattern will constitute a match of the multiline
+#     ## filter and match_which_line will be applied. (vice-versa is also true)
+#     # invert_match = false
+#
+#     ## After the specified timeout, this plugin sends a multiline event even if no new pattern
+#     ## is found to start a new event. The default timeout is 5s.
+#     # timeout = "5s"
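[Illustrative aside, not part of the upstream sample: a tiny Python demo of the
multiline behavior above with pattern = "^\s" and match_which_line = "previous" --
indented continuation lines are folded into the preceding event. The log lines
are made up.]

import re

pattern = re.compile(r"^\s")   # an indented line continues the previous event
lines = ["error: query failed", "  at dispatcher.go:42", "  at main.go:7", "info: retrying"]
events = []
for line in lines:
    if pattern.match(line) and events:
        events[-1] += "\n" + line   # belongs to the previous line
    else:
        events.append(line)
print(events)                       # -> two events instead of four lines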
+
+
+# # Generic TCP listener
+# [[inputs.tcp_listener]]
+#   # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
+#   # socket_listener plugin
+#   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
+
+
+# # Generic UDP listener
+# [[inputs.udp_listener]]
+#   # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
+#   # socket_listener plugin
+#   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
+
+
+# # Read metrics from VMware vCenter
+# [[inputs.vsphere]]
+#   ## List of vCenter URLs to be monitored. These three lines must be uncommented
+#   ## and edited for the plugin to work.
+#   vcenters = [ "https://vcenter.local/sdk" ]
+#   username = "user@corp.local"
+#   password = "secret"
+#
+#   ## VMs
+#   ## Typical VM metrics (if omitted or empty, all metrics are collected)
+#   # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected)
+#   # vm_exclude = [] # Inventory paths to exclude
+#   vm_metric_include = [
+#     "cpu.demand.average",
+#     "cpu.idle.summation",
+#     "cpu.latency.average",
+#     "cpu.readiness.average",
+#     "cpu.ready.summation",
+#     "cpu.run.summation",
+#     "cpu.usagemhz.average",
+#     "cpu.used.summation",
+#     "cpu.wait.summation",
+#     "mem.active.average",
+#     "mem.granted.average",
+#     "mem.latency.average",
+#     "mem.swapin.average",
+#     "mem.swapinRate.average",
+#     "mem.swapout.average",
+#     "mem.swapoutRate.average",
+#     "mem.usage.average",
+#     "mem.vmmemctl.average",
+#     "net.bytesRx.average",
+#     "net.bytesTx.average",
+#     "net.droppedRx.summation",
+#     "net.droppedTx.summation",
+#     "net.usage.average",
+#     "power.power.average",
+#     "virtualDisk.numberReadAveraged.average",
+#     "virtualDisk.numberWriteAveraged.average",
+#     "virtualDisk.read.average",
+#     "virtualDisk.readOIO.latest",
+#     "virtualDisk.throughput.usage.average",
+#     "virtualDisk.totalReadLatency.average",
+#     "virtualDisk.totalWriteLatency.average",
+#     "virtualDisk.write.average",
+#     "virtualDisk.writeOIO.latest",
+#     "sys.uptime.latest",
+#   ]
+#   # vm_metric_exclude = [] ## Nothing is excluded by default
+#   # vm_instances = true ## true by default
+#
+#   ## Hosts
+#   ## Typical host metrics (if omitted or empty, all metrics are collected)
+#   # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected)
+#   # host_exclude = [] # Inventory paths to exclude
+#   host_metric_include = [
+#     "cpu.coreUtilization.average",
+#     "cpu.costop.summation",
+#     "cpu.demand.average",
+#     "cpu.idle.summation",
+#     "cpu.latency.average",
+#     "cpu.readiness.average",
+#     "cpu.ready.summation",
+#     "cpu.swapwait.summation",
+#     "cpu.usage.average",
+#     "cpu.usagemhz.average",
+#     "cpu.used.summation",
+#     "cpu.utilization.average",
+#     "cpu.wait.summation",
+#     "disk.deviceReadLatency.average",
+#     "disk.deviceWriteLatency.average",
+#     "disk.kernelReadLatency.average",
+#     "disk.kernelWriteLatency.average",
+#     "disk.numberReadAveraged.average",
+#     "disk.numberWriteAveraged.average",
+#     "disk.read.average",
+#     "disk.totalReadLatency.average",
+#     "disk.totalWriteLatency.average",
+#     "disk.write.average",
+#     "mem.active.average",
"mem.latency.average", +# "mem.state.latest", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.totalCapacity.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.errorsRx.summation", +# "net.errorsTx.summation", +# "net.usage.average", +# "power.power.average", +# "storageAdapter.numberReadAveraged.average", +# "storageAdapter.numberWriteAveraged.average", +# "storageAdapter.read.average", +# "storageAdapter.write.average", +# "sys.uptime.latest", +# ] +# ## Collect IP addresses? Valid values are "ipv4" and "ipv6" +# # ip_addresses = ["ipv6", "ipv4" ] +# +# # host_metric_exclude = [] ## Nothing excluded by default +# # host_instances = true ## true by default +# +# +# ## Clusters +# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # cluster_exclude = [] # Inventory paths to exclude +# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected +# # cluster_metric_exclude = [] ## Nothing excluded by default +# # cluster_instances = false ## false by default +# +# ## Datastores +# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) +# # datastore_exclude = [] # Inventory paths to exclude +# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected +# # datastore_metric_exclude = [] ## Nothing excluded by default +# # datastore_instances = false ## false by default +# +# ## Datacenters +# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # datacenter_exclude = [] # Inventory paths to exclude +# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected +# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. +# # datacenter_instances = false ## false by default +# +# ## Plugin Settings +# ## separator character to use for measurement and field names (default: "_") +# # separator = "_" +# +# ## number of objects to retrieve per query for realtime resources (vms and hosts) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_objects = 256 +# +# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_metrics = 256 +# +# ## number of go routines to use for collection and discovery of objects and metrics +# # collect_concurrency = 1 +# # discover_concurrency = 1 +# +# ## the interval before (re)discovering objects subject to metrics collection (default: 300s) +# # object_discovery_interval = "300s" +# +# ## timeout applies to any of the api request made to vcenter +# # timeout = "60s" +# +# ## When set to true, all samples are sent as integers. This makes the output +# ## data types backwards compatible with Telegraf 1.9 or lower. Normally all +# ## samples from vCenter, with the exception of percentages, are integer +# ## values, but under some conditions, some averaging takes place internally in +# ## the plugin. Setting this flag to "false" will send values as floats to +# ## preserve the full precision when averaging takes place. 
+#   # use_int_samples = true
+#
+#   ## Custom attributes from vCenter can be very useful for queries in order to slice the
+#   ## metrics along different dimensions and for forming ad-hoc relationships. They are disabled
+#   ## by default, since they can add a considerable amount of tags to the resulting metrics. To
+#   ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
+#   ## to select the attributes you want to include.
+#   # custom_attribute_include = []
+#   # custom_attribute_exclude = ["*"]
+#
+#   ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In
+#   ## some versions (6.7, 7.0 and possibly more), certain metrics, such as cluster metrics, may be reported
+#   ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing
+#   ## it too much may cause performance issues.
+#   # metric_lookback = 3
+#
+#   ## Optional SSL Config
+#   # ssl_ca = "/path/to/cafile"
+#   # ssl_cert = "/path/to/certfile"
+#   # ssl_key = "/path/to/keyfile"
+#   ## Use SSL but skip chain & host verification
+#   # insecure_skip_verify = false
+#
+#   ## The Historical Interval value must match EXACTLY the interval in the daily
+#   # "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals
+#   # historical_interval = "5m"
+
+
+# # A Webhooks Event collector
+# [[inputs.webhooks]]
+#   ## Address and port to host Webhook listener on
+#   service_address = ":1619"
+#
+#   [inputs.webhooks.filestack]
+#     path = "/filestack"
+#
+#   [inputs.webhooks.github]
+#     path = "/github"
+#     # secret = ""
+#
+#   [inputs.webhooks.mandrill]
+#     path = "/mandrill"
+#
+#   [inputs.webhooks.rollbar]
+#     path = "/rollbar"
+#
+#   [inputs.webhooks.papertrail]
+#     path = "/papertrail"
+#
+#   [inputs.webhooks.particle]
+#     path = "/particle"
+
+
+# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
+# [[inputs.zipkin]]
+#   # path = "/api/v1/spans" # URL path for span data
+#   # port = 9411 # Port on which Telegraf listens
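[Illustrative aside, not part of the upstream sample: posting one minimal Zipkin
v1 span to the listener above with Python's standard library. The trace/span ids
and operation name are made-up values.]

import json, time, urllib.request

span = [{
    "traceId": "00000000000000aa", "id": "00000000000000aa",
    "name": "demo-op",
    "timestamp": int(time.time() * 1e6),   # microseconds
    "duration": 1500,
    "annotations": [], "binaryAnnotations": [],
}]
req = urllib.request.Request(
    "http://127.0.0.1:9411/api/v1/spans",
    data=json.dumps(span).encode(),
    headers={"Content-Type": "application/json"},
)
urllib.request.urlopen(req)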
+[[outputs.http]]
+  url = "http://TaosadapterIp:TaosadapterPort/influxdb/v1/write?db=Dbname"
+  method = "POST"
+  timeout = "5s"
+  username = "root"
+  password = "taosdata"
+  data_format = "influx"
+  influx_max_line_bytes = 250
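[Illustrative aside, not part of the diff: a minimal Python sketch of the two
taosAdapter write paths this output and the JMeter plans below exercise --
POSTing SQL to /rest/sql and influx line protocol to /influxdb/v1/write, both
with HTTP Basic auth root/taosdata. The host is assumed; the port and database
names follow the examples used in this repo.]

import base64, urllib.request

BASE = "http://127.0.0.1:6041"   # taosAdapter (assumed host)
AUTH = "Basic " + base64.b64encode(b"root:taosdata").decode()  # = cm9vdDp0YW9zZGF0YQ==

def post(path: str, body: str) -> bytes:
    req = urllib.request.Request(BASE + path, data=body.encode(),
                                 headers={"Authorization": AUTH})
    with urllib.request.urlopen(req, timeout=5) as resp:
        return resp.read()

print(post("/rest/sql", "create database if not exists db0;"))
print(post("/influxdb/v1/write?db=db0",
           "example_metric,host=demo value=42 1614530008000000000"))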
diff --git a/tests/stability-scripts/taosadapter_stability/jmeter_jmx/createStaticData.jmx b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/createStaticData.jmx
new file mode 100644
index 0000000000000000000000000000000000000000..29317a3460abf812b52492488c0886bfc17deb75
--- /dev/null
+++ b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/createStaticData.jmx
@@ -0,0 +1,332 @@
[JMeter test plan; the XML markup was stripped during extraction and only text
nodes survive, so the 332-line body is summarized rather than reproduced.
Recoverable content: thread groups that POST to http://172.26.10.86:6041/rest/sql
with the header "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" to (1) create
database db0, (2) create 10 super tables db0.stb${stb_counter} with columns
(ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double,
c7 binary(100), c8 nchar(200), c9 bool) and matching tags t1..t9, (3) create
child tables db0.tb${tb_counter} using those super tables, (4) loop 1000000
times inserting rows via "insert into db0.tb${tb_counter} values (${ts_counter},
...)", and finally drop database db0. Counters: stb_counter 1-10, tb_counter
1-10000, ts_counter starting at 1614530008000 with increment 100. Two result
collectors with standard saveConfig settings are attached.]
diff --git a/tests/stability-scripts/taosadapter_stability/jmeter_jmx/error_insert.jmx b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/error_insert.jmx
new file mode 100644
index 0000000000000000000000000000000000000000..39327a1a9c842f8a07e12eced557adaae5a5ea86
--- /dev/null
+++ b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/error_insert.jmx
@@ -0,0 +1,886 @@
[JMeter test plan; XML markup stripped during extraction, 886-line body
summarized. Recoverable content: three infinite-loop thread groups, one per host
172.26.10.86, 172.26.10.85 and 172.26.10.84, each POSTing the CSV variable
${error_sql} to /rest/sql, /rest/sqlt, /rest/sqlutc,
/influxdb/v1/write?db=db1&precision=ms, /opentsdb/v1/put/telnet/db2,
/opentsdb/v1/put/json/db3 and the long_db1/long_db2/long_db3 variants on port
6041, throttled by a constant throughput timer at 600.0. Rows come from
error_sql.csv (semicolon-separated, UTF-8, shareMode.all) and authentication is
the header "Authorization: Basic cm9vdDp0YW9zZGF0YQ==". Two result collectors
with standard saveConfig settings are attached.]
diff --git a/tests/stability-scripts/taosadapter_stability/jmeter_jmx/error_sql.csv b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/error_sql.csv
new file mode 100644
index 0000000000000000000000000000000000000000..c21b84c7d729d69cba77c6d79e61c7aef06a1176
--- /dev/null
+++ b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/error_sql.csv
@@ -0,0 +1,63 @@
+"insert into db1.stb11 values (nahr,);"
+"insert into db1.stb22 (now,1,1);"
+"insert into db1.stb13 (now,1,1) (now, 1,1, 2);"
+"insert into db1.tb1 (now, 1,2 ,3 ,4);"
+"insert into db1.stb16 values (now, null);"
+"insert into db1.stb15 values (1614530008000,a, b, %$, d, e, f, g);"
+"stb14,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=,region=us-west-1,service=10,service_environment=staging,service_version=1614530008000,team=NYC col_value=32.261068 1614530008000"
+"stb14,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=1,region=us-west-1,service=10,service_environment=staging,service_version=2,team=NYC col_value=32.261068"
+"stb14,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=2,region=us-west-1,service=10,service_environment=staging,service_version=1614530008000,team=NYC 32.261068 1614530008000"
+stb15 1614530008000 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack= region=us-west-1 service=10 service_environment=staging service_version= team=NYC
+stb15 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=1 region=us-west-1 service=10 service_environment=staging service_version=1 team=NYC
+stb15 1614530008000 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=1 region=us-west-1 service=10 service_environment=staging service_version=1 team=NYC
+"{""metric"": ""stb16"", ""timestamp"":1614530008000, ""value"":32.261068286779754, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}"
+"{""metric"": ""stb16"", ""timestamp"":, ""value"":32.261068286779754, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":1,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}"
+"{""metric"": ""stb16"", ""timestamp"":1614530008000, ""value"":, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}"
+"insert into db2.stb21 values (nahr,);"
+"insert into db2.stb32 (now,1,1);"
+"insert into db2.stb23 (now,1,1) (now, 1,1, 2);"
+"insert into db2.tb1 (now, 1,2 ,3 ,4);"
+"insert into db2.stb26 values (now, null);"
+"insert into db2.stb25 values (1614530008000,a, b, %$, d, e, f, g);"
+"stb24,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=,region=us-west-1,service=10,service_environment=staging,service_version=1614530008000,team=NYC col_value=32.261068 1614530008000"
+"stb24,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=1,region=us-west-1,service=10,service_environment=staging,service_version=2,team=NYC col_value=32.261068"
+"stb24,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=2,region=us-west-1,service=10,service_environment=staging,service_version=1614530008000,team=NYC 32.261068 1614530008000"
+stb25 1614530008000 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack= region=us-west-1 service=10 service_environment=staging service_version= team=NYC
+stb25 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=1 region=us-west-1 service=10 service_environment=staging service_version=1 team=NYC
+stb25 1614530008000 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=1 region=us-west-1 service=10 service_environment=staging service_version=1 team=NYC
+"{""metric"": ""stb26"", ""timestamp"":1614530008000, ""value"":32.261068286779754, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}"
+"{""metric"": ""stb26"", ""timestamp"":, ""value"":32.261068286779754, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":1,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}"
+"{""metric"": ""stb26"", ""timestamp"":1614530008000, ""value"":, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}"
+"insert into db3.stb31 values (nahr,);"
+"insert into db3.stb12 (now,1,1);"
+"insert into db3.stb33 (now,1,1) (now, 1,1, 2);"
+"insert into db3.tb1 (now, 1,2 ,3 ,4);"
+"insert into db3.stb36 values (now, null);"
+"insert into db3.stb35 values (1614530008000,a, b, %$, d, e, f, g);"
+"stb34,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=,region=us-west-1,service=10,service_environment=staging,service_version=1614530008000,team=NYC col_value=32.261068 1614530008000" +"stb34,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=1,region=us-west-1,service=10,service_environment=staging,service_version=2,team=NYC col_value=32.261068" +"stb34,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=2,region=us-west-1,service=10,service_environment=staging,service_version=1614530008000,team=NYC 32.261068 1614530008000" +stb35 1614530008000 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack= region=us-west-1 service=10 service_environment=staging service_version= team=NYC +stb35 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=1 region=us-west-1 service=10 service_environment=staging service_version=1 team=NYC +stb35 1614530008000 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=1 region=us-west-1 service=10 service_environment=staging service_version=1 team=NYC +"{""metric"": ""stb36"", ""timestamp"":1614530008000, ""value"":32.261068286779754, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}" +"{""metric"": ""stb36"", ""timestamp"":, ""value"":32.261068286779754, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":1,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}" +"{""metric"": ""stb36"", ""timestamp"":1614530008000, ""value"":, ""tags"":{""arch"":""x64"",""datacenter"":""us-west-1b"",""hostname"":""host_5"",""os"":""Ubuntu16"",""rack"":,""region"":""us-west-1"",""service"":""10"",""service_environment"":""staging"",""service_version"":""1"",""team"":""NYC""}}" +"insert into long_db1.stb11 values (nahr,);" +"insert into long_db1.stb22 (now,1,1);" +"insert into long_db1.stb13 (now,1,1) (now, 1,1, 2);" +"insert into long_db1.tb1 (now, 1,2 ,3 ,4);" +"insert into long_db1.stb16 values (now, null);" +"insert into long_db1.stb15 values (1614530008000,a, b, %$, d, e, f, g);" +"insert into long_db2.stb21 values (nahr,);" +"insert into long_db2.stb32 (now,1,1);" +"insert into long_db2.stb23 (now,1,1) (now, 1,1, 2);" +"insert into long_db2.tb1 (now, 1,2 ,3 ,4);" +"insert into long_db2.stb26 values (now, null);" +"insert into long_db2.stb25 values (1614530008000,a, b, %$, d, e, f, g);" +"insert into long_db3.stb31 values (nahr,);" +"insert into long_db3.stb12 (now,1,1);" +"insert into long_db3.stb33 (now,1,1) (now, 1,1, 2);" +"insert into long_db3.tb1 (now, 1,2 ,3 ,4);" +"insert into long_db3.stb36 values (now, null);" +"insert into long_db3.stb35 values (1614530008000,a, b, %$, d, e, f, g);" \ No newline at end of file diff --git a/tests/stability-scripts/taosadapter_stability/jmeter_jmx/longInsertData.jmx b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/longInsertData.jmx new file mode 100644 index 0000000000000000000000000000000000000000..e123de698aa44b0452b233d6b36d32d5bb5fc7f8 --- /dev/null +++ b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/longInsertData.jmx @@ -0,0 +1,1540 @@ + + + + + + false + true + true + + + + + + + + continue + + false + 1 + + 1 + 1 + false + + + true + + + + true + + + + false + create database if not 
exists long_db1; + = + + + + + + + + http://172.26.10.86:6041/rest/sql + POST + true + false + true + false + + + + + + + true + + + + false + create database if not exists long_db2; + = + + + + + + + + http://172.26.10.85:6041/rest/sqlt + POST + true + false + true + false + + + + + + + true + + + + false + create database if not exists long_db3; + = + + + + + + + + http://172.26.10.84:6041/rest/sqlutc + POST + true + false + true + false + + + + + + + + continue + + false + 1 + + 1 + + false + + + true + + + + true + + + + false + CREATE TABLE if not exists long_db1.stb11 (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(16), c8 nchar(16), c9 bool) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(16), t8 nchar(16), t9 bool); + = + + + + + + + + http://172.26.10.86:6041/rest/sql + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db1.stb12 (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(16), c8 nchar(16), c9 bool) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(16), t8 nchar(16), t9 bool); + = + + + + + + + + http://172.26.10.86:6041/rest/sqlt + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db1.stb13 (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(16), c8 nchar(16), c9 bool) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(16), t8 nchar(16), t9 bool); + = + + + + + + + + http://172.26.10.86:6041/rest/sqlutc + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db2.stb21 (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(16), c8 nchar(16), c9 bool) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(16), t8 nchar(16), t9 bool); + = + + + + + + + + http://172.26.10.85:6041/rest/sql + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db2.stb22 (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(16), c8 nchar(16), c9 bool) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(16), t8 nchar(16), t9 bool); + = + + + + + + + + http://172.26.10.85:6041/rest/sqlt + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db2.stb23 (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(16), c8 nchar(16), c9 bool) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(16), t8 nchar(16), t9 bool); + = + + + + + + + + http://172.26.10.85:6041/rest/sqlutc + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db3.stb31 (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(16), c8 nchar(16), c9 bool) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(16), t8 nchar(16), t9 bool); + = + + + + + + + + http://172.26.10.84:6041/rest/sql + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db3.stb32 (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(16), c8 nchar(16), c9 bool) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 
binary(16), t8 nchar(16), t9 bool); + = + + + + + + + + http://172.26.10.84:6041/rest/sqlt + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db3.stb33 (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(16), c8 nchar(16), c9 bool) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(16), t8 nchar(16), t9 bool); + = + + + + + + + + http://172.26.10.84:6041/rest/sqlutc + POST + true + false + true + false + + + + + + + + continue + + false + 500 + + 2 + + false + + + true + + + + true + + + + false + CREATE TABLE if not exists long_db1.tb${tb_counter1000} using long_db1.stb11 tags (1, 2, 3, 4, 5.5, 6.6, "binary10", "nchar10", true) + = + + + + + + + + http://172.26.10.86:6041/rest/sql + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db1.tb${tb_counter2000} using long_db1.stb12 tags (1, 2, 3, 4, 5.5, 6.6, "binary10", "nchar10", true) + = + + + + + + + + http://172.26.10.86:6041/rest/sqlt + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db1.tb${tb_counter3000} using long_db1.stb13 tags (1, 2, 3, 4, 5.5, 6.6, "binary10", "nchar10", true) + = + + + + + + + + http://172.26.10.86:6041/rest/sqlutc + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db2.tb${tb_counter1000} using long_db2.stb21 tags (1, 2, 3, 4, 5.5, 6.6, "binary10", "nchar10", true) + = + + + + + + + + http://172.26.10.85:6041/rest/sql + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db2.tb${tb_counter2000} using long_db2.stb22 tags (1, 2, 3, 4, 5.5, 6.6, "binary10", "nchar10", true) + = + + + + + + + + http://172.26.10.85:6041/rest/sqlt + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db2.tb${tb_counter3000} using long_db2.stb23 tags (1, 2, 3, 4, 5.5, 6.6, "binary10", "nchar10", true) + = + + + + + + + + http://172.26.10.85:6041/rest/sqlutc + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db3.tb${tb_counter1000} using long_db3.stb31 tags (1, 2, 3, 4, 5.5, 6.6, "binary10", "nchar10", true) + = + + + + + + + + http://172.26.10.84:6041/rest/sql + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db3.tb${tb_counter2000} using long_db3.stb32 tags (1, 2, 3, 4, 5.5, 6.6, "binary10", "nchar10", true) + = + + + + + + + + http://172.26.10.84:6041/rest/sqlt + POST + true + false + true + false + + + + + + + true + + + + false + CREATE TABLE if not exists long_db3.tb${tb_counter3000} using long_db3.stb33 tags (1, 2, 3, 4, 5.5, 6.6, "binary10", "nchar10", true) + = + + + + + + + + http://172.26.10.84:6041/rest/sqlutc + POST + true + false + true + false + + + + + + + true + + + + false + select count(tbname) from long_db1.stb11; + = + + + + + + + + http://172.26.10.86:6041/rest/sql + POST + true + false + true + false + + + + + + + true + + + + false + select count(*) from long_db2.stb22; + = + + + + + + + + http://172.26.10.85:6041/rest/sqlt + POST + true + false + true + false + + + + + + + true + + + + false + select * from long_db3.stb33 limit 100; + = + + + + + + + + http://172.26.10.84:6041/rest/sqlutc + POST + true + false + true + false + + + + + + + + continue + + false + -1 + + 2 + + false + + + true + + + + true + + + + false + insert into 
long_db1.tb${tb_counter1000} values (${ts_counter}, ${value_counter}, ${value_counter}, ${value_counter}, ${value_counter}, ${value_counter}.${value_counter}, ${value_counter}.${value_counter}, "binary10", "nchar10", true); + = + + + + + + + + http://172.26.10.86:6041/rest/sql + POST + true + false + true + false + + + + + + + true + + + + false + insert into long_db1.tb${tb_counter2000} values (${ts_counter}, ${value_counter}, ${value_counter}, ${value_counter}, ${value_counter}, ${value_counter}.${value_counter}, ${value_counter}.${value_counter}, "binary10", "nchar10", true); + = + + + + + + + + http://172.26.10.86:6041/rest/sqlt + POST + true + false + true + false + + + + + + + true + + + + false + insert into long_db1.tb${tb_counter3000} values (${ts_counter}, ${value_counter}, ${value_counter}, ${value_counter}, ${value_counter}, ${value_counter}.${value_counter}, ${value_counter}.${value_counter}, "binary10", "nchar10", true); + = + + + + + + + + http://172.26.10.86:6041/rest/sqlutc + POST + true + false + true + false + + + + + + + true + + + + false + stb14,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,rack=${tb_counter1000},region=us-west-1,service=10,service_environment=staging,service_version=${tb_counter1000},team=NYC col_value=32.261068 ${ts_counter} + = + + + + + + + + http://172.26.10.86:6041/influxdb/v1/write?db=long_db1&precision=ms + POST + true + false + true + false + + + + + + + true + + + + false + stb15 ${ts_counter} 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=${tb_counter2000} region=us-west-1 service=10 service_environment=staging service_version=${tb_counter2000} team=NYC + = + + + + + + + + http://172.26.10.86:6041/opentsdb/v1/put/telnet/long_db1 + POST + true + false + true + false + + + + + + + true + + + + false + {"metric": "stb16", "timestamp":${ts_counter}, "value":32.261068286779754, "tags":{"arch":"x64","datacenter":"us-west-1b","hostname":"host_5","os":"Ubuntu16","rack":"${tb_counter3000}","region":"us-west-1","service":"10","service_environment":"staging","service_version":"${tb_counter3000}","team":"NYC"}} + = + + + + + + + + http://172.26.10.86:6041/opentsdb/v1/put/json/long_db1 + POST + true + false + true + false + + + + + + + true + + + + false + insert into long_db2.tb${tb_counter1000} values (${ts_counter}, ${value_counter}, ${value_counter}, ${value_counter}, ${value_counter}, ${value_counter}.${value_counter}, ${value_counter}.${value_counter}, "binary10", "nchar10", true); + = + + + + + + + + http://172.26.10.85:6041/rest/sql + POST + true + false + true + false + + + + + + + true + + + + false + insert into long_db2.tb${tb_counter2000} values (${ts_counter}, ${value_counter}, ${value_counter}, ${value_counter}, ${value_counter}, ${value_counter}.${value_counter}, ${value_counter}.${value_counter}, "binary10", "nchar10", true); + = + + + + + + + + http://172.26.10.85:6041/rest/sqlt + POST + true + false + true + false + + + + + + + true + + + + false + insert into long_db2.tb${tb_counter3000} values (${ts_counter}, ${value_counter}, ${value_counter}, ${value_counter}, ${value_counter}, ${value_counter}.${value_counter}, ${value_counter}.${value_counter}, "binary10", "nchar10", true); + = + + + + + + + + http://172.26.10.85:6041/rest/sqlutc + POST + true + false + true + false + + + + + + + true + + + + false + 
[Remainder of longInsertData.jmx — the JMeter XML markup did not survive extraction; the element text that remains shows the tail of the long-running insert plan:
  - counter-driven schemaless writes into long_db2 via http://172.26.10.85:6041 and long_db3 via http://172.26.10.84:6041: InfluxDB line protocol (stb24/stb34), OpenTSDB telnet (stb25/stb35) and OpenTSDB JSON (stb26/stb36), with rack and service_version tags taken from ${tb_counter1000}/${tb_counter2000}/${tb_counter3000} and timestamps from ${ts_counter};
  - SQL inserts into long_db3.tb${tb_counter1000|2000|3000} through /rest/sql, /rest/sqlt and /rest/sqlutc;
  - verification queries against both the long_db* and db* tables (count(tbname), count(*), limit 10/10000/100000/1000000 scans, and avg/max/count/sum aggregations with group by c7 and interval (1s)/(10s)), spread across the three hosts;
  - a constant throughput value of 50000.0, two result collectors, an HTTP Header Manager sending Authorization: Basic cm9vdDp0YW9zZGF0YQ==, and five counters: tb_counter1000 (1-1000, step 1), tb_counter2000 (1001-2000), tb_counter3000 (2001-3000), ts_counter (from 1614530008000, step 1) and value_counter (1-10).]
diff --git a/tests/stability-scripts/taosadapter_stability/jmeter_jmx/query.jmx b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/query.jmx
new file mode 100644
index 0000000000000000000000000000000000000000..0192424ce3fed443355fec001684b0517f178e03
--- /dev/null
+++ b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/query.jmx
@@ -0,0 +1,391 @@
[query.jmx, 391 lines — XML markup not recoverable. The surviving element text shows three thread groups (what survives suggests one thread each with loop count -1, i.e. run until stopped), one per host: 172.26.10.86, 172.26.10.85 and 172.26.10.84. Each group POSTs the CSV-driven body ${query_sql} to that host's /rest/sql, /rest/sqlt and /rest/sqlutc endpoints on port 6041. A CSV Data Set Config reads query_sql.csv (UTF-8, "," delimiter, shareMode.all) into the query_sql variable; two result collectors and an HTTP Header Manager with Authorization: Basic cm9vdDp0YW9zZGF0YQ== complete the plan.]
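Although the markup is gone, the surviving text pins down what every one of these samplers does: POST a raw SQL string to one of taosAdapter's REST endpoints with Basic auth (cm9vdDp0YW9zZGF0YQ== decodes to root:taosdata). A minimal Python sketch of a single sampler, assuming the third-party requests package and taking host, endpoint and header straight from the plan text:

import requests

def rest_query(host, sql, endpoint="/rest/sql"):
    """POST one SQL statement to taosAdapter and return the JSON reply."""
    resp = requests.post(
        f"http://{host}:6041{endpoint}",
        data=sql.encode("utf-8"),
        headers={"Authorization": "Basic cm9vdDp0YW9zZGF0YQ=="},  # root:taosdata
        timeout=10,
    )
    return resp.json()

if __name__ == "__main__":
    # One of the verification queries the plans issue against host .86:
    print(rest_query("172.26.10.86", "select count(*) from long_db2.tb2;"))

The sqlt and sqlutc variants appear to differ only in how result timestamps are rendered (numeric vs. UTC strings), which would explain why each plan exercises all three endpoints with the same statements.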
diff --git a/tests/stability-scripts/taosadapter_stability/jmeter_jmx/query_sql.csv b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/query_sql.csv
new file mode 100644
index 0000000000000000000000000000000000000000..5da0bf755ab0e346f66535c6778cfe40a3a5199a
--- /dev/null
+++ b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/query_sql.csv
@@ -0,0 +1,64 @@
+select * from db0.stb1 limit 1000;
+select * from db0.stb3 limit 10000;
+select * from db0.stb5 limit 100000;
+"select avg(c1), max(c2), count(c3), sum(c4) from db0.stb1 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-08 00:00:00"" interval (1h);"
+"select avg(c1), max(c2), count(c3), sum(c4) from db0.stb1 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-08 00:00:00"" group by c7;"
+show db0.stables;
+select count(tbname) from db1.tb1;
+select count(*) from db2.tb2;
+select * from db3.tb3 limit 10;
+select * from db2.stb25 limit 10000;
+select * from db3.stb31 limit 100000;
+select * from db1.stb16 limit 100000;
+"select avg(c1), max(c2), count(c3), sum(c4) from db3.stb31 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-01 00:35:00"" group by c7;"
+"select avg(c1), max(c2), count(c3), sum(c4) from db2.stb23 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-01 00:35:00"" interval (1s);"
+"select avg(c1), max(c2), count(c3), sum(c4) from db1.stb13 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-01 00:35:00"" interval (10s);"
+select count(tbname) from long_db1.tb1;
+select count(*) from long_db2.tb2;
+select * from long_db3.tb3 limit 10;
+select * from long_db2.stb25 limit 10000;
+select * from long_db3.stb31 limit 100000;
+select * from long_db1.stb16 limit 100000;
+"select avg(c1), max(c2), count(c3), sum(c4) from long_db3.stb31 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-01 00:35:00"" group by c7;"
+"select avg(c1), max(c2), count(c3), sum(c4) from long_db2.stb23 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-01 00:35:00"" interval (1s);"
+"select avg(c1), max(c2), count(c3), sum(c4) from long_db1.stb13 where ts>""2021-03-01 00:00:00"" and ts < ""2021-03-01 00:35:00"" interval (10s);"
+show taosadapter1_collectd.stables;
+select count(*) from taosadapter1_collectd.`cpu.6.cpu.interrupt`;
+select last(*) from taosadapter2_collectd.`cpu.2.cpu.interrupt`;
+select * from taosadapter3_collectd.`cpu.2.cpu.system` limit 100;
+select count(*) from taosadapter1_telegraf.mem;
+select last(*) from taosadapter2_telegraf.cpu;
+select * from taosadapter3_telegraf.kernel;
+select count(*) from taosadapter1_tcollector.`net.stat.tcp.retransmit`;
+select last(*) from taosadapter2_tcollector.`proc.meminfo.shmem`;
+select * from taosadapter3_tcollector.`sys.numa.allocation`;
+select count(*) from taosadapter1_icinga2.`icinga.host.rta_min`;
+select last(*) from taosadapter2_icinga2.`icinga.host.acknowledgement`;
+select * from taosadapter3_icinga2.`icinga.host.rta_crit`;
+select count(*) from taosadapter1_node_exporter.`node_time_seconds`;
+select last(*) from taosadapter2_node_exporter.`go_memstats_next_gc_bytes`;
+select * from taosadapter3_node_exporter.`node_sockstat_TCP_mem`;
+select count(*) from taosadapter2_statsd.taosadapter2_statsd_agent_count50;
+select c199 from db0.stb2;
+select null from db1.stb11;
+select * from db2.tb0;
+select blank from db3.stb31 limit 100000;
+select null from long_db1.stb11;
+select * from long_db2.tb0;
+select blank from long_db3.stb31 limit 100000;
+select count(*) from taosadapter1_collectd.cpu.6.cpu.interrupt;
+select last(*) from taosadapter2_collectd.cpu.2.cpu.interr;
+select * from taosadapter3_coll*.`cpu.2.cpu.system` limit 100;
+select count(*) from taosadapter1_telegraf.`mem`;
+select last(*) from `taosadapter2_telegraf`.cpu;
+select * from taos_telegraf.kernel;
+select count(*) from `taosadapter1_tcollector`.`net.stat.tcp.retransmit`;
+select last(*) from taosadapter2_`tcollector`.`proc.meminfo.shmem`;
+select * from taosadapter3_tcollector.sys.numa.`allocation`;
+select count(*) from taosadapter1_icinga2.:icinga.host.rta_min;
+"select last(*) from taosadapter2_icinga2.""""""`icinga.host.acknowledgement`;"
+select * from taosadapter3_icinga2.```icinga.host.rta_crit`;
+select count(*) from taosadapter1_node_exporter..`node_time_seconds`;
+select last(*) from ..taosadapter2_node_exporter.`go_memstats_next_gc_bytes`;
+select * from taosa%%dapter3___node_exporter.`node_sockstat_TCP_mem`;
+select count(*) from taosadapter2_statsd%.taosadapter2_statsd_agent_count50;
\ No newline at end of file
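Two things about this file stand out. The aggregation rows are CSV-quoted (with "" doubling) because the statements contain commas, and roughly the last third — from select c199 from db0.stb2; onward — appears to be deliberately broken SQL (nonexistent columns, misplaced backticks, %, : and .. inside identifiers), presumably negative cases to confirm taosAdapter keeps answering when fed bad statements. A sketch of the data-driven loop query.jmx performs, using Python's csv module as a stand-in for the CSV Data Set Config and assuming the requests package:

import csv
import requests

AUTH = {"Authorization": "Basic cm9vdDp0YW9zZGF0YQ=="}  # root:taosdata

with open("query_sql.csv", newline="", encoding="utf-8") as f:
    for row in csv.reader(f):
        sql = row[0]  # one statement per line; the reader collapses "" escapes
        r = requests.post("http://172.26.10.86:6041/rest/sql",
                          data=sql.encode("utf-8"), headers=AUTH, timeout=10)
        # Failures are expected for the malformed tail of the file; a stability
        # run only cares that the adapter responds and stays up.
        print(r.status_code, sql[:60])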
diff --git a/tests/stability-scripts/taosadapter_stability/jmeter_jmx/run_all.sh b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/run_all.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8afeb94bef9982459ffb74224e4eae63596a1b03
--- /dev/null
+++ b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/run_all.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+jmeter -n -t createStaticData.jmx >> createStaticData.log
+jmeter -n -t longInsertData.jmx >> longInsertData.log
+jmeter -n -t error_insert.jmx >> error_insert.log
+jmeter -n -t query.jmx >> query.log
+while true
+do
+    jmeter -n -t shortInsertData.jmx >> ./shortInsertData.log
+done
diff --git a/tests/stability-scripts/taosadapter_stability/jmeter_jmx/run_shortInsert.sh b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/run_shortInsert.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2cc59165e21a8dad9ad90af9cd4caa417edb6742
--- /dev/null
+++ b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/run_shortInsert.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+while true
+do
+    jmeter -n -t shortInsertData.jmx >> ./shortInsertData.log
+done
diff --git a/tests/stability-scripts/taosadapter_stability/jmeter_jmx/shortInsertData.jmx b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/shortInsertData.jmx
new file mode 100644
index 0000000000000000000000000000000000000000..cc034e905c4a5b691be1586840bd1bc56b339af1
--- /dev/null
+++ b/tests/stability-scripts/taosadapter_stability/jmeter_jmx/shortInsertData.jmx
@@ -0,0 +1,2263 @@
[shortInsertData.jmx, 2263 lines — XML markup not recoverable. The surviving element text opens with a one-shot setup pass: create database if not exists db1/db2/db3, one per host (172.26.10.86 via /rest/sql, 172.26.10.85 via /rest/sqlt, 172.26.10.84 via /rest/sqlutc), then CREATE TABLE if not exists for super tables stb11-stb13, stb21-stb23 and stb31-stb33, each with nine columns (tinyint, smallint, int, bigint, float, double, binary(16), nchar(16), bool) and nine matching tags, followed by one seed write per schemaless path and database: InfluxDB line protocol (stb14/stb24/stb34), OpenTSDB telnet (stb15/stb25/stb35) and OpenTSDB JSON (stb16/stb26/stb36), all with the fixed timestamp 1626006833640.]
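The seed writes above go through taosAdapter's schemaless endpoints, each of which accepts its wire format as a raw POST body. A sketch of the db1 trio on host .86, using the exact payloads and URLs that survive in the plan text (OpenTSDB JSON tag set abridged) and again assuming requests:

import json
import requests

HOST = "http://172.26.10.86:6041"
AUTH = {"Authorization": "Basic cm9vdDp0YW9zZGF0YQ=="}  # root:taosdata

# InfluxDB line protocol -> super table stb14 in db1
influx_line = ("stb14,arch=x64,datacenter=us-west-1b,hostname=host_5,os=Ubuntu16,"
               "rack=13,region=us-west-1,service=10,service_environment=staging,"
               "service_version=0,team=NYC col_value=32.261068 1626006833640")
requests.post(f"{HOST}/influxdb/v1/write?db=db1&precision=ms",
              data=influx_line, headers=AUTH, timeout=10)

# OpenTSDB telnet format -> super table stb15
telnet_line = ("stb15 1626006833640 32.261068286779754 arch=x64 "
               "datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=13 "
               "region=us-west-1 service=10 service_environment=staging "
               "service_version=0 team=NYC")
requests.post(f"{HOST}/opentsdb/v1/put/telnet/db1",
              data=telnet_line, headers=AUTH, timeout=10)

# OpenTSDB JSON format -> super table stb16 (tag set abridged here)
doc = {"metric": "stb16", "timestamp": 1626006833640,
       "value": 32.261068286779754,
       "tags": {"arch": "x64", "rack": "13", "team": "NYC"}}
requests.post(f"{HOST}/opentsdb/v1/put/json/db1",
              data=json.dumps(doc), headers=AUTH, timeout=10)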
[The remainder of shortInsertData.jmx runs two load passes and a teardown:
  - a table-creation pass (the surviving text suggests 2 threads x 500 loops): CREATE TABLE if not exists db1.tb${tb_counter1000|2000|3000} using db1.stb11|stb12|stb13 tags (1, 2, 3, 4, 5.5, 6.6, "binary10", "nchar10", true), and likewise for db2 (stb21-23) and db3 (stb31-33), issued through each host's /rest/sql, /rest/sqlt and /rest/sqlutc endpoints, interleaved with counter-tagged schemaless writes (rack and service_version set from the tb_counters) and spot checks (count(tbname) from db1.stb11, count(*) from db2.stb22, select * from db3.stb33 limit 100);
  - a bulk insert pass (8 threads x 125000 loops): insert into dbN.tb${tb_counter...} values (${ts_counter}, ${value_counter}, ...) through the REST endpoints, plus ${ts_counter}-stamped InfluxDB/OpenTSDB writes to stb14-16, stb24-26 and stb34-36, with verification selects (count(tbname) from db1.tb1, count(*) from db2.tb2, select * from db3.tb3 limit 1000) after each round;
  - a teardown pass (single loop): drop table if exists for db2.tb2/db2.stb22, db3.tb3/db3.stb33 and db1.tb1/db1.stb11, heavier read-back queries (limit 10000/100000/1000000 scans and avg/max/count/sum aggregations with group by c7 and interval (1s)/(10s)), two result collectors, the Basic-auth header manager, the same five counters as the other plans (tb_counter1000 1-1000, tb_counter2000 1001-2000, tb_counter3000 2001-3000, ts_counter from 1614530008000, value_counter 1-10), and a final thread group that drops databases db1, db2 and db3, one per host.]
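Most of the load comes from that bulk insert pass. A sketch of one iteration, assuming requests and using itertools to mimic the plan's wrapping counters (the ranges are the ones the counters declare):

import itertools
import requests

AUTH = {"Authorization": "Basic cm9vdDp0YW9zZGF0YQ=="}  # root:taosdata

tb_counter1000 = itertools.cycle(range(1, 1001))  # 1..1000, step 1
value_counter = itertools.cycle(range(1, 11))     # 1..10
ts_counter = itertools.count(1614530008000)       # ms epoch, step 1

def one_insert():
    tb, val, ts = next(tb_counter1000), next(value_counter), next(ts_counter)
    # ${value_counter}.${value_counter} expands to a float like 5.5
    sql = (f'insert into db1.tb{tb} values ({ts}, {val}, {val}, {val}, {val}, '
           f'{val}.{val}, {val}.{val}, "binary10", "nchar10", true);')
    requests.post("http://172.26.10.86:6041/rest/sql",
                  data=sql.encode("utf-8"), headers=AUTH, timeout=10)

one_insert()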
diff --git a/tests/system-test/2-query/TD-12427.py b/tests/system-test/2-query/TD-12427.py
new file mode 100644
index 0000000000000000000000000000000000000000..846c45d4f30183b5f58656d5684d6611206b8d49
--- /dev/null
+++ b/tests/system-test/2-query/TD-12427.py
@@ -0,0 +1,78 @@
+###################################################################
+# Copyright (c) 2020 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+import os
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+        self.ts = 1420041600000  # 2015-01-01 00:00:00, start time of the first record
+        self.num = 10
+
+    def caseDescription(self):
+        '''
+        case1: [TD-12427]:
+            forbid elapsed() and twa() from being used in continuous queries (streams)
+        '''
+        return
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if "community" in selfPath:
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        buildPath = ""
+        for root, dirs, files in os.walk(projPath):
+            if "taosd" in files:
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if "packaging" not in rootRealPath:
+                    buildPath = root[:len(root) - len("/build/bin")]
+                    break
+        return buildPath
+
+    def run(self):
+        tdSql.prepare()
+        tdSql.execute("create database if not exists testdb keep 36500;")
+        tdSql.execute("use testdb;")
+        tdSql.execute("create stable st (ts timestamp, id int, value double) tags(hostname binary(10), ind int);")
+        for i in range(self.num):
+            tdSql.execute("insert into sub_%s using st tags('host_%s', %d) values (%d, %d, %f);" % (str(i), str(i), i * 10, self.ts + 100 * i, i * 2, i + 10.00))
+            tdSql.execute("insert into sub_%s using st tags('host_%s', %d) values (%d, %d, %f);" % (str(i), str(i), i * 10, self.ts + 200 * i, i * 2, i + 10.00))
+            tdSql.execute("insert into sub_%s using st tags('host_%s', %d) values (%d, %d, %f);" % (str(i), str(i), i * 10, self.ts + 300 * i, i * 2, i + 10.00))
+            tdSql.execute("insert into sub_%s using st tags('host_%s', %d) values (%d, %d, %f);" % (str(i), str(i), i * 10, self.ts + 10000 * i, i * 2, i + 10.00))
+
+        tdSql.execute('create table test_stream as select max(value) from sub_1 interval(1m) sliding(30s)')
+        tdSql.query('show tables like "test_stream"')
+        tdSql.checkRows(1)
+        tdSql.error('create table test_elapsed1 as select elapsed(ts) from sub_1 interval(1m) sliding(30s)')
+        tdSql.error('create table test_twa1 as select twa(value) from sub_1 interval(1m) sliding(30s)')
+        tdSql.error('create table test_elapsed2 as select elapsed(ts) from st interval(1m) sliding(30s) group by tbname')
+        tdSql.error('create table test_twa2 as select twa(value) from st interval(1m) sliding(30s) group by tbname')
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/TD-12909.py b/tests/system-test/2-query/TD-12909.py
index ed325ea427db89488e3e74cba38d4a8f2594a420..b04e4178fd25e74a842507d2814dfdc15765c188 100644
--- a/tests/system-test/2-query/TD-12909.py
+++ b/tests/system-test/2-query/TD-12909.py
@@ -86,17 +86,21 @@ class TDTestCase:
         tdSql.query("select count(*) from st where hostname between 'abc' and 'def'")
         tdSql.error("select count(*) from st where hostname between 1 and 2 or sum(1)")
-        tdSql.execute("select count(*) from st where hostname < max(123)")
+        tdSql.error("select count(*) from st where hostname < max(123)")
 
-        tdSql.execute("select count(*) from st where hostname < max('abc')")
-        tdSql.execute("select count(*) from st where hostname < max(min(123))")
+        tdSql.error("select count(*) from st where hostname < max('abc')")
+        tdSql.error("select count(*) from st where hostname < max(min(123))")
 
-        tdSql.execute("select count(*) from st where hostname < sum('abc')")
-        tdSql.execute("select count(*) from st where hostname < sum(min(123))")
+        tdSql.error("select count(*) from st where hostname < sum('abc')")
+        tdSql.error("select count(*) from st where hostname < sum(min(123))")
 
-        tdSql.execute("select count(*) from st where hostname < diff('abc')")
-        tdSql.execute("select count(*) from st where hostname < diff(min(123))")
+        tdSql.error("select count(*) from st where hostname < diff('abc')")
+        tdSql.error("select count(*) from st where hostname < diff(min(123))")
 
+        tdSql.error("select count(*) from st where hostname < tbname")
+        tdSql.error("select count(*) from st where ts > 0 and tbname in ('d1', 'd2') and tbname-2")
+
+        tdSql.query("select count(*) from st where id > 10000000000000")
 
     def stop(self):
         tdSql.close()
diff --git a/tests/system-test/fulltest-query.sh b/tests/system-test/fulltest-query.sh
index 54706a07922d656b261404390ce41b38d2529547..269274f9d92527a41a60134cc3401b3a38785c45 100755
--- a/tests/system-test/fulltest-query.sh
+++ b/tests/system-test/fulltest-query.sh
@@ -20,7 +20,8 @@ python3 ./test.py -f 2-query/TD-12344.py
 #python3 ./test.py -f 2-query/TD-12594.py
 python3 ./test.py -f 2-query/TD-12614.py
 python3 ./test.py -f 2-query/function_elapsed.py
-
+python3 ./test.py -f 2-query/TD-12909.py
+python3 ./test.py -f 2-query/TD-12427.py