diff --git a/CMakeLists.txt b/CMakeLists.txt
index 565ab32f005226a13368e2df444929625092e9ad..5fee2e548aaf66e0769a0da569c6d910969179ba 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -19,6 +19,7 @@ SET(TD_MEM_CHECK FALSE)
SET(TD_PAGMODE_LITE FALSE)
SET(TD_SOMODE_STATIC FALSE)
+SET(TD_POWER FALSE)
SET(TD_GODLL FALSE)
SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
diff --git a/README.md b/README.md
index 776d7a154b3249f77efad33c618a181e11f87133..3fbd166f49935e1d9408fa81e2d15caebcfc152b 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
[![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
-![Docker Pulls](https://img.shields.io/docker/pulls/tdengine/tdengine)
+[![Docker Pulls](https://img.shields.io/docker/pulls/tdengine/tdengine)](https://hub.docker.com/repository/docker/tdengine/tdengine)
[![TDengine](TDenginelogo.png)](https://www.taosdata.com)
@@ -29,24 +29,69 @@ For user manual, system design and architecture, engineering blogs, refer to [TD
# Building
At the moment, TDengine only supports building and running on Linux systems. You can choose to [install from packages](https://www.taosdata.com/en/getting-started/#Install-from-Package) or from the source code. This quick guide is for installation from the source only.
-To build TDengine, use [CMake](https://cmake.org/) 2.8 or higher versions in the project directory. Install CMake for example on Ubuntu:
-```
-sudo apt-get install -y cmake build-essential
+To build TDengine, use [CMake](https://cmake.org/) 3.5 or higher versions in the project directory.
+
+## Install tools
+
+### Ubuntu & Debian:
+```bash
+sudo apt-get install -y gcc cmake build-essential git
```
To compile and package the JDBC driver source code, you should have a Java jdk-8 or higher and Apache Maven 2.7 or higher installed.
-To install openjdk-8 on Ubuntu:
+To install openjdk-8:
+```bash
+sudo apt-get install -y openjdk-8-jdk
+```
+
+To install Apache Maven:
+```bash
+sudo apt-get install -y maven
+```
+
+### CentOS 7:
+```bash
+sudo yum install -y gcc gcc-c++ make cmake3 epel-release git
+sudo yum remove -y cmake
+sudo ln -s /usr/bin/cmake3 /usr/bin/cmake
+```
+
+To install openjdk-8:
+```bash
+sudo yum install -y java-1.8.0-openjdk
```
-sudo apt-get install openjdk-8-jdk
+
+To install Apache Maven:
+```bash
+sudo yum install -y maven
```
-To install Apache Maven on Ubuntu:
+
+### CentOS 8 & Fedora:
+```bash
+sudo dnf install -y gcc gcc-c++ make cmake epel-release git
```
-sudo apt-get install maven
+
+To install openjdk-8:
+```bash
+sudo dnf install -y java-1.8.0-openjdk
+```
+
+To install Apache Maven:
+```bash
+sudo dnf install -y maven
```
-Build TDengine:
+## Get the source code
+- github:
+```bash
+git clone https://github.com/taosdata/TDengine.git
+cd TDengine
```
+
+## Build TDengine
+
+```bash
mkdir debug && cd debug
cmake .. && cmake --build .
```
@@ -54,12 +99,12 @@ cmake .. && cmake --build .
To compile on an ARM processor (aarch64 or aarch32), please add option CPUTYPE as below:
aarch64:
-```cmd
+```bash
cmake .. -DCPUTYPE=aarch64 && cmake --build .
```
aarch32:
-```cmd
+```bash
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
@@ -124,6 +169,7 @@ The TDengine community has also kindly built some of their own connectors! Follo
- [Rust Connector](https://github.com/taosdata/TDengine/tree/master/tests/examples/rust)
- [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos)
+- [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/tests/examples/lua)
# How to run the test cases and how to add a new test case?
TDengine's test framework and all test cases are fully open source.
diff --git a/cmake/define.inc b/cmake/define.inc
index a63b9f1732a8c404a1b701899fae4b8588e8f7ca..0a25dd9ee71df0d16cdf921fde6a923529192421 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -17,6 +17,10 @@ IF (TD_GODLL)
ADD_DEFINITIONS(-D_TD_GO_DLL_)
ENDIF ()
+IF (TD_POWER)
+ ADD_DEFINITIONS(-D_TD_POWER_)
+ENDIF ()
+
IF (TD_MEM_CHECK)
ADD_DEFINITIONS(-DTAOS_MEM_CHECK)
ENDIF ()
diff --git a/cmake/input.inc b/cmake/input.inc
index 0235ba42d33ae1cfe3659331ca3e777c3e5e8bac..f90b10a0870732d9b0c5de548a4c4c75f52137a3 100755
--- a/cmake/input.inc
+++ b/cmake/input.inc
@@ -27,6 +27,11 @@ IF (${SOMODE} MATCHES "static")
MESSAGE(STATUS "Link so using static mode")
ENDIF ()
+IF (${DBNAME} MATCHES "power")
+ SET(TD_POWER TRUE)
+ MESSAGE(STATUS "power is true")
+ENDIF ()
+
IF (${DLLTYPE} MATCHES "go")
SET(TD_GODLL TRUE)
MESSAGE(STATUS "input dll type: " ${DLLTYPE})
diff --git a/cmake/install.inc b/cmake/install.inc
index 997101c8d920f9bfab01ff96e933ea73fc5e2420..7a92a396e32bdd60a7fb07f5466f243c73816a16 100755
--- a/cmake/install.inc
+++ b/cmake/install.inc
@@ -2,9 +2,14 @@ IF (TD_LINUX)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})")
- INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR})")
+ INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Linux ${TD_VER_NUMBER})")
ELSEIF (TD_WINDOWS)
- SET(CMAKE_INSTALL_PREFIX C:/TDengine)
+ IF (TD_POWER)
+ SET(CMAKE_INSTALL_PREFIX C:/PowerDB)
+ ELSE ()
+ SET(CMAKE_INSTALL_PREFIX C:/TDengine)
+ ENDIF ()
+
IF (NOT TD_GODLL)
#INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
#INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector)
@@ -14,8 +19,14 @@ ELSEIF (TD_WINDOWS)
INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver)
- INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver)
- INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
+ INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver)
+
+ IF (TD_POWER)
+ INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/power.exe DESTINATION .)
+ ELSE ()
+ INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
+ ENDIF ()
+
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
@@ -29,5 +40,5 @@ ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})")
- INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Darwin)")
-ENDIF ()
\ No newline at end of file
+ INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Darwin ${TD_VER_NUMBER})")
+ENDIF ()
diff --git a/cmake/version.inc b/cmake/version.inc
index 8c025201421e285ea54a895225d90205680f1e84..c620d753a6babeff993469d5c48a9718f8f00d8d 100644
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -1,42 +1,69 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
-SET(TD_VER_1 "2")
-SET(TD_VER_2 "0")
-SET(TD_VER_3 "2")
-SET(TD_VER_4 "3")
-SET(TD_VER_GIT "d711657139620f6c50f362597020705b8ad26bd2")
-SET(TD_VER_GIT_INTERNAL "1d74ae24c541ffbb280e8630883c0236cd45f8c7")
-
-SET(TD_VER_VERTYPE "stable")
-SET(TD_VER_CPUTYPE "x64")
-SET(TD_VER_OSTYPE "Linux")
+IF (DEFINED VERNUMBER)
+ SET(TD_VER_NUMBER ${VERNUMBER})
+ELSE ()
+ SET(TD_VER_NUMBER "2.0.2.0")
+ENDIF ()
-SET(TD_VER_COMPATIBLE "2.0.0.0")
-STRING(TIMESTAMP TD_VER_DATE "%Y-%m-%d %H:%M:%S")
+IF (DEFINED VERCOMPATIBLE)
+ SET(TD_VER_COMPATIBLE ${VERCOMPATIBLE})
+ELSE ()
+ SET(TD_VER_COMPATIBLE "2.0.0.0")
+ENDIF ()
-IF (TD_LINUX_64)
- SET(TD_VER_CPUTYPE "x64")
+IF (DEFINED GITINFO)
+ SET(TD_VER_GIT ${GITINFO})
+ELSE ()
+ SET(TD_VER_GIT "community")
ENDIF ()
-IF (TD_LINUX_32)
- SET(TD_VER_CPUTYPE "x86")
+IF (DEFINED GITINFOI)
+ SET(TD_VER_GIT_INTERNAL ${GITINFOI})
+ELSE ()
+ SET(TD_VER_GIT_INTERNAL "internal")
ENDIF ()
-IF (TD_ARM_64)
- SET(TD_VER_CPUTYPE "aarch64")
+IF (DEFINED VERDATE)
+ SET(TD_VER_DATE ${VERDATE})
+ELSE ()
+ STRING(TIMESTAMP TD_VER_DATE "%Y-%m-%d %H:%M:%S")
ENDIF ()
-IF (TD_ARM_32)
- SET(TD_VER_CPUTYPE "aarch32")
+IF (DEFINED VERTYPE)
+ SET(TD_VER_VERTYPE ${VERTYPE})
+ELSE ()
+ SET(TD_VER_VERTYPE "stable")
ENDIF ()
-
-IF (TD_WINDOWS_64)
- SET(TD_VER_CPUTYPE "x64")
+
+IF (DEFINED CPUTYPE)
+ SET(TD_VER_CPUTYPE ${CPUTYPE})
+ELSE ()
+ IF (TD_WINDOWS_32)
+ SET(TD_VER_CPUTYPE "x86")
+ ELSE ()
+ SET(TD_VER_CPUTYPE "x64")
+ ENDIF ()
ENDIF ()
-IF (TD_WINDOWS_32)
- SET(TD_VER_CPUTYPE "x86")
+IF (DEFINED OSTYPE)
+ SET(TD_VER_OSTYPE ${OSTYPE})
+ELSE ()
+ SET(TD_VER_OSTYPE "Linux")
ENDIF ()
+MESSAGE(STATUS "============= compile version parameter information start ============= ")
+MESSAGE(STATUS "ver number:" ${TD_VER_NUMBER})
+MESSAGE(STATUS "compatible ver number:" ${TD_VER_COMPATIBLE})
+MESSAGE(STATUS "community commit id:" ${TD_VER_GIT})
+MESSAGE(STATUS "internal commit id:" ${TD_VER_GIT_INTERNAL})
+MESSAGE(STATUS "build date:" ${TD_VER_DATE})
+MESSAGE(STATUS "ver type:" ${TD_VER_VERTYPE})
+MESSAGE(STATUS "ver cpu:" ${TD_VER_CPUTYPE})
+MESSAGE(STATUS "os type:" ${TD_VER_OSTYPE})
+MESSAGE(STATUS "============= compile version parameter information end ============= ")
+
+STRING(REPLACE "." "_" TD_LIB_VER_NUMBER ${TD_VER_NUMBER})
+
CONFIGURE_FILE("${TD_COMMUNITY_DIR}/src/util/src/version.c.in" "${TD_COMMUNITY_DIR}/src/util/src/version.c")
diff --git a/documentation20/webdocs/markdowndocs/Documentation-ch.md b/documentation20/webdocs/markdowndocs/Documentation-ch.md
index 12b1b1bd3ad196d3af5176d6cf2676ba2f8d9f5f..4d593cec90d6c6968e03ed51e4ea5e9e8ab73c14 100644
--- a/documentation20/webdocs/markdowndocs/Documentation-ch.md
+++ b/documentation20/webdocs/markdowndocs/Documentation-ch.md
@@ -52,7 +52,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
## [连接器](https://www.taosdata.com/cn/documentation20/connector)
- [C/C++ Connector](https://www.taosdata.com/cn/documentation20/connector/#C/C++-Connector):通过libtaos客户端的库,连接TDengine服务器的主要方法
-- [Java Connector(JDBC)](https://www.taosdata.com/cn/documentation20/connector/#Java-Connector):通过标准的JDBC API,给Java应用提供到TDengine的连接
+- [Java Connector(JDBC)](https://www.taosdata.com/cn/documentation20/connector-java):通过标准的JDBC API,给Java应用提供到TDengine的连接
- [Python Connector](https://www.taosdata.com/cn/documentation20/connector/#Python-Connector):给Python应用提供一个连接TDengine服务器的驱动
- [RESTful Connector](https://www.taosdata.com/cn/documentation20/connector/#RESTful-Connector):提供一最简单的连接TDengine服务器的方式
- [Go Connector](https://www.taosdata.com/cn/documentation20/connector/#Go-Connector):给Go应用提供一个连接TDengine服务器的驱动
diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md
index 293aac8d23f039814720590735f16200f3483a51..b0f8ed276de21cc3fc94535e7f015d4988b5db7f 100644
--- a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md
+++ b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md
@@ -42,7 +42,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
| | 类型 | Bytes | 说明 |
| ---- | :-------: | ------ | ------------------------------------------------------------ |
-| 1 | TIMESTAMP | 8 | 时间戳。最小精度毫秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。 |
+| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。 |
| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31用作Null |
| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63用于NULL |
| 4 | FLOAT | 4 | 浮点型,有效位数6-7,范围 [-3.4E38, 3.4E38] |
@@ -416,7 +416,7 @@ taos> SELECT database();
power |
Query OK, 1 row(s) in set (0.000079s)
```
-如果登录的时候没有指定默认数据库,且没有使用```use``命令切换数据,则返回NULL。
+如果登录的时候没有指定默认数据库,且没有使用 `use` 命令切换数据,则返回NULL。
```
taos> SELECT database();
database() |
@@ -503,10 +503,10 @@ Query OK, 1 row(s) in set (0.001091s)
| % | match with any char sequences | **`binary`** **`nchar`** |
| _ | match with a single char | **`binary`** **`nchar`** |
-1. 同时进行多个字段的范围过滤需要使用关键词AND进行连接不同的查询条件,暂不支持OR连接的查询条件。
-2. 针对某一字段的过滤只支持单一区间的过滤条件。例如:value>20 and value<30是合法的过滤条件, 而Value<20 AND value<>5是非法的过滤条件。
+1. 同时进行多个字段的范围过滤需要使用关键词AND进行连接不同的查询条件,暂不支持OR连接的不同列之间的查询过滤条件。
+2. 针对某一字段的过滤只支持单一时间区间过滤条件。但是针对其他的(普通)列或标签列,可以使用 `OR` 条件进行组合条件的查询过滤。例如:((value > 20 and value < 30) OR (value < 12)) 。
-### Some Examples
+### SQL 示例
- 对于下面的例子,表tb1用以下语句创建
@@ -538,7 +538,7 @@ Query OK, 1 row(s) in set (0.001091s)
SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv;
```
-## SQL函数
+## SQL 函数
### 聚合函数
diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/webdocs/markdowndocs/connector-ch.md
index 6b22004c439af7f054bf987944f0d337a9b01afc..7b6afb75a7ad75d6b94b519cc1e1b438ecbf3b6f 100644
--- a/documentation20/webdocs/markdowndocs/connector-ch.md
+++ b/documentation20/webdocs/markdowndocs/connector-ch.md
@@ -280,365 +280,10 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
取消订阅。 如参数 `keepProgress` 不为0,API会保留订阅的进度信息,后续调用 `taos_subscribe` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。
-## Java Connector
-
-TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。
-
-由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
-
-* libtaos.so
- 在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
-
-* taos.dll
- 在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
-
-> 注意:在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
-
-TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点:
-
-* TDengine 不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。
-* 由于不支持删除和修改,所以也不支持事务操作。
-* 目前不支持表间的 union 操作。
-* 目前不支持嵌套查询(nested query),对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet。
-
-
-## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
-
-| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
-| --- | --- | --- |
-| 2.0.2 | 2.0.0.x 及以上 | 1.8.x |
-| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
-| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
-| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
-
-## TDengine DataType 和 Java DataType
-
-TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
-
-| TDengine DataType | Java DataType |
-| --- | --- |
-| TIMESTAMP | java.sql.Timestamp |
-| INT | java.lang.Integer |
-| BIGINT | java.lang.Long |
-| FLOAT | java.lang.Float |
-| DOUBLE | java.lang.Double |
-| SMALLINT, TINYINT |java.lang.Short |
-| BOOL | java.lang.Boolean |
-| BINARY, NCHAR | java.lang.String |
-
-## 如何获取 TAOS-JDBCDriver
-
-### maven 仓库
-
-目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。
-* [sonatype][8]
-* [mvnrepository][9]
-* [maven.aliyun][10]
-
-maven 项目中使用如下 pom.xml 配置即可:
-
-```xml
-
- com.taosdata.jdbc
- taos-jdbcdriver
- 2.0.2
-
-```
-
-### 源码编译打包
-
-下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。
-
-
-## 使用说明
-
-### 获取连接
-
-如下所示配置即可获取 TDengine Connection:
-```java
-Class.forName("com.taosdata.jdbc.TSDBDriver");
-String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
-Connection conn = DriverManager.getConnection(jdbcUrl);
-```
-> 端口 6030 为默认连接端口,JDBC URL 中的 log 为系统本身的监控数据库。
-
-TDengine 的 JDBC URL 规范格式为:
-`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
-
-其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下:
-
-* user:登录 TDengine 用户名,默认值 root。
-* password:用户登录密码,默认值 taosdata。
-* charset:客户端使用的字符集,默认值为系统字符集。
-* cfgdir:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。
-* locale:客户端语言环境,默认值系统当前 locale。
-* timezone:客户端使用的时区,默认值为系统当前时区。
-
-以上参数可以在 3 处配置,`优先级由高到低`分别如下:
-1. JDBC URL 参数
- 如上所述,可以在 JDBC URL 的参数中指定。
-2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
-```java
-public Connection getConn() throws Exception{
- Class.forName("com.taosdata.jdbc.TSDBDriver");
- String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
- Properties connProps = new Properties();
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
- return conn;
-}
-```
-
-3. 客户端配置文件 taos.cfg
-
- linux 系统默认配置文件为 /var/lib/taos/taos.cfg,windows 系统默认配置文件路径为 C:\TDengine\cfg\taos.cfg。
-```properties
-# client default username
-# defaultUser root
-
-# client default password
-# defaultPass taosdata
-
-# default system charset
-# charset UTF-8
-
-# system locale
-# locale en_US.UTF-8
-```
-> 更多详细配置请参考[客户端配置][13]
-
-### 创建数据库和表
-
-```java
-Statement stmt = conn.createStatement();
-
-// create database
-stmt.executeUpdate("create database if not exists db");
-
-// use database
-stmt.executeUpdate("use db");
-
-// create table
-stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
-```
-> 注意:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。
-
-### 插入数据
-
-```java
-// insert data
-int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
-
-System.out.println("insert " + affectedRows + " rows.");
-```
-> now 为系统内部函数,默认为服务器当前时间。
-> `now + 1s` 代表服务器当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。
-
-### 查询数据
-
-```java
-// query data
-ResultSet resultSet = stmt.executeQuery("select * from tb");
-
-Timestamp ts = null;
-int temperature = 0;
-float humidity = 0;
-while(resultSet.next()){
-
- ts = resultSet.getTimestamp(1);
- temperature = resultSet.getInt(2);
- humidity = resultSet.getFloat("humidity");
-
- System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
-}
-```
-> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。
-
-
-### 订阅
-
-#### 创建
-
-```java
-TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
-```
-
-`subscribe` 方法的三个参数含义如下:
-
-* topic:订阅的主题(即名称),此参数是订阅的唯一标识
-* sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据
-* restart:如果订阅已经存在,是重新开始,还是继续之前的订阅
-
-如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic' 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。
-
-#### 消费数据
-
-```java
-int total = 0;
-while(true) {
- TSDBResultSet rs = sub.consume();
- int count = 0;
- while(rs.next()) {
- count++;
- }
- total += count;
- System.out.printf("%d rows consumed, total %d\n", count, total);
- Thread.sleep(1000);
-}
-```
-
-`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的`Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
-
-#### 关闭订阅
-
-```java
-sub.close(true);
-```
-
-`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。
-
-
-### 关闭资源
-
-```java
-resultSet.close();
-stmt.close();
-conn.close();
-```
-> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。
-## 与连接池使用
-
-**HikariCP**
-
-* 引入相应 HikariCP maven 依赖:
-```xml
-
- com.zaxxer
- HikariCP
- 3.4.1
-
-```
-
-* 使用示例如下:
-```java
- public static void main(String[] args) throws SQLException {
- HikariConfig config = new HikariConfig();
- config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
- config.setUsername("root");
- config.setPassword("taosdata");
-
- config.setMinimumIdle(3); //minimum number of idle connection
- config.setMaximumPoolSize(10); //maximum number of connection in the pool
- config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
- config.setIdleTimeout(60000); // max idle time for recycle idle connection
- config.setConnectionTestQuery("describe log.dn"); //validation query
- config.setValidationTimeout(3000); //validation query timeout
-
- HikariDataSource ds = new HikariDataSource(config); //create datasource
-
- Connection connection = ds.getConnection(); // get connection
- Statement statement = connection.createStatement(); // get statement
-
- //query or insert
- // ...
-
- connection.close(); // put back to conneciton pool
-}
-```
-> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。
-> 更多 HikariCP 使用问题请查看[官方说明][5]
-
-**Druid**
-
-* 引入相应 Druid maven 依赖:
-
-```xml
-
- com.alibaba
- druid
- 1.1.20
-
-```
-
-* 使用示例如下:
-```java
-public static void main(String[] args) throws Exception {
- Properties properties = new Properties();
- properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
- properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
- properties.put("username","root");
- properties.put("password","taosdata");
-
- properties.put("maxActive","10"); //maximum number of connection in the pool
- properties.put("initialSize","3");//initial number of connection
- properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
- properties.put("minIdle","3");//minimum number of connection in the pool
-
- properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
-
- properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
- properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
-
- properties.put("validationQuery","describe log.dn"); //validation query
- properties.put("testWhileIdle","true"); // test connection while idle
- properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
- properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
-
- //create druid datasource
- DataSource ds = DruidDataSourceFactory.createDataSource(properties);
- Connection connection = ds.getConnection(); // get connection
- Statement statement = connection.createStatement(); // get statement
-
- //query or insert
- // ...
-
- connection.close(); // put back to conneciton pool
-}
-```
-> 更多 druid 使用问题请查看[官方说明][6]
-
-**注意事项**
-* TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。
-
-如下所示,`select server_status()` 执行成功会返回 `1`。
-```shell
-taos> select server_status();
-server_status()|
-================
-1 |
-Query OK, 1 row(s) in set (0.000141s)
-```
-
-## 与框架使用
-
-* Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate][11]
-* Springboot + Mybatis 中使用,可参考 [springbootdemo][12]
-
-## 常见问题
-
-* java.lang.UnsatisfiedLinkError: no taos in java.library.path
-
- **原因**:程序没有找到依赖的本地函数库 taos。
-
- **解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下将建立如下软链 ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
-
-* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
-
- **原因**:目前 TDengine 只支持 64 位 JDK。
-
- **解决方法**:重新安装 64 位 JDK。
-
-* 其它问题请参考 [Issues][7]
-
## Python Connector
### 安装准备
-* 已安装TDengine, 如果客户端在Windows上,需要安装Windows 版本的TDengine客户端 [(Windows TDengine 客户端安装)](https://www.taosdata.com/cn/documentation/connector/#Windows客户端及程序接口)
+* 已安装TDengine, 如果客户端在Windows上,需要安装Windows 版本的TDengine客户端 [(Windows TDengine 客户端安装)][4]
* 已安装python 2.7 or >= 3.4
* 已安装pip
@@ -1137,18 +782,5 @@ promise2.then(function(result) {
[这里](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`.
-[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[3]: https://github.com/taosdata/TDengine
-[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/
-[5]: https://github.com/brettwooldridge/HikariCP
-[6]: https://github.com/alibaba/druid
-[7]: https://github.com/taosdata/TDengine/issues
-[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[10]: https://maven.aliyun.com/mvn/search
-[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
-[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
-[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
-[14]: https://www.taosdata.com/cn/documentation20/connector/#Windows
-[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B
+[4]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
+
diff --git a/documentation20/webdocs/markdowndocs/connector-java-ch.md b/documentation20/webdocs/markdowndocs/connector-java-ch.md
new file mode 100644
index 0000000000000000000000000000000000000000..da5ea529665faa5e8503890b015af7668104021d
--- /dev/null
+++ b/documentation20/webdocs/markdowndocs/connector-java-ch.md
@@ -0,0 +1,370 @@
+# Java Connector
+
+TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。
+
+由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
+
+* libtaos.so
+ 在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
+
+* taos.dll
+ 在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需单独指定。
+
+> 注意:在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
+
+TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点:
+
+* TDengine 不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。
+* 由于不支持删除和修改,所以也不支持事务操作。
+* 目前不支持表间的 union 操作。
+* 目前不支持嵌套查询(nested query),对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet。
+
+
+## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
+
+| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
+| --- | --- | --- |
+| 2.0.4 | 2.0.0.x 及以上 | 1.8.x |
+| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
+| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
+| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
+
+## TDengine DataType 和 Java DataType
+
+TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
+
+| TDengine DataType | Java DataType |
+| --- | --- |
+| TIMESTAMP | java.sql.Timestamp |
+| INT | java.lang.Integer |
+| BIGINT | java.lang.Long |
+| FLOAT | java.lang.Float |
+| DOUBLE | java.lang.Double |
+| SMALLINT, TINYINT |java.lang.Short |
+| BOOL | java.lang.Boolean |
+| BINARY, NCHAR | java.lang.String |
+
+## 如何获取 TAOS-JDBCDriver
+
+### maven 仓库
+
+目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。
+* [sonatype][8]
+* [mvnrepository][9]
+* [maven.aliyun][10]
+
+maven 项目中使用如下 pom.xml 配置即可:
+
+```xml
+
+ com.taosdata.jdbc
+ taos-jdbcdriver
+ 2.0.4
+
+```
+
+### 源码编译打包
+
+下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。
+
+
+## 使用说明
+
+### 获取连接
+
+如下所示配置即可获取 TDengine Connection:
+```java
+Class.forName("com.taosdata.jdbc.TSDBDriver");
+String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
+Connection conn = DriverManager.getConnection(jdbcUrl);
+```
+> 端口 6030 为默认连接端口,JDBC URL 中的 log 为系统本身的监控数据库。
+
+TDengine 的 JDBC URL 规范格式为:
+`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
+
+其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下:
+
+* user:登录 TDengine 用户名,默认值 root。
+* password:用户登录密码,默认值 taosdata。
+* charset:客户端使用的字符集,默认值为系统字符集。
+* cfgdir:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。
+* locale:客户端语言环境,默认值系统当前 locale。
+* timezone:客户端使用的时区,默认值为系统当前时区。
+
+以上参数可以在 3 处配置,`优先级由高到低`分别如下:
+1. JDBC URL 参数
+ 如上所述,可以在 JDBC URL 的参数中指定。
+2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
+```java
+public Connection getConn() throws Exception{
+ Class.forName("com.taosdata.jdbc.TSDBDriver");
+ String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
+ Properties connProps = new Properties();
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+ Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
+ return conn;
+}
+```
+
+3. 客户端配置文件 taos.cfg
+
+ linux 系统默认配置文件为 /var/lib/taos/taos.cfg,windows 系统默认配置文件路径为 C:\TDengine\cfg\taos.cfg。
+```properties
+# client default username
+# defaultUser root
+
+# client default password
+# defaultPass taosdata
+
+# default system charset
+# charset UTF-8
+
+# system locale
+# locale en_US.UTF-8
+```
+> 更多详细配置请参考[客户端配置][13]
+
+### 创建数据库和表
+
+```java
+Statement stmt = conn.createStatement();
+
+// create database
+stmt.executeUpdate("create database if not exists db");
+
+// use database
+stmt.executeUpdate("use db");
+
+// create table
+stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
+```
+> 注意:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。
+
+### 插入数据
+
+```java
+// insert data
+int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
+
+System.out.println("insert " + affectedRows + " rows.");
+```
+> now 为系统内部函数,默认为服务器当前时间。
+> `now + 1s` 代表服务器当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。
+
+### 查询数据
+
+```java
+// query data
+ResultSet resultSet = stmt.executeQuery("select * from tb");
+
+Timestamp ts = null;
+int temperature = 0;
+float humidity = 0;
+while(resultSet.next()){
+
+ ts = resultSet.getTimestamp(1);
+ temperature = resultSet.getInt(2);
+ humidity = resultSet.getFloat("humidity");
+
+ System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
+}
+```
+> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。
+
+
+### 订阅
+
+#### 创建
+
+```java
+TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
+```
+
+`subscribe` 方法的三个参数含义如下:
+
+* topic:订阅的主题(即名称),此参数是订阅的唯一标识
+* sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据
+* restart:如果订阅已经存在,是重新开始,还是继续之前的订阅
+
+如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。
+
+#### 消费数据
+
+```java
+int total = 0;
+while(true) {
+ TSDBResultSet rs = sub.consume();
+ int count = 0;
+ while(rs.next()) {
+ count++;
+ }
+ total += count;
+ System.out.printf("%d rows consumed, total %d\n", count, total);
+ Thread.sleep(1000);
+}
+```
+
+`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的`Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
+
+#### 关闭订阅
+
+```java
+sub.close(true);
+```
+
+`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。
+
+
+### 关闭资源
+
+```java
+resultSet.close();
+stmt.close();
+conn.close();
+```
+> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。
+## 与连接池使用
+
+**HikariCP**
+
+* 引入相应 HikariCP maven 依赖:
+```xml
+
+ com.zaxxer
+ HikariCP
+ 3.4.1
+
+```
+
+* 使用示例如下:
+```java
+ public static void main(String[] args) throws SQLException {
+ HikariConfig config = new HikariConfig();
+ config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
+ config.setUsername("root");
+ config.setPassword("taosdata");
+
+ config.setMinimumIdle(3); //minimum number of idle connection
+ config.setMaximumPoolSize(10); //maximum number of connection in the pool
+ config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
+ config.setIdleTimeout(60000); // max idle time for recycle idle connection
+ config.setConnectionTestQuery("describe log.dn"); //validation query
+ config.setValidationTimeout(3000); //validation query timeout
+
+ HikariDataSource ds = new HikariDataSource(config); //create datasource
+
+ Connection connection = ds.getConnection(); // get connection
+ Statement statement = connection.createStatement(); // get statement
+
+ //query or insert
+ // ...
+
+        connection.close(); // put back to connection pool
+}
+```
+> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。
+> 更多 HikariCP 使用问题请查看[官方说明][5]
+
+**Druid**
+
+* 引入相应 Druid maven 依赖:
+
+```xml
+
+ com.alibaba
+ druid
+ 1.1.20
+
+```
+
+* 使用示例如下:
+```java
+public static void main(String[] args) throws Exception {
+ Properties properties = new Properties();
+ properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
+ properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
+ properties.put("username","root");
+ properties.put("password","taosdata");
+
+ properties.put("maxActive","10"); //maximum number of connection in the pool
+ properties.put("initialSize","3");//initial number of connection
+ properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
+ properties.put("minIdle","3");//minimum number of connection in the pool
+
+ properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
+
+ properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
+ properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
+
+ properties.put("validationQuery","describe log.dn"); //validation query
+ properties.put("testWhileIdle","true"); // test connection while idle
+ properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
+ properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
+
+ //create druid datasource
+ DataSource ds = DruidDataSourceFactory.createDataSource(properties);
+ Connection connection = ds.getConnection(); // get connection
+ Statement statement = connection.createStatement(); // get statement
+
+ //query or insert
+ // ...
+
+    connection.close(); // put back to connection pool
+}
+```
+> 更多 druid 使用问题请查看[官方说明][6]
+
+**注意事项**
+* TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。
+
+如下所示,`select server_status()` 执行成功会返回 `1`。
+```shell
+taos> select server_status();
+server_status()|
+================
+1 |
+Query OK, 1 row(s) in set (0.000141s)
+```
+
+## 与框架使用
+
+* Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate][11]
+* Springboot + Mybatis 中使用,可参考 [springbootdemo][12]
+
+## 常见问题
+
+* java.lang.UnsatisfiedLinkError: no taos in java.library.path
+
+ **原因**:程序没有找到依赖的本地函数库 taos。
+
+ **解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下将建立如下软链 ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
+
+* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
+
+ **原因**:目前 TDengine 只支持 64 位 JDK。
+
+ **解决方法**:重新安装 64 位 JDK。
+
+* 其它问题请参考 [Issues][7]
+
+[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
+[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
+[3]: https://github.com/taosdata/TDengine
+[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/
+[5]: https://github.com/brettwooldridge/HikariCP
+[6]: https://github.com/alibaba/druid
+[7]: https://github.com/taosdata/TDengine/issues
+[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
+[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
+[10]: https://maven.aliyun.com/mvn/search
+[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
+[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
+[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
+[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
+[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B
diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/webdocs/markdowndocs/faq-ch.md
index b760fe161af4ccd3605f85362206ff9922a4f4e8..27c1054dc82aa37dd96c0aa4fcff26944e8e3b26 100644
--- a/documentation20/webdocs/markdowndocs/faq-ch.md
+++ b/documentation20/webdocs/markdowndocs/faq-ch.md
@@ -23,17 +23,87 @@
客户端遇到链接故障,请按照下面的步骤进行检查:
-1. 确保客户端与服务端版本号是完全一致的,开源社区版和企业版也不能混用
-2. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd*
-3. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name(可在服务器上执行Linux命令hostname -f获得)
-4. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件
-5. 检查防火墙设置,确认TCP/UDP 端口6030-6039 是打开的
-6. 对于Linux上的JDBC(ODBC, Python, Go等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/lib/taos*里, 并且*/usr/local/lib/taos*在系统库函数搜索路径*LD_LIBRARY_PATH*里
-7. 对于windows上的JDBC, ODBC, Python, Go等连接,确保*driver/c/taos.dll*在你的系统搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*)
-8. 如果仍不能排除连接故障,请使用命令行工具nc来分别判断指定端口的TCP和UDP连接是否通畅
+1. 检查网络环境
+ * 云服务器:检查云服务器的安全组是否打开TCP/UDP 端口6030-6042的访问权限
+ * 本地虚拟机:检查网络能否ping通,尽量避免使用`localhost` 作为hostname
+    * 公司服务器:如果为NAT网络环境,请务必检查服务器能否将消息返回至客户端
+
+2. 确保客户端与服务端版本号是完全一致的,开源社区版和企业版也不能混用
+
+3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd*
+
+4. 确认客户端连接时指定了正确的服务器FQDN(Fully Qualified Domain Name,可在服务器上执行Linux命令hostname -f获得)
+
+5. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件
+
+6. 检查防火墙设置,确认TCP/UDP 端口6030-6042 是打开的
+
+7. 对于Linux上的JDBC(ODBC, Python, Go等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/lib/taos*里, 并且*/usr/local/lib/taos*在系统库函数搜索路径*LD_LIBRARY_PATH*里
+
+8. 对于windows上的JDBC, ODBC, Python, Go等连接,确保*driver/c/taos.dll*在你的系统搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*)
+
+9. 如果仍不能排除连接故障,请使用命令行工具nc来分别判断指定端口的TCP和UDP连接是否通畅
检查UDP端口连接是否工作:`nc -vuz {hostIP} {port} `
检查服务器侧TCP端口连接是否工作:`nc -l {port}`
检查客户端侧TCP端口链接是否工作:`nc {hostIP} {port}`
+
+10. 可以使用taos程序内嵌的网络连通检测功能:验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP)。
+
+ taos通过参数 -n 来确定运行服务端功能,还是客户端功能。-n server:表示运行检测服务端功能;-n client:表示运行检测客户端功能。
+
+ 1)首先在服务器上停止taosd服务;
+
+ 2)在服务器上运行taos内嵌的网络连通检测的服务端功能:taos -n server -P 6030 -e 6042 -l 1000;
+
+ 3)在客户端运行taos内嵌的网络连通检测的客户端功能:taos -n client -h host -P 6030 -e 6042 -l 1000;
+
+ -n :指示运行网络连通检测的服务端功能,或客户端功能,缺省值为空,表示不启动网络连通检测;
+
+ -h:指示服务端名称,可以是ip地址或fqdn格式。如:192.168.1.160,或 192.168.1.160:6030,或 hostname1,或hostname1:6030。缺省值是127.0.0.1。
+
+ -P :检测的起始端口号,缺省值是6030;
+
+ -e:检测的结束端口号,必须大于等于起始端口号,缺省值是6042;
+
+ -l:指定检测端口连通的报文长度,最大64000字节,缺省值是1000字节,测试时服务端和客户端必须指定相同;
+
+ 服务端设置的起始端口和结束端口号,必须包含客户端设置的起始端口和结束端口号;
+
+ 对于起始端口号有三种设置方式:缺省值、-h指定、-P指定,优先级是:-P指定 > -h指定 > 缺省值。
+
+ 客户端运行的输出样例:
+
+ `sum@sum-virtualBox /home/sum $ taos -n client -h ubuntu-vbox6`
+
+ `host: ubuntu-vbox6 start port: 6030 end port: 6042 packet len: 1000`
+
+ `tcp port:6030 test ok. udp port:6030 test ok.`
+
+ `tcp port:6031 test ok. udp port:6031 test ok.`
+
+ `tcp port:6032 test ok. udp port:6032 test ok.`
+
+ `tcp port:6033 test ok. udp port:6033 test ok.`
+
+ `tcp port:6034 test ok. udp port:6034 test ok.`
+
+ `tcp port:6035 test ok. udp port:6035 test ok.`
+
+ `tcp port:6036 test ok. udp port:6036 test ok.`
+
+ `tcp port:6037 test ok. udp port:6037 test ok.`
+
+ `tcp port:6038 test ok. udp port:6038 test ok.`
+
+ `tcp port:6039 test ok. udp port:6039 test ok.`
+
+ `tcp port:6040 test ok. udp port:6040 test ok.`
+
+ `tcp port:6041 test ok. udp port:6041 test ok.`
+
+ `tcp port:6042 test ok. udp port:6042 test ok.`
+
+ 如果某个端口不通,会输出 `port:xxxx test fail`的信息。
## 6. 遇到错误“Unexpected generic error in RPC”, 我怎么办?
diff --git a/packaging/deb/powerd b/packaging/deb/powerd
new file mode 100644
index 0000000000000000000000000000000000000000..bb77aab1660545c62e5db27b8a37d5d5937f623f
--- /dev/null
+++ b/packaging/deb/powerd
@@ -0,0 +1,88 @@
+#!/bin/bash
+#
+# Modified from original source: Elastic Search
+# https://github.com/elasticsearch/elasticsearch
+# Thank you to the Elastic Search authors
+#
+# chkconfig: 2345 99 01
+#
+### BEGIN INIT INFO
+# Provides: PowerDB
+# Required-Start: $local_fs $network $syslog
+# Required-Stop: $local_fs $network $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Starts PowerDB powerd
+# Description: Starts PowerDB powerd, a time-series database engine
+### END INIT INFO
+
+set -e
+
+PATH="/bin:/usr/bin:/sbin:/usr/sbin"
+NAME="PowerDB"
+USER="root"
+GROUP="root"
+DAEMON="/usr/local/power/bin/powerd"
+DAEMON_OPTS=""
+PID_FILE="/var/run/$NAME.pid"
+APPARGS=""
+
+# Maximum number of open files
+MAX_OPEN_FILES=65535
+
+. /lib/lsb/init-functions
+
+case "$1" in
+ start)
+
+ log_action_begin_msg "Starting PowerDB..."
+ if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then
+
+ touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"
+
+ if [ -n "$MAX_OPEN_FILES" ]; then
+ ulimit -n $MAX_OPEN_FILES
+ fi
+
+ start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS
+
+ log_end_msg $?
+ fi
+ ;;
+
+ stop)
+ log_action_begin_msg "Stopping PowerDB..."
+ set +e
+ if [ -f "$PID_FILE" ]; then
+ start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null
+ if [ $? -eq 1 ]; then
+ log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
+ elif [ $? -eq 3 ]; then
+ PID="`cat $PID_FILE`"
+ log_failure_msg "Failed to stop PowerDB (pid $PID)"
+ exit 1
+ fi
+ rm -f "$PID_FILE"
+ else
+ log_action_cont_msg "PowerDB was not running"
+ fi
+ log_action_end_msg 0
+ set -e
+ ;;
+
+ restart|force-reload)
+ if [ -f "$PID_FILE" ]; then
+ $0 stop
+ sleep 1
+ fi
+ $0 start
+ ;;
+ status)
+ status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
+ ;;
+ *)
+ exit 1
+ ;;
+esac
+
+exit 0
diff --git a/packaging/release.sh b/packaging/release.sh
index bf355bdcd43f5229b1e7a82a2efda61fde6d7d18..7542a5b4cafb69d5cee16bddfc9a5651eb717b92 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -11,7 +11,9 @@ set -e
# -V [stable | beta]
# -l [full | lite]
# -s [static | dynamic]
+# -d [taos | power]
# -n [2.0.0.3]
+# -m [2.0.0.0]
# set parameters by default value
verMode=edge # [cluster, edge]
@@ -20,9 +22,11 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
pagMode=full # [full | lite]
soMode=dynamic # [static | dynamic]
+dbName=taos # [taos | power]
verNumber=""
+verNumberComp="2.0.0.0"
-while getopts "hv:V:c:o:l:s:n:" arg
+while getopts "hv:V:c:o:l:s:d:n:m:" arg
do
case $arg in
v)
@@ -45,10 +49,18 @@ do
#echo "soMode=$OPTARG"
soMode=$(echo $OPTARG)
;;
+ d)
+ #echo "dbName=$OPTARG"
+ dbName=$(echo $OPTARG)
+ ;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
+ m)
+ #echo "verNumberComp=$OPTARG"
+ verNumberComp=$(echo $OPTARG)
+ ;;
o)
#echo "osType=$OPTARG"
osType=$(echo $OPTARG)
@@ -60,7 +72,9 @@ do
echo " -V [stable | beta] "
echo " -l [full | lite] "
echo " -s [static | dynamic] "
+ echo " -d [taos | power] "
echo " -n [version number] "
+ echo " -m [compatible version number] "
exit 0
;;
?) #unknow option
@@ -70,216 +84,148 @@ do
esac
done
-echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} verNumber=${verNumber}"
+echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} verNumber=${verNumber} verNumberComp=${verNumberComp}"
curr_dir=$(pwd)
if [ "$osType" != "Darwin" ]; then
- script_dir="$(dirname $(readlink -f $0))"
- top_dir="$(readlink -f ${script_dir}/..)"
+ script_dir="$(dirname $(readlink -f $0))"
+ top_dir="$(readlink -f ${script_dir}/..)"
else
- script_dir=`dirname $0`
- cd ${script_dir}
- script_dir="$(pwd)"
- top_dir=${script_dir}/..
+ script_dir=`dirname $0`
+ cd ${script_dir}
+ script_dir="$(pwd)"
+ top_dir=${script_dir}/..
fi
-versioninfo="${top_dir}/src/util/src/version.c"
-
csudo=""
#if command -v sudo > /dev/null; then
-# csudo="sudo"
+# csudo="sudo"
#fi
function is_valid_version() {
- [ -z $1 ] && return 1 || :
+ [ -z $1 ] && return 1 || :
- rx='^([0-9]+\.){3}(\*|[0-9]+)$'
- if [[ $1 =~ $rx ]]; then
- return 0
- fi
- return 1
+ rx='^([0-9]+\.){3}(\*|[0-9]+)$'
+ if [[ $1 =~ $rx ]]; then
+ return 0
+ fi
+ return 1
}
function vercomp () {
- if [[ $1 == $2 ]]; then
- echo 0
- exit 0
- fi
-
- local IFS=.
- local i ver1=($1) ver2=($2)
-
- # fill empty fields in ver1 with zeros
- for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
- ver1[i]=0
- done
-
- for ((i=0; i<${#ver1[@]}; i++)); do
- if [[ -z ${ver2[i]} ]]; then
- # fill empty fields in ver2 with zeros
- ver2[i]=0
- fi
- if ((10#${ver1[i]} > 10#${ver2[i]})); then
- echo 1
- exit 0
- fi
- if ((10#${ver1[i]} < 10#${ver2[i]})); then
- echo 2
- exit 0
- fi
- done
+ if [[ $1 == $2 ]]; then
echo 0
-}
-
-# 1. Read version information
-version=$(cat ${versioninfo} | grep " version" | cut -d '"' -f2)
-compatible_version=$(cat ${versioninfo} | grep " compatible_version" | cut -d '"' -f2)
-
-if [ -z ${verNumber} ]; then
- while true; do
- read -p "Do you want to release a new version? [y/N]: " is_version_change
-
- if [[ ( "${is_version_change}" == "y") || ( "${is_version_change}" == "Y") ]]; then
- read -p "Please enter the new version: " tversion
- while true; do
- if (! is_valid_version $tversion) || [ "$(vercomp $tversion $version)" = '2' ]; then
- read -p "Please enter a correct version: " tversion
- continue
- fi
- version=${tversion}
- break
- done
-
- echo
-
- read -p "Enter the oldest compatible version: " tversion
- while true; do
-
- if [ -z $tversion ]; then
- break
- fi
-
- if (! is_valid_version $tversion) || [ "$(vercomp $version $tversion)" = '2' ]; then
- read -p "enter correct compatible version: " tversion
- else
- compatible_version=$tversion
- break
- fi
- done
+ exit 0
+ fi
+
+ local IFS=.
+ local i ver1=($1) ver2=($2)
+
+ # fill empty fields in ver1 with zeros
+ for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
+ ver1[i]=0
+ done
- break
- elif [[ ( "${is_version_change}" == "n") || ( "${is_version_change}" == "N") ]]; then
- echo "Use old version: ${version} compatible version: ${compatible_version}."
- break
- else
- continue
+ for ((i=0; i<${#ver1[@]}; i++)); do
+ if [[ -z ${ver2[i]} ]]; then
+ # fill empty fields in ver2 with zeros
+ ver2[i]=0
+ fi
+ if ((10#${ver1[i]} > 10#${ver2[i]})); then
+ echo 1
+ exit 0
+ fi
+ if ((10#${ver1[i]} < 10#${ver2[i]})); then
+ echo 2
+ exit 0
fi
done
-else
- echo "old version: $version, new version: $verNumber"
- #if ( ! is_valid_version $verNumber ) || [[ "$(vercomp $version $verNumber)" == '2' ]]; then
- # echo "please enter correct version"
- # exit 0
- #else
- version=${verNumber}
- #fi
-fi
+ echo 0
+}
-echo "=======================new version number: ${version}======================================"
+# 1. check version information
+if (( ! is_valid_version $verNumber ) || ( ! is_valid_version $verNumberComp ) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then
+ echo "please enter correct version"
+ exit 0
+fi
+
+echo "=======================new version number: ${verNumber}, compatible version: ${verNumberComp}======================================"
-# output the version info to the buildinfo file.
build_time=$(date +"%F %R")
-echo "char version[12] = \"${version}\";" > ${versioninfo}
-echo "char compatible_version[12] = \"${compatible_version}\";" >> ${versioninfo}
-echo "char gitinfo[48] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo}
-if [ "$verMode" != "cluster" ]; then
- echo "char gitinfoOfInternal[48] = \"\";" >> ${versioninfo}
-else
- enterprise_dir="${top_dir}/../enterprise"
- cd ${enterprise_dir}
- echo "char gitinfoOfInternal[48] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo}
- cd ${curr_dir}
-fi
-echo "char buildinfo[64] = \"Built by ${USER} at ${build_time}\";" >> ${versioninfo}
-echo "" >> ${versioninfo}
-tmp_version=$(echo $version | tr -s "." "_")
-if [ "$verMode" == "cluster" ]; then
- libtaos_info=${tmp_version}_${osType}_${cpuType}
-else
- libtaos_info=edge_${tmp_version}_${osType}_${cpuType}
-fi
-if [ "$verType" == "beta" ]; then
- libtaos_info=${libtaos_info}_${verType}
-fi
-echo "void libtaos_${libtaos_info}() {};" >> ${versioninfo}
+
+# get commit id from git
+gitinfo=$(git rev-parse --verify HEAD)
+enterprise_dir="${top_dir}/../enterprise"
+cd ${enterprise_dir}
+gitinfoOfInternal=$(git rev-parse --verify HEAD)
+cd ${curr_dir}
# 2. cmake executable file
compile_dir="${top_dir}/debug"
if [ -d ${compile_dir} ]; then
- ${csudo} rm -rf ${compile_dir}
+ ${csudo} rm -rf ${compile_dir}
fi
if [ "$osType" != "Darwin" ]; then
- ${csudo} mkdir -p ${compile_dir}
+ ${csudo} mkdir -p ${compile_dir}
else
- mkdir -p ${compile_dir}
+ mkdir -p ${compile_dir}
fi
cd ${compile_dir}
# check support cpu type
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then
- if [ "$verMode" != "cluster" ]; then
- cmake ../ -DCPUTYPE=${cpuType} -DPAGMODE=${pagMode} -DOSTYPE=${osType} -DSOMODE=${soMode}
- else
- cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode}
- fi
+ if [ "$verMode" != "cluster" ]; then
+ cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode}
+ else
+ cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp}
+ fi
else
- echo "input cpuType=${cpuType} error!!!"
- exit 1
+ echo "input cpuType=${cpuType} error!!!"
+ exit 1
fi
make
cd ${curr_dir}
-# 3. judge the operating system type, then Call the corresponding script for packaging
-#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
-#osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2)
-#echo "osinfo: ${osinfo}"
-
+# 3. Call the corresponding script for packaging
if [ "$osType" != "Darwin" ]; then
- if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]]; then
- echo "====do deb package for the ubuntu system===="
- output_dir="${top_dir}/debs"
- if [ -d ${output_dir} ]; then
- ${csudo} rm -rf ${output_dir}
- fi
- ${csudo} mkdir -p ${output_dir}
- cd ${script_dir}/deb
- ${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType}
-
- echo "====do rpm package for the centos system===="
- output_dir="${top_dir}/rpms"
- if [ -d ${output_dir} ]; then
- ${csudo} rm -rf ${output_dir}
- fi
- ${csudo} mkdir -p ${output_dir}
- cd ${script_dir}/rpm
- ${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType}
+ if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
+ echo "====do deb package for the ubuntu system===="
+ output_dir="${top_dir}/debs"
+ if [ -d ${output_dir} ]; then
+ ${csudo} rm -rf ${output_dir}
+ fi
+ ${csudo} mkdir -p ${output_dir}
+ cd ${script_dir}/deb
+ ${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
+
+ echo "====do rpm package for the centos system===="
+ output_dir="${top_dir}/rpms"
+ if [ -d ${output_dir} ]; then
+ ${csudo} rm -rf ${output_dir}
fi
+ ${csudo} mkdir -p ${output_dir}
+ cd ${script_dir}/rpm
+ ${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
+ fi
- echo "====do tar.gz package for all systems===="
- cd ${script_dir}/tools
-
- ${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
- ${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
- ${csudo} ./makearbi.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+ echo "====do tar.gz package for all systems===="
+ cd ${script_dir}/tools
+
+ if [[ "$dbName" == "taos" ]]; then
+ ${csudo} ./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+ ${csudo} ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+ ${csudo} ./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+ else
+ ${csudo} ./makepkg_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
+ ${csudo} ./makeclient_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
+ ${csudo} ./makearbi_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+ fi
else
- cd ${script_dir}/tools
- ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType}
+ cd ${script_dir}/tools
+ ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${dbName}
fi
-# 4. Clean up temporary compile directories
-#${csudo} rm -rf ${compile_dir}
-
diff --git a/packaging/rpm/powerd b/packaging/rpm/powerd
new file mode 100644
index 0000000000000000000000000000000000000000..bf7f19aea2d0e82e6ff46667a421fece44d149bf
--- /dev/null
+++ b/packaging/rpm/powerd
@@ -0,0 +1,145 @@
+#!/bin/bash
+#
+# power This shell script takes care of starting and stopping PowerDB.
+#
+# chkconfig: 2345 99 01
+# description: PowerDB is a distributed, scalable, high-performance Time Series Database
+# (TSDB). More than just a pure database, PowerDB also provides the ability
+# to do stream computing, aggregation etc.
+#
+#
+### BEGIN INIT INFO
+# Provides: powerd
+# Required-Start: $network $local_fs $remote_fs
+# Required-Stop: $network $local_fs $remote_fs
+# Short-Description: start and stop powerd
+# Description: PowerDB is a distributed, scalable, high-performance Time Series Database
+# (TSDB). More than just a pure database, PowerDB also provides the ability
+# to do stream computing, aggregation etc.
+### END INIT INFO
+
+# Source init functions
+. /etc/init.d/functions
+
+# Maximum number of open files
+MAX_OPEN_FILES=65535
+
+# Default program options
+NAME=powerd
+PROG=/usr/local/power/bin/powerd
+USER=root
+GROUP=root
+
+# Default directories
+LOCK_DIR=/var/lock/subsys
+PID_DIR=/var/run/$NAME
+
+# Set file names
+LOCK_FILE=$LOCK_DIR/$NAME
+PID_FILE=$PID_DIR/$NAME.pid
+
+[ -e $PID_DIR ] || mkdir -p $PID_DIR
+
+PROG_OPTS=""
+
+start() {
+ echo -n "Starting ${NAME}: "
+ # check identity
+ curid="`id -u -n`"
+ if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
+ echo "Must be run as root or $USER, but was run as $curid"
+ return 1
+ fi
+ # Sets the maximum number of open file descriptors allowed.
+ ulimit -n $MAX_OPEN_FILES
+ curulimit="`ulimit -n`"
+ if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
+ echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
+ return 1
+ fi
+
+ if [ "`id -u -n`" == root ] ; then
+ # Changes the owner of the lock, and the pid files to allow
+ # non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
+ touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE
+ touch $PID_FILE && chown $USER:$GROUP $PID_FILE
+ daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
+ else
+ # Don't have to change user.
+ daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
+ fi
+ retval=$?
+ sleep 2
+ echo
+ [ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
+ return $retval
+}
+
+stop() {
+ echo -n "Stopping ${NAME}: "
+ killproc -p $PID_FILE $NAME
+ retval=$?
+ echo
+ # Non-root users don't have enough permission to remove pid and lock files.
+ # So, the opentsdb_restart.py cannot get rid of the files, and the command
+ # "service opentsdb status" will complain about the existing pid file.
+ # Makes the pid file empty.
+ echo > $PID_FILE
+ [ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
+ return $retval
+}
+
+restart() {
+ stop
+ start
+}
+
+reload() {
+ restart
+}
+
+force_reload() {
+ restart
+}
+
+rh_status() {
+ # run checks to determine if the service is running or use generic status
+ status -p $PID_FILE -l $LOCK_FILE $NAME
+}
+
+rh_status_q() {
+ rh_status >/dev/null 2>&1
+}
+
+case "$1" in
+ start)
+ rh_status_q && exit 0
+ $1
+ ;;
+ stop)
+ rh_status_q || exit 0
+ $1
+ ;;
+ restart)
+ $1
+ ;;
+ reload)
+ rh_status_q || exit 7
+ $1
+ ;;
+ force-reload)
+ force_reload
+ ;;
+ status)
+ rh_status
+ ;;
+ condrestart|try-restart)
+ rh_status_q || exit 0
+ restart
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
+ exit 2
+esac
+
+exit $?
diff --git a/packaging/tools/install_arbi_power.sh b/packaging/tools/install_arbi_power.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4b12913760e013049f44ec1ba9f7275322c436bd
--- /dev/null
+++ b/packaging/tools/install_arbi_power.sh
@@ -0,0 +1,297 @@
+#!/bin/bash
+#
+# This file is used to install database on linux systems. The operating system
+# is required to use systemd to manage services at boot
+
+set -e
+#set -x
+
+# -----------------------Variables definition---------------------
+script_dir=$(dirname $(readlink -f "$0"))
+
+bin_link_dir="/usr/bin"
+#inc_link_dir="/usr/include"
+
+#install main path
+install_main_dir="/usr/local/tarbitrator"
+
+# old bin dir
+bin_dir="/usr/local/tarbitrator/bin"
+
+service_config_dir="/etc/systemd/system"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+
+# get the operating system type for using the corresponding init file
+# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
+#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
+else
+ osinfo=""
+fi
+#echo "osinfo: ${osinfo}"
+os_type=0
+if echo $osinfo | grep -qwi "ubuntu" ; then
+# echo "This is ubuntu system"
+ os_type=1
+elif echo $osinfo | grep -qwi "debian" ; then
+# echo "This is debian system"
+ os_type=1
+elif echo $osinfo | grep -qwi "Kylin" ; then
+# echo "This is Kylin system"
+ os_type=1
+elif echo $osinfo | grep -qwi "centos" ; then
+# echo "This is centos system"
+ os_type=2
+elif echo $osinfo | grep -qwi "fedora" ; then
+# echo "This is fedora system"
+ os_type=2
+else
+ echo " osinfo: ${osinfo}"
+ echo " This is an officially unverified linux system,"
+ echo " if there are any problems with the installation and operation, "
+ echo " please feel free to contact taosdata.com for support."
+ os_type=1
+fi
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ #${csudo} mkdir -p ${install_main_dir}/include
+ ${csudo} mkdir -p ${install_main_dir}/init.d
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/rmtarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/remove_arbi_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi_power.sh ${bin_link_dir}/rmtarbitrator || :
+ [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function clean_service_on_sysvinit() {
+ #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
+ #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
+
+ if pidof tarbitrator &> /dev/null; then
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function install_service_on_sysvinit() {
+ clean_service_on_sysvinit
+ sleep 1
+
+ # Install powerd service
+
+ if ((${os_type}==1)); then
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ elif ((${os_type}==2)); then
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ fi
+
+ #restart_config_str="power:2345:respawn:${service_config_dir}/powerd start"
+ #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
+
+ if ((${initd_mod}==1)); then
+ ${csudo} chkconfig --add tarbitratord || :
+ ${csudo} chkconfig --level 2345 tarbitratord on || :
+ elif ((${initd_mod}==2)); then
+ ${csudo} insserv tarbitratord || :
+ ${csudo} insserv -d tarbitratord || :
+ elif ((${initd_mod}==3)); then
+ ${csudo} update-rc.d tarbitratord defaults || :
+ fi
+}
+
+function clean_service_on_systemd() {
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ if systemctl is-active --quiet tarbitratord; then
+ echo "tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${tarbitratord_service_config}
+}
+
+# power:2345:respawn:/etc/init.d/tarbitratord start
+
+function install_service_on_systemd() {
+ clean_service_on_systemd
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+
+ ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=PowerDB arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
+ ${csudo} systemctl enable tarbitratord
+}
+
+function install_service() {
+ if ((${service_mod}==0)); then
+ install_service_on_systemd
+ elif ((${service_mod}==1)); then
+ install_service_on_sysvinit
+ else
+ # must manual stop taosd
+ kill_tarbitrator
+ fi
+}
+
+function update_PowerDB() {
+ # Start to update
+ echo -e "${GREEN}Start to update PowerDB's arbitrator ...${NC}"
+ # Stop the service if running
+ if pidof tarbitrator &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop tarbitratord || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service tarbitratord stop || :
+ else
+ kill_tarbitrator
+ fi
+ sleep 1
+ fi
+
+ install_main_path
+ #install_header
+ install_bin
+ install_service
+
+ echo
+ #echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/taos/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
+ fi
+ echo
+ echo -e "\033[44;32;1mPowerDB's arbitrator is updated successfully!${NC}"
+}
+
+function install_PowerDB() {
+ # Start to install
+ echo -e "${GREEN}Start to install PowerDB's arbitrator ...${NC}"
+
+ install_main_path
+ #install_header
+ install_bin
+ install_service
+ echo
+ #echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/taos/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
+ fi
+
+ echo -e "\033[44;32;1mPowerDB's arbitrator is installed successfully!${NC}"
+ echo
+}
+
+
+## ==============================Main program starts from here============================
+# Install server and client
+if [ -x ${bin_dir}/tarbitrator ]; then
+ update_flag=1
+ update_PowerDB
+else
+ install_PowerDB
+fi
+
diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh
index 6a1b7be191b626f59ab61510a6c2b701b85bca9e..24586d3390fe5c4f38b6292442fed2936d5ea7a6 100755
--- a/packaging/tools/install_client.sh
+++ b/packaging/tools/install_client.sh
@@ -85,7 +85,7 @@ function install_bin() {
# Remove links
${csudo} rm -f ${bin_link_dir}/taos || :
if [ "$osType" == "Darwin" ]; then
- ${csudo} rm -f ${bin_link_dir}/taosdump || :
+ ${csudo} rm -f ${bin_link_dir}/taosdemo || :
fi
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/set_core || :
@@ -95,7 +95,7 @@ function install_bin() {
#Make link
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
if [ "$osType" == "Darwin" ]; then
- [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
+ [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
fi
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
diff --git a/packaging/tools/install_client_power.sh b/packaging/tools/install_client_power.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0108d1d44a186043aa5f83e01f44fb1540f6e0fd
--- /dev/null
+++ b/packaging/tools/install_client_power.sh
@@ -0,0 +1,249 @@
+#!/bin/bash
+#
+# This file is used to install PowerDB client on linux systems. The operating system
+# is required to use systemd to manage services at boot
+
+set -e
+#set -x
+
+# -----------------------Variables definition---------------------
+
+osType=Linux
+pagMode=full
+
+if [ "$osType" != "Darwin" ]; then
+ script_dir=$(dirname $(readlink -f "$0"))
+ # Dynamic directory
+ data_dir="/var/lib/power"
+ log_dir="/var/log/power"
+else
+ script_dir=`dirname $0`
+ cd ${script_dir}
+ script_dir="$(pwd)"
+ data_dir="/var/lib/power"
+ log_dir="~/PowerDBLog"
+fi
+
+log_link_dir="/usr/local/power/log"
+
+cfg_install_dir="/etc/power"
+
+if [ "$osType" != "Darwin" ]; then
+ bin_link_dir="/usr/bin"
+ lib_link_dir="/usr/lib"
+ lib64_link_dir="/usr/lib64"
+ inc_link_dir="/usr/include"
+else
+ bin_link_dir="/usr/local/bin"
+ lib_link_dir="/usr/local/lib"
+ inc_link_dir="/usr/local/include"
+fi
+
+#install main path
+install_main_dir="/usr/local/power"
+
+# old bin dir
+bin_dir="/usr/local/power/bin"
+
+# v1.5 jar dir
+#v15_java_app_dir="/usr/local/lib/power"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
+function kill_client() {
+ pid=$(ps -ef | grep "power" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/cfg
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ ${csudo} mkdir -p ${install_main_dir}/connector
+ ${csudo} mkdir -p ${install_main_dir}/driver
+ ${csudo} mkdir -p ${install_main_dir}/examples
+ ${csudo} mkdir -p ${install_main_dir}/include
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/power || :
+ if [ "$osType" == "Darwin" ]; then
+ ${csudo} rm -f ${bin_link_dir}/powerdemo || :
+ fi
+ ${csudo} rm -f ${bin_link_dir}/rmpower || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || :
+ if [ "$osType" == "Darwin" ]; then
+ [ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || :
+ fi
+ [ -x ${install_main_dir}/bin/remove_client_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_power.sh ${bin_link_dir}/rmpower || :
+ [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ sudo rm -f /usr/lib/libtaos.* || :
+ sudo rm -rf ${lib_dir} || :
+}
+
+function install_lib() {
+ # Remove links
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ #${csudo} rm -rf ${v15_java_app_dir} || :
+
+ ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
+ ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
+
+ if [ -d "${lib64_link_dir}" ]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+ ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+ fi
+ else
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
+ ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
+ fi
+
+ ${csudo} ldconfig
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function install_config() {
+ #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
+
+ if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ ${csudo} mkdir -p ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ ${csudo} chmod 644 ${cfg_install_dir}/*
+ fi
+
+ ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+}
+
+
+function install_log() {
+ ${csudo} rm -rf ${log_dir} || :
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+ else
+ mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+ fi
+ ${csudo} ln -s ${log_dir} ${install_main_dir}/log
+}
+
+function install_connector() {
+ ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
+}
+
+function install_examples() {
+ if [ -d ${script_dir}/examples ]; then
+ ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
+ fi
+}
+
+function update_PowerDB() {
+ # Start to update
+ if [ ! -e power.tar.gz ]; then
+ echo "File power.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf power.tar.gz
+
+ echo -e "${GREEN}Start to update PowerDB client...${NC}"
+ # Stop the client shell if running
+ if pidof power &> /dev/null; then
+ kill_client
+ sleep 1
+ fi
+
+ install_main_path
+
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mPowerDB client is updated successfully!${NC}"
+
+ rm -rf $(tar -tf power.tar.gz)
+}
+
+function install_PowerDB() {
+ # Start to install
+ if [ ! -e power.tar.gz ]; then
+ echo "File power.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf power.tar.gz
+
+ echo -e "${GREEN}Start to install PowerDB client...${NC}"
+
+ install_main_path
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mPowerDB client is installed successfully!${NC}"
+
+ rm -rf $(tar -tf power.tar.gz)
+}
+
+
+## ==============================Main program starts from here============================
+# Install or update the client
+# if the server is already installed, don't install the client
+ if [ -e ${bin_dir}/powerd ]; then
+ echo -e "\033[44;32;1mThere are already installed PowerDB server, so don't need install client!${NC}"
+ exit 0
+ fi
+
+ if [ -x ${bin_dir}/power ]; then
+ update_flag=1
+ update_PowerDB
+ else
+ install_PowerDB
+ fi
diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh
new file mode 100755
index 0000000000000000000000000000000000000000..df6291f4ae54f6706f1a74e9a33f5b1bb361edcd
--- /dev/null
+++ b/packaging/tools/install_power.sh
@@ -0,0 +1,733 @@
+#!/bin/bash
+#
+# This file is used to install the PowerDB database on linux systems. The operating
+# system is required to use systemd to manage services at boot
+
+set -e
+#set -x
+
+verMode=edge
+pagMode=full
+
+# -----------------------Variables definition---------------------
+script_dir=$(dirname $(readlink -f "$0"))
+# Dynamic directory
+data_dir="/var/lib/power"
+log_dir="/var/log/power"
+
+data_link_dir="/usr/local/power/data"
+log_link_dir="/usr/local/power/log"
+
+cfg_install_dir="/etc/power"
+
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+
+#install main path
+install_main_dir="/usr/local/power"
+
+# old bin dir
+bin_dir="/usr/local/power/bin"
+
+# v1.5 jar dir
+#v15_java_app_dir="/usr/local/lib/power"
+
+service_config_dir="/etc/systemd/system"
+nginx_port=6060
+nginx_dir="/usr/local/nginxd"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+
+# get the operating system type for using the corresponding init file
+# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
+#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
+else
+ osinfo=""
+fi
+#echo "osinfo: ${osinfo}"
+os_type=0
+if echo $osinfo | grep -qwi "ubuntu" ; then
+# echo "This is ubuntu system"
+ os_type=1
+elif echo $osinfo | grep -qwi "debian" ; then
+# echo "This is debian system"
+ os_type=1
+elif echo $osinfo | grep -qwi "Kylin" ; then
+# echo "This is Kylin system"
+ os_type=1
+elif echo $osinfo | grep -qwi "centos" ; then
+# echo "This is centos system"
+ os_type=2
+elif echo $osinfo | grep -qwi "fedora" ; then
+# echo "This is fedora system"
+ os_type=2
+else
+ echo " osinfo: ${osinfo}"
+ echo " This is an officially unverified linux system,"
+ echo " if there are any problems with the installation and operation, "
+ echo " please feel free to contact taosdata.com for support."
+ os_type=1
+fi
+
+
+# ============================= get input parameters =================================================
+
+# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
+
+# set parameters by default value
+interactiveFqdn=yes # [yes | no]
+verType=server # [server | client]
+initType=systemd # [systemd | service | ...]
+
+while getopts "hv:e:i:" arg
+do
+ case $arg in
+ e)
+ #echo "interactiveFqdn=$OPTARG"
+ interactiveFqdn=$( echo $OPTARG )
+ ;;
+ v)
+ #echo "verType=$OPTARG"
+ verType=$(echo $OPTARG)
+ ;;
+ i)
+ #echo "initType=$OPTARG"
+ initType=$(echo $OPTARG)
+ ;;
+ h)
+ echo "Usage: `basename $0` -v [server | client] -e [yes | no]"
+ exit 0
+ ;;
+ ?) #unknow option
+ echo "unkonw argument"
+ exit 1
+ ;;
+ esac
+done
+
+#echo "verType=${verType} interactiveFqdn=${interactiveFqdn}"
+
+function kill_powerd() {
+ pid=$(ps -ef | grep "powerd" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/cfg
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ ${csudo} mkdir -p ${install_main_dir}/connector
+ ${csudo} mkdir -p ${install_main_dir}/driver
+ ${csudo} mkdir -p ${install_main_dir}/examples
+ ${csudo} mkdir -p ${install_main_dir}/include
+ ${csudo} mkdir -p ${install_main_dir}/init.d
+ if [ "$verMode" == "cluster" ]; then
+ ${csudo} mkdir -p ${nginx_dir}
+ fi
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/power || :
+ ${csudo} rm -f ${bin_link_dir}/powerd || :
+ ${csudo} rm -f ${bin_link_dir}/powerdemo || :
+ ${csudo} rm -f ${bin_link_dir}/rmpower || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || :
+ [ -x ${install_main_dir}/bin/powerd ] && ${csudo} ln -s ${install_main_dir}/bin/powerd ${bin_link_dir}/powerd || :
+ [ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || :
+ [ -x ${install_main_dir}/bin/remove_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_power.sh ${bin_link_dir}/rmpower || :
+ [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
+ [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
+
+ if [ "$verMode" == "cluster" ]; then
+ ${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/*
+ ${csudo} mkdir -p ${nginx_dir}/logs
+ ${csudo} chmod 777 ${nginx_dir}/sbin/nginx
+ fi
+}
+
+function install_lib() {
+ # Remove links
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ #${csudo} rm -rf ${v15_java_app_dir} || :
+ ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
+ ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
+
+ if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+ ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+ fi
+
+ #if [ "$verMode" == "cluster" ]; then
+ # # Compatible with version 1.5
+ # ${csudo} mkdir -p ${v15_java_app_dir}
+ # ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar
+ # ${csudo} chmod 777 ${v15_java_app_dir} || :
+ #fi
+
+ ${csudo} ldconfig
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function install_config() {
+ #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
+
+ if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ ${csudo} mkdir -p ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ ${csudo} chmod 644 ${cfg_install_dir}/*
+ fi
+
+ ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+
+ [ ! -z $1 ] && return 0 || : # only install client
+
+ if ((${update_flag}==1)); then
+ return 0
+ fi
+
+ if [ "$interactiveFqdn" == "no" ]; then
+ return 0
+ fi
+
+ #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
+ #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)"
+ #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)"
+ #FQDN_PATTERN=":[0-9]{1,5}$"
+
+  # first fully-qualified domain name (FQDN) for the PowerDB cluster system
+ echo
+ echo -e -n "${GREEN}Enter FQDN:port (like h1.powerdata.com:6030) of an existing PowerDB cluster node to join${NC}"
+ echo
+ echo -e -n "${GREEN}OR leave it blank to build one${NC}:"
+ read firstEp
+ while true; do
+ if [ ! -z "$firstEp" ]; then
+ # check the format of the firstEp
+ #if [[ $firstEp == $FQDN_PATTERN ]]; then
+ # Write the first FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg
+ break
+ #else
+ # read -p "Please enter the correct FQDN:port: " firstEp
+ #fi
+ else
+ break
+ fi
+ done
+}
+
+
+function install_log() {
+ ${csudo} rm -rf ${log_dir} || :
+ ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+
+ ${csudo} ln -s ${log_dir} ${install_main_dir}/log
+}
+
+function install_data() {
+ ${csudo} mkdir -p ${data_dir}
+
+ ${csudo} ln -s ${data_dir} ${install_main_dir}/data
+}
+
+function install_connector() {
+ ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
+}
+
+function install_examples() {
+ if [ -d ${script_dir}/examples ]; then
+ ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
+ fi
+}
+
+function clean_service_on_sysvinit() {
+ #restart_config_str="power:2345:respawn:${service_config_dir}/powerd start"
+ #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
+
+ if pidof powerd &> /dev/null; then
+ ${csudo} service powerd stop || :
+ fi
+
+ if pidof tarbitrator &> /dev/null; then
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/powerd ]; then
+ ${csudo} chkconfig --del powerd || :
+ fi
+
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/powerd ]; then
+ ${csudo} insserv -r powerd || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/powerd ]; then
+ ${csudo} update-rc.d -f powerd remove || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/powerd || :
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function install_service_on_sysvinit() {
+ clean_service_on_sysvinit
+ sleep 1
+
+ # Install powerd service
+
+ if ((${os_type}==1)); then
+ ${csudo} cp -f ${script_dir}/init.d/powerd.deb ${install_main_dir}/init.d/powerd
+ ${csudo} cp ${script_dir}/init.d/powerd.deb ${service_config_dir}/powerd && ${csudo} chmod a+x ${service_config_dir}/powerd
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ elif ((${os_type}==2)); then
+ ${csudo} cp -f ${script_dir}/init.d/powerd.rpm ${install_main_dir}/init.d/powerd
+ ${csudo} cp ${script_dir}/init.d/powerd.rpm ${service_config_dir}/powerd && ${csudo} chmod a+x ${service_config_dir}/powerd
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ fi
+
+ #restart_config_str="power:2345:respawn:${service_config_dir}/powerd start"
+ #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
+
+ if ((${initd_mod}==1)); then
+ ${csudo} chkconfig --add powerd || :
+ ${csudo} chkconfig --level 2345 powerd on || :
+ ${csudo} chkconfig --add tarbitratord || :
+ ${csudo} chkconfig --level 2345 tarbitratord on || :
+ elif ((${initd_mod}==2)); then
+ ${csudo} insserv powerd || :
+ ${csudo} insserv -d powerd || :
+ ${csudo} insserv tarbitratord || :
+ ${csudo} insserv -d tarbitratord || :
+ elif ((${initd_mod}==3)); then
+ ${csudo} update-rc.d powerd defaults || :
+ ${csudo} update-rc.d tarbitratord defaults || :
+ fi
+}
+
+function clean_service_on_systemd() {
+ powerd_service_config="${service_config_dir}/powerd.service"
+ if systemctl is-active --quiet powerd; then
+ echo "PowerDB is running, stopping it..."
+ ${csudo} systemctl stop powerd &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable powerd &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${powerd_service_config}
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ if systemctl is-active --quiet tarbitratord; then
+ echo "tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tarbitratord_service_config}
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/nginxd.service"
+ if systemctl is-active --quiet nginxd; then
+ echo "Nginx for TDengine is running, stopping it..."
+ ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${nginx_service_config}
+ fi
+}
+
+# power:2345:respawn:/etc/init.d/powerd start
+
+function install_service_on_systemd() {
+ clean_service_on_systemd
+
+ powerd_service_config="${service_config_dir}/powerd.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo 'Description=PowerDB server service' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo >> ${powerd_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/powerd' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo >> ${powerd_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${powerd_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${powerd_service_config}"
+ ${csudo} systemctl enable powerd
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
+ #${csudo} systemctl enable tarbitratord
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/nginxd.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Description=Nginx For PowrDB Service' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo >> ${nginx_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo >> ${nginx_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}"
+ if ! ${csudo} systemctl enable nginxd &> /dev/null; then
+ ${csudo} systemctl daemon-reexec
+ ${csudo} systemctl enable nginxd
+ fi
+ ${csudo} systemctl start nginxd
+ fi
+}
+
+function install_service() {
+ if ((${service_mod}==0)); then
+ install_service_on_systemd
+ elif ((${service_mod}==1)); then
+ install_service_on_sysvinit
+ else
+    # must manually stop powerd
+ kill_powerd
+ fi
+}
+
+vercomp () {
+ if [[ $1 == $2 ]]; then
+ return 0
+ fi
+ local IFS=.
+ local i ver1=($1) ver2=($2)
+ # fill empty fields in ver1 with zeros
+ for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
+ ver1[i]=0
+ done
+
+ for ((i=0; i<${#ver1[@]}; i++)); do
+ if [[ -z ${ver2[i]} ]]
+ then
+ # fill empty fields in ver2 with zeros
+ ver2[i]=0
+ fi
+ if ((10#${ver1[i]} > 10#${ver2[i]}))
+ then
+ return 1
+ fi
+ if ((10#${ver1[i]} < 10#${ver2[i]}))
+ then
+ return 2
+ fi
+ done
+ return 0
+}
+
+function is_version_compatible() {
+
+ curr_version=$(${bin_dir}/powerd -V | head -1 | cut -d ' ' -f 3)
+
+ min_compatible_version=$(${script_dir}/bin/powerd -V | head -1 | cut -d ' ' -f 5)
+
+ vercomp $curr_version $min_compatible_version
+ case $? in
+ 0) return 0;;
+ 1) return 0;;
+ 2) return 1;;
+ esac
+}
+
+function update_PowerDB() {
+ # Start to update
+ if [ ! -e power.tar.gz ]; then
+ echo "File power.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf power.tar.gz
+
+ # Check if version compatible
+ if ! is_version_compatible; then
+ echo -e "${RED}Version incompatible${NC}"
+ return 1
+ fi
+
+ echo -e "${GREEN}Start to update PowerDB...${NC}"
+ # Stop the service if running
+ if pidof powerd &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop powerd || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service powerd stop || :
+ else
+ kill_powerd
+ fi
+ sleep 1
+ fi
+
+ install_main_path
+
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ if [ -z $1 ]; then
+ install_bin
+ install_service
+ install_config
+
+ openresty_work=false
+ if [ "$verMode" == "cluster" ]; then
+ # Check if openresty is installed
+ # Check if nginx is installed successfully
+ if type curl &> /dev/null; then
+ if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
+ echo -e "\033[44;32;1mNginx for PowerDB is updated successfully!${NC}"
+ openresty_work=true
+ else
+ echo -e "\033[44;31;5mNginx for PowerDB does not work! Please try again!\033[0m"
+ fi
+ fi
+ fi
+
+ #echo
+ #echo -e "\033[44;32;1mPowerDB is updated successfully!${NC}"
+ echo
+ echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo} systemctl start powerd${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo} service powerd start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start PowerDB ${NC}: ./powerd${NC}"
+ fi
+
+ if [ ${openresty_work} = 'true' ]; then
+ echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
+ else
+ echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell${NC}"
+ fi
+
+ echo
+ echo -e "\033[44;32;1mPowerDB is updated successfully!${NC}"
+ else
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mPowerDB client is updated successfully!${NC}"
+ fi
+
+ rm -rf $(tar -tf power.tar.gz)
+}
+
+function install_PowerDB() {
+ # Start to install
+ if [ ! -e power.tar.gz ]; then
+ echo "File power.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf power.tar.gz
+
+ echo -e "${GREEN}Start to install PowerDB...${NC}"
+
+ install_main_path
+
+ if [ -z $1 ]; then
+ install_data
+ fi
+
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+
+ if [ -z $1 ]; then # install service and client
+    # For a fresh install: set up binaries and the service
+ install_bin
+ install_service
+
+ openresty_work=false
+ if [ "$verMode" == "cluster" ]; then
+ # Check if nginx is installed successfully
+ if type curl &> /dev/null; then
+ if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
+ echo -e "\033[44;32;1mNginx for PowerDB is installed successfully!${NC}"
+ openresty_work=true
+ else
+ echo -e "\033[44;31;5mNginx for PowerDB does not work! Please try again!\033[0m"
+ fi
+ fi
+ fi
+
+ install_config
+
+ # Ask if to start the service
+ #echo
+ #echo -e "\033[44;32;1mPowerDB is installed successfully!${NC}"
+ echo
+ echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo} systemctl start powerd${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo} service powerd start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start PowerDB ${NC}: powerd${NC}"
+ fi
+
+ if [ ${openresty_work} = 'true' ]; then
+ echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
+ else
+ echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell${NC}"
+ fi
+
+ if [ ! -z "$firstEp" ]; then
+ echo
+ echo -e "${GREEN_DARK}Please run${NC}: power -h $firstEp${GREEN_DARK} to login into cluster, then${NC}"
+ echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
+ echo
+ fi
+ echo -e "\033[44;32;1mPowerDB is installed successfully!${NC}"
+ echo
+ else # Only install client
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mPowerDB client is installed successfully!${NC}"
+ fi
+
+ rm -rf $(tar -tf power.tar.gz)
+}
+
+
+## ==============================Main program starts from here============================
+if [ "$verType" == "server" ]; then
+ # Install server and client
+ if [ -x ${bin_dir}/powerd ]; then
+ update_flag=1
+ update_PowerDB
+ else
+ install_PowerDB
+ fi
+elif [ "$verType" == "client" ]; then
+ interactiveFqdn=no
+ # Only install client
+ if [ -x ${bin_dir}/power ]; then
+ update_flag=1
+ update_PowerDB client
+ else
+ install_PowerDB client
+ fi
+else
+ echo "please input correct verType"
+fi
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
index 74aa1495fe5de21e4326fa3a0851b292735467c4..eff70d8035af0291f6dc7040ec13632fec4fa3be 100755
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -10,6 +10,7 @@ set -e
source_dir=$1
binary_dir=$2
osType=$3
+verNumber=$4
if [ "$osType" != "Darwin" ]; then
script_dir=$(dirname $(readlink -f "$0"))
@@ -179,19 +180,18 @@ function install_lib() {
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
- versioninfo=$(${script_dir}/get_version.sh ${source_dir}/src/util/src/version.c)
if [ "$osType" != "Darwin" ]; then
- ${csudo} cp ${binary_dir}/build/lib/libtaos.so.${versioninfo} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
- ${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib_link_dir}/libtaos.so.1
+ ${csudo} cp ${binary_dir}/build/lib/libtaos.so.${verNumber} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+ ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
if [ -d "${lib64_link_dir}" ]; then
- ${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib64_link_dir}/libtaos.so.1
+ ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1
${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so
fi
else
- ${csudo} cp ${binary_dir}/build/lib/libtaos.${versioninfo}.dylib ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
- ${csudo} ln -sf ${install_main_dir}/driver/libtaos.${versioninfo}.dylib ${lib_link_dir}/libtaos.1.dylib
+ ${csudo} cp ${binary_dir}/build/lib/libtaos.* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+ ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
diff --git a/packaging/tools/makearbi_power.sh b/packaging/tools/makearbi_power.sh
new file mode 100755
index 0000000000000000000000000000000000000000..5296cc8e3f334c0ed0f8e5ca30137e367b021929
--- /dev/null
+++ b/packaging/tools/makearbi_power.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+#
+# Generate arbitrator's tar.gz setup package for all os system
+
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+
+script_dir="$(dirname $(readlink -f $0))"
+top_dir="$(readlink -f ${script_dir}/../..)"
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/PowerDB-enterprise-arbitrator"
+else
+ install_dir="${release_dir}/PowerDB-arbitrator"
+fi
+
+# Directories and files.
+bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_power.sh"
+install_files="${script_dir}/install_arbi_power.sh"
+
+#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
+init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
+
+# make directories.
+mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_power.sh || :
+#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
+mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${version}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${version}-${osType}-${cpuType}
+else
+ echo "unknow verMode, nor cluster or edge"
+ exit 1
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknow verType, nor stabel or beta"
+ exit 1
+fi
+
+tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh
index 8545a3e5e4b7f41c24814fd4eb327c7d86301f9d..665fb2845c83a05153cf411f65a1a4323e8cc819 100755
--- a/packaging/tools/makeclient.sh
+++ b/packaging/tools/makeclient.sh
@@ -41,7 +41,7 @@ fi
if [ "$osType" != "Darwin" ]; then
if [ "$pagMode" == "lite" ]; then
- strip ${build_dir}/bin/taosd
+ #strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
else
diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d4be52f67911ee22f6d914cdc6bc6de9c7b0fb06
--- /dev/null
+++ b/packaging/tools/makeclient_power.sh
@@ -0,0 +1,197 @@
+#!/bin/bash
+#
+# Generate tar.gz package for linux client in all os system
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+
+if [ "$osType" != "Darwin" ]; then
+ script_dir="$(dirname $(readlink -f $0))"
+ top_dir="$(readlink -f ${script_dir}/../..)"
+else
+ script_dir=`dirname $0`
+ cd ${script_dir}
+ script_dir="$(pwd)"
+ top_dir=${script_dir}/../..
+fi
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/PowerDB-enterprise-client"
+else
+ install_dir="${release_dir}/PowerDB-client"
+fi
+
+# Directories and files.
+
+if [ "$osType" != "Darwin" ]; then
+# if [ "$pagMode" == "lite" ]; then
+# strip ${build_dir}/bin/powerd
+# strip ${build_dir}/bin/power
+# bin_files="${build_dir}/bin/power ${script_dir}/remove_client_power.sh"
+# else
+# bin_files="${build_dir}/bin/power ${build_dir}/bin/powerdemo ${script_dir}/remove_client_power.sh ${script_dir}/set_core.sh"
+# fi
+ lib_files="${build_dir}/lib/libtaos.so.${version}"
+else
+ bin_files="${build_dir}/bin/power ${script_dir}/remove_client_power.sh"
+ lib_files="${build_dir}/lib/libtaos.${version}.dylib"
+fi
+
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+cfg_dir="${top_dir}/packaging/cfg"
+
+install_files="${script_dir}/install_client_power.sh"
+
+# make directories.
+mkdir -p ${install_dir}
+mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
+
+sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
+sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
+sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/taos.cfg
+
+mkdir -p ${install_dir}/bin
+if [ "$osType" != "Darwin" ]; then
+ if [ "$pagMode" == "lite" ]; then
+ strip ${build_dir}/bin/taos
+ cp ${build_dir}/bin/taos ${install_dir}/bin/power
+    cp ${script_dir}/remove_client_power.sh ${install_dir}/bin
+ else
+ cp ${build_dir}/bin/taos ${install_dir}/bin/power
+    cp ${script_dir}/remove_client_power.sh ${install_dir}/bin
+ cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
+ cp ${script_dir}/set_core.sh ${install_dir}/bin
+ fi
+else
+ cp ${bin_files} ${install_dir}/bin
+fi
+chmod a+x ${install_dir}/bin/* || :
+
+cd ${install_dir}
+
+if [ "$osType" != "Darwin" ]; then
+ tar -zcv -f power.tar.gz * --remove-files || :
+else
+ tar -zcv -f power.tar.gz * || :
+ mv power.tar.gz ..
+ rm -rf ./*
+ mv ../power.tar.gz .
+fi
+
+cd ${curr_dir}
+cp ${install_files} ${install_dir}
+if [ "$osType" == "Darwin" ]; then
+ sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_power.sh >> install_client_power_temp.sh
+ mv install_client_power_temp.sh ${install_dir}/install_client_power.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+ sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_power.sh >> install_client_power_temp.sh
+ mv install_client_power_temp.sh ${install_dir}/install_client_power.sh
+fi
+chmod a+x ${install_dir}/install_client_power.sh
+
+# Copy example code
+mkdir -p ${install_dir}/examples
+examples_dir="${top_dir}/tests/examples"
+cp -r ${examples_dir}/c ${install_dir}/examples
+sed -i '/passwd/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c
+sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c
+
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp -r ${examples_dir}/JDBC ${install_dir}/examples
+ cp -r ${examples_dir}/matlab ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/matlab/TDengineDemo.m
+ cp -r ${examples_dir}/python ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/python/read_example.py
+ cp -r ${examples_dir}/R ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt
+ cp -r ${examples_dir}/go ${install_dir}/examples
+ sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/src/taosapp/taosapp.go
+fi
+# Copy driver
+mkdir -p ${install_dir}/driver
+cp ${lib_files} ${install_dir}/driver
+
+# Copy connector
+connector_dir="${code_dir}/connector"
+mkdir -p ${install_dir}/connector
+
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ if [ "$osType" != "Darwin" ]; then
+ cp ${build_dir}/lib/*.jar ${install_dir}/connector
+ fi
+ cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
+ cp -r ${connector_dir}/python ${install_dir}/connector/
+ cp -r ${connector_dir}/go ${install_dir}/connector
+
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py
+
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/subscription.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py
+
+ sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py
+ sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py
+ sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py
+ sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py
+fi
+# Copy release note
+# cp ${script_dir}/release_note ${install_dir}
+
+# exit 1
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${version}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${version}-${osType}-${cpuType}
+else
+ echo "unknow verMode, nor cluster or edge"
+ exit 1
+fi
+
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknow verType, nor stable or beta"
+ exit 1
+fi
+
+if [ "$osType" != "Darwin" ]; then
+ tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+else
+ tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
+ mv "$(basename ${pkg_name}).tar.gz" ..
+ rm -rf ./*
+ mv ../"$(basename ${pkg_name}).tar.gz" .
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh
new file mode 100755
index 0000000000000000000000000000000000000000..744f78e514611125bd0ecce1d53e5534656924c9
--- /dev/null
+++ b/packaging/tools/makepkg_power.sh
@@ -0,0 +1,215 @@
+#!/bin/bash
+#
+# Generate tar.gz package for all os system
+
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+
+script_dir="$(dirname $(readlink -f $0))"
+top_dir="$(readlink -f ${script_dir}/../..)"
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/PowerDB-enterprise-server"
+else
+ install_dir="${release_dir}/PowerDB-server"
+fi
+
+# Directories and files.
+#if [ "$pagMode" == "lite" ]; then
+# strip ${build_dir}/bin/taosd
+# strip ${build_dir}/bin/taos
+# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${script_dir}/remove_power.sh"
+#else
+# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh ${script_dir}/set_core.sh"
+#fi
+
+lib_files="${build_dir}/lib/libtaos.so.${version}"
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+cfg_dir="${top_dir}/packaging/cfg"
+install_files="${script_dir}/install_power.sh"
+nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
+
+# Init file
+#init_dir=${script_dir}/deb
+#if [ $package_type = "centos" ]; then
+# init_dir=${script_dir}/rpm
+#fi
+#init_files=${init_dir}/powerd
+# temp use rpm's powerd. TODO: later modify according to os type
+init_file_deb=${script_dir}/../deb/powerd
+init_file_rpm=${script_dir}/../rpm/powerd
+init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
+init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
+
+# make directories.
+mkdir -p ${install_dir}
+mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
+
+#mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
+mkdir -p ${install_dir}/bin
+if [ "$pagMode" == "lite" ]; then
+ strip ${build_dir}/bin/taosd
+ strip ${build_dir}/bin/taos
+# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${script_dir}/remove_power.sh"
+ cp ${build_dir}/bin/taos ${install_dir}/bin/power
+ cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
+ cp ${script_dir}/remove_power.sh ${install_dir}/bin
+else
+# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh ${script_dir}/set_core.sh"
+ cp ${build_dir}/bin/taos ${install_dir}/bin/power
+ cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
+ cp ${script_dir}/remove_power.sh ${install_dir}/bin
+ cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
+ cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
+ cp ${script_dir}/set_core.sh ${install_dir}/bin
+fi
+chmod a+x ${install_dir}/bin/* || :
+
+mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/powerd.deb
+mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/powerd.rpm
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
+
+if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_power.sh >> remove_power_temp.sh
+ mv remove_power_temp.sh ${install_dir}/bin/remove_power.sh
+
+ mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
+ cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
+ rm -rf ${install_dir}/nginxd/png
+
+ sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/*.html
+ sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/js/*.js
+
+ sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
+ sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
+ sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/taos.cfg
+
+ if [ "$cpuType" == "aarch64" ]; then
+ cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
+ elif [ "$cpuType" == "aarch32" ]; then
+ cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
+ fi
+ rm -rf ${install_dir}/nginxd/sbin/arm
+fi
+
+cd ${install_dir}
+exitcode=0
+tar -zcv -f power.tar.gz * --remove-files || exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar power.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
+cp ${install_files} ${install_dir}
+if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_power.sh >> install_power_temp.sh
+ mv install_power_temp.sh ${install_dir}/install_power.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+  sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_power.sh >> install_power_temp.sh
+ mv install_power_temp.sh ${install_dir}/install_power.sh
+fi
+chmod a+x ${install_dir}/install_power.sh
+
+# Copy example code
+mkdir -p ${install_dir}/examples
+examples_dir="${top_dir}/tests/examples"
+cp -r ${examples_dir}/c ${install_dir}/examples
+sed -i '/passwd/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c
+sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c
+
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp -r ${examples_dir}/JDBC ${install_dir}/examples
+ cp -r ${examples_dir}/matlab ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/matlab/TDengineDemo.m
+ cp -r ${examples_dir}/python ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/python/read_example.py
+ cp -r ${examples_dir}/R ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt
+ cp -r ${examples_dir}/go ${install_dir}/examples
+ sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/src/taosapp/taosapp.go
+fi
+# Copy driver
+mkdir -p ${install_dir}/driver
+cp ${lib_files} ${install_dir}/driver
+
+# Copy connector
+connector_dir="${code_dir}/connector"
+mkdir -p ${install_dir}/connector
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp ${build_dir}/lib/*.jar ${install_dir}/connector
+ cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
+ cp -r ${connector_dir}/python ${install_dir}/connector/
+ cp -r ${connector_dir}/go ${install_dir}/connector
+
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py
+
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/subscription.py
+ sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py
+
+ sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py
+ sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py
+ sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py
+ sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py
+fi
+# Copy release note
+# cp ${script_dir}/release_note ${install_dir}
+
+# exit 1
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${version}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${version}-${osType}-${cpuType}
+else
+ echo "unknow verMode, nor cluster or edge"
+ exit 1
+fi
+
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknow verType, nor stabel or beta"
+ exit 1
+fi
+
+tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh
index dd6fe65eb7e69ecc85c255e37e5b97d468048157..e9a4f48cf78469ac3b5d3a46cc058430d6da4d56 100755
--- a/packaging/tools/remove.sh
+++ b/packaging/tools/remove.sh
@@ -102,36 +102,31 @@ function clean_log() {
function clean_service_on_systemd() {
taosd_service_config="${service_config_dir}/${taos_service_name}.service"
-
if systemctl is-active --quiet ${taos_service_name}; then
echo "TDengine taosd is running, stopping it..."
${csudo} systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null
-
${csudo} rm -f ${taosd_service_config}
-
+
+ tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
+ if systemctl is-active --quiet ${tarbitrator_service_name}; then
+ echo "TDengine tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tarbitratord_service_config}
+
if [ "$verMode" == "cluster" ]; then
- tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
-
- if systemctl is-active --quiet ${tarbitrator_service_name}; then
- echo "TDengine tarbitrator is running, stopping it..."
- ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
- fi
- ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
-
- ${csudo} rm -f ${tarbitratord_service_config}
-
- nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
- if [ -d ${bin_dir}/web ]; then
- if systemctl is-active --quiet ${nginx_service_name}; then
- echo "Nginx for TDengine is running, stopping it..."
- ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
- fi
- ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
-
- ${csudo} rm -f ${nginx_service_config}
+ nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
+ if [ -d ${bin_dir}/web ]; then
+ if systemctl is-active --quiet ${nginx_service_name}; then
+ echo "Nginx for TDengine is running, stopping it..."
+ ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
fi
+ ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${nginx_service_config}
+ fi
fi
}
@@ -227,3 +222,4 @@ elif echo $osinfo | grep -qwi "centos" ; then
fi
echo -e "${GREEN}TDengine is removed successfully!${NC}"
+echo
\ No newline at end of file
diff --git a/packaging/tools/remove_arbi_power.sh b/packaging/tools/remove_arbi_power.sh
new file mode 100755
index 0000000000000000000000000000000000000000..077b19ec7d4208c604c2042c2aa1eacab2033c5b
--- /dev/null
+++ b/packaging/tools/remove_arbi_power.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+#
+# Script to stop the service and uninstall PowerDB's arbitrator
+
+set -e
+#set -x
+
+verMode=edge
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/tarbitrator"
+bin_link_dir="/usr/bin"
+#inc_link_dir="/usr/include"
+
+service_config_dir="/etc/systemd/system"
+tarbitrator_service_name="tarbitratord"
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf /arbitrator.log || :
+}
+
+function clean_service_on_systemd() {
+ tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
+
+ if systemctl is-active --quiet ${tarbitrator_service_name}; then
+ echo "PowerDB tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${tarbitratord_service_config}
+}
+
+function clean_service_on_sysvinit() {
+ if pidof tarbitrator &> /dev/null; then
+ echo "PowerDB's tarbitrator is running, stopping it..."
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function clean_service() {
+ if ((${service_mod}==0)); then
+ clean_service_on_systemd
+ elif ((${service_mod}==1)); then
+ clean_service_on_sysvinit
+ else
+ # must manual stop
+ kill_tarbitrator
+ fi
+}
+
+# Stop service and disable booting start.
+clean_service
+# Remove binary file and links
+clean_bin
+# Remove header file.
+##clean_header
+# Remove log file
+clean_log
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}PowerDB's arbitrator is removed successfully!${NC}"
+echo
\ No newline at end of file
diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh
index 7cbf524d04734e5b5a73b37c44bf23b2c0254157..2c28b7b6bf41b3798d533b8835a5ff311eecfdb4 100755
--- a/packaging/tools/remove_client.sh
+++ b/packaging/tools/remove_client.sh
@@ -39,6 +39,7 @@ function clean_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
}
function clean_lib() {
@@ -80,3 +81,4 @@ clean_config
${csudo} rm -rf ${install_main_dir}
echo -e "${GREEN}TDengine client is removed successfully!${NC}"
+echo
diff --git a/packaging/tools/remove_client_power.sh b/packaging/tools/remove_client_power.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7a3c99e100c7b0e363bdf15bf19b8b523124c45b
--- /dev/null
+++ b/packaging/tools/remove_client_power.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+#
+# Script to stop the client and uninstall database, but retain the config and log files.
+set -e
+# set -x
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/power"
+
+log_link_dir="/usr/local/power/log"
+cfg_link_dir="/usr/local/power/cfg"
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+
+
+# v1.5 jar dir
+#v15_java_app_dir="/usr/local/lib/power"
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+function kill_client() {
+  pid=$(pidof power || true)
+  if [ -n "$pid" ]; then
+    ${csudo} kill -9 $pid || :
+ fi
+}
+
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/power || :
+ ${csudo} rm -f ${bin_link_dir}/powerdemo || :
+ ${csudo} rm -f ${bin_link_dir}/rmpower || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ # Remove link
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ #${csudo} rm -rf ${v15_java_app_dir} || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_config() {
+ # Remove link
+ ${csudo} rm -f ${cfg_link_dir}/* || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf ${log_link_dir} || :
+}
+
+# Stop client.
+kill_client
+# Remove binary file and links
+clean_bin
+# Remove header file.
+clean_header
+# Remove lib file
+clean_lib
+# Remove link log directory
+clean_log
+# Remove link configuration file
+clean_config
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}PowerDB client is removed successfully!${NC}"
+echo
diff --git a/packaging/tools/remove_power.sh b/packaging/tools/remove_power.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d6d6c5dd7c7744363add2cafb23ea0178533ab8d
--- /dev/null
+++ b/packaging/tools/remove_power.sh
@@ -0,0 +1,226 @@
+#!/bin/bash
+#
+# Script to stop the service and uninstall PowerDB, but retain the config, data and log files.
+
+set -e
+#set -x
+
+verMode=edge
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/power"
+data_link_dir="/usr/local/power/data"
+log_link_dir="/usr/local/power/log"
+cfg_link_dir="/usr/local/power/cfg"
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+install_nginxd_dir="/usr/local/nginxd"
+
+# v1.5 jar dir
+#v15_java_app_dir="/usr/local/lib/power"
+
+service_config_dir="/etc/systemd/system"
+power_service_name="powerd"
+tarbitrator_service_name="tarbitratord"
+nginx_service_name="nginxd"
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+function kill_powerd() {
+ pid=$(ps -ef | grep "powerd" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/power || :
+ ${csudo} rm -f ${bin_link_dir}/powerd || :
+ ${csudo} rm -f ${bin_link_dir}/powerdemo || :
+ ${csudo} rm -f ${bin_link_dir}/rmpower || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ # Remove link
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ #${csudo} rm -rf ${v15_java_app_dir} || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_config() {
+ # Remove link
+ ${csudo} rm -f ${cfg_link_dir}/* || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf ${log_link_dir} || :
+}
+
+function clean_service_on_systemd() {
+ power_service_config="${service_config_dir}/${power_service_name}.service"
+ if systemctl is-active --quiet ${power_service_name}; then
+ echo "PowerDB powerd is running, stopping it..."
+ ${csudo} systemctl stop ${power_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${power_service_name} &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${power_service_config}
+
+ tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
+ if systemctl is-active --quiet ${tarbitrator_service_name}; then
+    echo "PowerDB tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tarbitratord_service_config}
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
+    if [ -d ${install_nginxd_dir} ]; then
+ if systemctl is-active --quiet ${nginx_service_name}; then
+        echo "Nginx for PowerDB is running, stopping it..."
+ ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${nginx_service_config}
+ fi
+ fi
+}
+
+function clean_service_on_sysvinit() {
+ #restart_config_str="power:2345:respawn:${service_config_dir}/powerd start"
+ #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
+
+ if pidof powerd &> /dev/null; then
+ echo "PowerDB powerd is running, stopping it..."
+ ${csudo} service powerd stop || :
+ fi
+
+ if pidof tarbitrator &> /dev/null; then
+ echo "PowerDB tarbitrator is running, stopping it..."
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/powerd ]; then
+ ${csudo} chkconfig --del powerd || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/powerd ]; then
+ ${csudo} insserv -r powerd || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/powerd ]; then
+ ${csudo} update-rc.d -f powerd remove || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/powerd || :
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function clean_service() {
+ if ((${service_mod}==0)); then
+ clean_service_on_systemd
+ elif ((${service_mod}==1)); then
+ clean_service_on_sysvinit
+ else
+    # must manually stop powerd and tarbitrator
+ kill_powerd
+ kill_tarbitrator
+ fi
+}
+
+# Stop service and disable booting start.
+clean_service
+# Remove binary file and links
+clean_bin
+# Remove header file.
+clean_header
+# Remove lib file
+clean_lib
+# Remove link log directory
+clean_log
+# Remove link configuration file
+clean_config
+# Remove data link directory
+${csudo} rm -rf ${data_link_dir} || :
+
+${csudo} rm -rf ${install_main_dir}
+${csudo} rm -rf ${install_nginxd_dir}
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+else
+ osinfo=""
+fi
+
+#if echo $osinfo | grep -qwi "ubuntu" ; then
+## echo "this is ubuntu system"
+# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
+#elif echo $osinfo | grep -qwi "debian" ; then
+## echo "this is debian system"
+# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
+#elif echo $osinfo | grep -qwi "centos" ; then
+## echo "this is centos system"
+# ${csudo} rpm -e --noscripts tdengine || :
+#fi
+
+echo -e "${GREEN}PowerDB is removed successfully!${NC}"
+echo
\ No newline at end of file
diff --git a/snap/hooks/install b/snap/hooks/install
index e58918d2c31a4b49f2c7e9e2f34fd0928c2d7ff3..542be0b8340893b652f38be6fec5711ef7c09fb9 100755
--- a/snap/hooks/install
+++ b/snap/hooks/install
@@ -5,7 +5,9 @@ if [ ! -d /var/lib/taos ]; then
fi
if [ ! -d /var/log/taos ]; then
- mkdir -p -m777 /var/log/taos
+ mkdir -p --mode=777 /var/log/taos
+else
+ chmod 777 /var/log/taos
fi
if [ ! -d /etc/taos ]; then
@@ -13,5 +15,8 @@ if [ ! -d /etc/taos ]; then
fi
if [ ! -f /etc/taos/taos.cfg ]; then
+ if [ ! -d /etc/taos ]; then
+ mkdir -p /etc/taos
+ fi
cp $SNAP/etc/taos/taos.cfg /etc/taos/taos.cfg
fi
diff --git a/snap/local/launcher.sh b/snap/local/launcher.sh
index 52b3e4ce5c1305d3b46b520c78643074e9ad22a2..29a7a63779097e0d807d1e4bbbfa46afcf92dc97 100755
--- a/snap/local/launcher.sh
+++ b/snap/local/launcher.sh
@@ -15,11 +15,12 @@ case "$SNAP_USER_COMMON" in
*) COMMON=$SNAP_USER_COMMON ;;
esac
-if [ -d /etc/taos ]; then
- CONFIG_FILE="/etc/taos"
-else
- CONFIG_FILE="$SNAP/etc/taos"
+if [ ! -f $SNAP_DATA/etc/taos/taos.cfg ]; then
+ if [ ! -d $SNAP_DATA/etc/taos ]; then
+ mkdir -p $SNAP_DATA/etc/taos
+ fi
+ cp $SNAP/etc/taos/taos.cfg $SNAP_DATA/etc/taos
fi
# Launch the snap
-$SNAP/usr/bin/taosd -c $CONFIG_FILE $@
+$SNAP/usr/bin/taosd -c /etc/taos $@
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index cf4f9933949b625079b4e53ff7940ebf64aec6c8..7a0e1c3b805033b057da4a302ba32cee869d478b 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,13 +1,13 @@
name: tdengine
base: core18 # the base snap is the execution environment for this snap
-version: '2.0.0.6' # just for humans, typically '1.2+git' or '1.3.2'
+version: '2.0.2.0' # just for humans, typically '1.2+git' or '1.3.2'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
TDengine is an open-source big data platform designed and optimized for Internet of Things (IoT), Connected Vehicles, and Industrial IoT. Besides the 10x faster time-series database, it provides caching, stream computing, message queuing and other functionalities to reduce the complexity and costs of development and operations.
grade: stable
-confinement: classic
+confinement: strict
apps:
tdengine:
@@ -24,7 +24,9 @@ apps:
command: taoswrapper.sh
plugs:
- network
+ - system-observe
- systemfiles
+ - historyfile
taosdemo:
command: usr/bin/taosdemo
@@ -32,11 +34,19 @@ apps:
- network
plugs:
+ historyfile:
+ interface: personal-files
+ read:
+ - $HOME/.taos_history
+ write:
+ - $HOME/.taos_history
+
systemfiles:
interface: system-files
read:
- /etc/taos
- /var/lib/taos
+ - /var/log/taos
- /tmp
write:
- /var/log/taos
@@ -77,7 +87,7 @@ parts:
mkdir -p $SNAPCRAFT_STAGE/var/lib/taos
fi
if [ ! -d $SNAPCRAFT_STAGE/var/log/taos ]; then
- mkdir -p $SNAPCRAFT_STAGE/var/log/taos
+ mkdir -p --mode=777 $SNAPCRAFT_STAGE/var/log/taos
fi
prime:
@@ -85,16 +95,16 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- - usr/lib/libtaos.so.2.0.0.6
+ - usr/lib/libtaos.so.2.0.2.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
override-prime: |
snapcraftctl prime
- if [ ! -d $SNAPCRAFT_STAGE/var/lib/taos ]; then
+ if [ ! -d $SNAPCRAFT_PRIME/var/lib/taos ]; then
cp -rf $SNAPCRAFT_STAGE/var/lib/taos $SNAPCRAFT_PRIME
fi
- if [ ! -d $SNAPCRAFT_STAGE/var/log/taos ]; then
+ if [ ! -d $SNAPCRAFT_PRIME/var/log/taos ]; then
cp -rf $SNAPCRAFT_STAGE/var/log/taos $SNAPCRAFT_PRIME
fi
@@ -103,11 +113,10 @@ layout:
bind: $SNAP_DATA/var/lib/taos
/var/log/taos:
bind: $SNAP_DATA/var/log/taos
- /etc/taos/taos.cfg:
- bind-file: $SNAP_DATA/etc/taos/taos.cfg
+ /etc/taos:
+ bind: $SNAP_DATA/etc/taos
hooks:
install:
- plugs: [systemfiles]
-
+ plugs: [systemfiles, historyfile]
diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt
index 5b5fb3435d6c1261c2372bddcd85a0bcd3a110f9..daf7c5e5349f42730721b3f2b6baad5d0485361e 100644
--- a/src/client/CMakeLists.txt
+++ b/src/client/CMakeLists.txt
@@ -23,12 +23,8 @@ IF (TD_LINUX)
#set version of .so
#VERSION so version
#SOVERSION api version
- execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh)
- execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c
- OUTPUT_VARIABLE
- VERSION_INFO)
- MESSAGE(STATUS "build version ${VERSION_INFO}")
- SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${VERSION_INFO} SOVERSION 1)
+ #MESSAGE(STATUS "build version ${TD_VER_NUMBER}")
+ SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1)
ADD_SUBDIRECTORY(tests)
@@ -65,11 +61,7 @@ ELSEIF (TD_DARWIN)
#set version of .so
#VERSION so version
#SOVERSION api version
- execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh)
- execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c
- OUTPUT_VARIABLE
- VERSION_INFO)
- MESSAGE(STATUS "build version ${VERSION_INFO}")
- SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${VERSION_INFO} SOVERSION 1)
+ #MESSAGE(STATUS "build version ${TD_VER_NUMBER}")
+ SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1)
ENDIF ()
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index 6bdc2c86aee8c5739da4634bf34736fea8f5c6ec..9b31b8fc6aa4114caa262b4b81a60a0ae757cb2c 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -39,7 +39,6 @@ extern "C" {
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
-#define TSDB_COL_IS_TAG(f) (((f)&TSDB_COL_TAG) != 0)
typedef struct SParsedColElem {
int16_t colIndex;
@@ -70,6 +69,8 @@ typedef struct SJoinSupporter {
SSubqueryState* pState;
SSqlObj* pObj; // parent SqlObj
int32_t subqueryIndex; // index of sub query
+ char intervalTimeUnit;
+ char slidingTimeUnit;
int64_t intervalTime; // interval time
int64_t slidingTime; // sliding time
SLimitVal limit; // limit info
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index d2c52e972af5aa65bc97497f93936187fc97164f..2490e3d7569dfb45b5c94bb9ba2fd67b6860c3f0 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -229,8 +229,9 @@ typedef struct STableDataBlocks {
typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it seperately.
- uint32_t type; // query/insert type
+ char intervalTimeUnit;
char slidingTimeUnit;
+ uint32_t type; // query/insert type
STimeWindow window; // query time window
int64_t intervalTime; // aggregation time interval
int64_t slidingTime; // sliding window in mseconds
@@ -366,6 +367,8 @@ typedef struct SSqlStream {
uint32_t streamId;
char listed;
bool isProject;
+ char intervalTimeUnit;
+ char slidingTimeUnit;
int16_t precision;
int64_t num; // number of computing count
@@ -379,7 +382,7 @@ typedef struct SSqlStream {
int64_t ctime; // stream created time
int64_t stime; // stream next executed time
int64_t etime; // stream end query time, when time is larger then etime, the stream will be closed
- int64_t interval;
+ int64_t intervalTime;
int64_t slidingTime;
void * pTimer;
@@ -468,7 +471,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
// user defined constant value output columns
- if (pInfo->pSqlExpr->colInfo.flag == TSDB_COL_UDC) {
+ if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
pData = pInfo->pSqlExpr->param[1].pz;
pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen;
diff --git a/src/client/src/taos.rc.in b/src/client/src/taos.rc.in
index 05dbd9bb7d0557999cba6a7fe24a52b94f70c54e..751be85fd01ebf36546a05453df4f29f1937135f 100644
--- a/src/client/src/taos.rc.in
+++ b/src/client/src/taos.rc.in
@@ -1,6 +1,6 @@
1 VERSIONINFO
- FILEVERSION ${TD_VER_1}, ${TD_VER_2}, ${TD_VER_3}
- PRODUCTVERSION ${TD_VER_1}, ${TD_VER_2}, ${TD_VER_3}
+ FILEVERSION ${TD_VER_NUMBER}
+ PRODUCTVERSION ${TD_VER_NUMBER}
FILEFLAGSMASK 0x17L
#ifdef _DEBUG
FILEFLAGS 0x1L
@@ -16,12 +16,12 @@ BEGIN
BLOCK "040904b0"
BEGIN
VALUE "FileDescription", "Native C Driver for TDengine"
- VALUE "FileVersion", "${TD_VER_1}, ${TD_VER_2}, ${TD_VER_3}"
+ VALUE "FileVersion", "${TD_VER_NUMBER}"
VALUE "InternalName", "taos.dll(${TD_VER_CPUTYPE})"
VALUE "LegalCopyright", "Copyright (C) 2020 TAOS Data"
VALUE "OriginalFilename", ""
VALUE "ProductName", "taos.dll(${TD_VER_CPUTYPE})"
- VALUE "ProductVersion", "${TD_VER_1}.${TD_VER_2}.${TD_VER_3}.${TD_VER_4}"
+ VALUE "ProductVersion", "${TD_VER_NUMBER}"
END
END
BLOCK "VarFileInfo"
diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c
index 7f6ce1ed0e190d6681983bb9b8e8f292ff18b9ef..4b31a8001fba55b3f40bd43fefba4326102ef919 100644
--- a/src/client/src/tscFunctionImpl.c
+++ b/src/client/src/tscFunctionImpl.c
@@ -1648,9 +1648,10 @@ static void last_function(SQLFunctionCtx *pCtx) {
for (int32_t i = pCtx->size - 1; i >= 0; --i) {
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
- continue;
+ if (!pCtx->requireNull) {
+ continue;
+ }
}
-
memcpy(pCtx->aOutputBuf, data, pCtx->inputBytes);
TSKEY ts = pCtx->ptsList[i];
@@ -1721,7 +1722,9 @@ static void last_dist_function(SQLFunctionCtx *pCtx) {
for (int32_t i = pCtx->size - 1; i >= 0; --i) {
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
- continue;
+ if (!pCtx->requireNull) {
+ continue;
+ }
}
last_data_assign_impl(pCtx, data, i);
@@ -2034,7 +2037,7 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) {
tValuePair **tvp = pRes->res;
int32_t step = QUERY_ASC_FORWARD_STEP;
- int32_t len = GET_RES_INFO(pCtx)->numOfRes;
+ int32_t len = (int32_t)(GET_RES_INFO(pCtx)->numOfRes);
switch (type) {
case TSDB_DATA_TYPE_INT: {
@@ -2408,10 +2411,10 @@ static void top_bottom_func_finalizer(SQLFunctionCtx *pCtx) {
// user specify the order of output by sort the result according to timestamp
if (pCtx->param[1].i64Key == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
__compar_fn_t comparator = (pCtx->param[2].i64Key == TSDB_ORDER_ASC) ? resAscComparFn : resDescComparFn;
- qsort(tvp, pResInfo->numOfRes, POINTER_BYTES, comparator);
+ qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator);
} else if (pCtx->param[1].i64Key > PRIMARYKEY_TIMESTAMP_COL_INDEX) {
__compar_fn_t comparator = (pCtx->param[2].i64Key == TSDB_ORDER_ASC) ? resDataAscComparFn : resDataDescComparFn;
- qsort(tvp, pResInfo->numOfRes, POINTER_BYTES, comparator);
+ qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator);
}
GET_TRUE_DATA_TYPE();
diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c
index 759c08532a5d4b265d252b3a174a3aed0c0a9999..39a757795eae4d94b33284d5f7d223b32ee13a66 100644
--- a/src/client/src/tscLocalMerge.c
+++ b/src/client/src/tscLocalMerge.c
@@ -963,7 +963,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
int16_t offset = getColumnModelOffset(pLocalReducer->resColModel, i);
- memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i]->data, pField->bytes * pRes->numOfRows);
+ memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i]->data, (size_t)(pField->bytes * pRes->numOfRows));
}
pRes->numOfRowsGroup += pRes->numOfRows;
diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c
index 6ff97e9d00ad9cbf8cd6fdc92713b111a497c321..b8c38302046c8df4ea33d98633dd82c4e3a0ff40 100644
--- a/src/client/src/tscProfile.c
+++ b/src/client/src/tscProfile.c
@@ -259,11 +259,11 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
pSdesc->num = htobe64(pStream->num);
pSdesc->useconds = htobe64(pStream->useconds);
- pSdesc->stime = htobe64(pStream->stime - pStream->interval);
+ pSdesc->stime = htobe64(pStream->stime - pStream->intervalTime);
pSdesc->ctime = htobe64(pStream->ctime);
pSdesc->slidingTime = htobe64(pStream->slidingTime);
- pSdesc->interval = htobe64(pStream->interval);
+ pSdesc->interval = htobe64(pStream->intervalTime);
pHeartbeat->numOfStreams++;
pSdesc++;
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index c0e6d7f44e633c286e0907d4b11f70d6ff2cc8ea..05f32d0ab89eaff63d372be1b312b888d49513b1 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -33,6 +33,8 @@
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
+#define TSWINDOW_IS_EQUAL(t1, t2) (((t1).skey == (t2).skey) && ((t1).ekey == (t2).ekey))
+
// -1 is tbname column index, so here use the -3 as the initial value
#define COLUMN_INDEX_INITIAL_VAL (-3)
#define COLUMN_INDEX_INITIALIZER \
@@ -45,6 +47,10 @@ typedef struct SColumnList { // todo refactor
SColumnIndex ids[TSDB_MAX_COLUMNS];
} SColumnList;
+typedef struct SConvertFunc {
+ int32_t originFuncId;
+ int32_t execFuncId;
+} SConvertFunc;
static SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t colIndex, int32_t tableIndex);
static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo);
@@ -587,21 +593,20 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
// interval is not null
SStrToken* t = &pQuerySql->interval;
- if (getTimestampInUsFromStr(t->z, t->n, &pQueryInfo->intervalTime) != TSDB_CODE_SUCCESS) {
+ if (parseDuration(t->z, t->n, &pQueryInfo->intervalTime, &pQueryInfo->intervalTimeUnit) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
- // if the unit of time window value is millisecond, change the value from microsecond
- if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
- pQueryInfo->intervalTime = pQueryInfo->intervalTime / 1000;
- }
-
- /* parser has filter the illegal type, no need to check here */
- pQueryInfo->slidingTimeUnit = pQuerySql->interval.z[pQuerySql->interval.n - 1];
+ if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y') {
+ // if the unit of time window value is millisecond, change the value from microsecond
+ if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
+ pQueryInfo->intervalTime = pQueryInfo->intervalTime / 1000;
+ }
- // interval cannot be less than 10 milliseconds
- if (pQueryInfo->intervalTime < tsMinIntervalTime) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ // interval cannot be less than 10 milliseconds
+ if (pQueryInfo->intervalTime < tsMinIntervalTime) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
}
// for top/bottom + interval query, we do not add additional timestamp column in the front
@@ -666,6 +671,7 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
const char* msg0 = "sliding value too small";
const char* msg1 = "sliding value no larger than the interval value";
const char* msg2 = "sliding value can not less than 1% of interval value";
+  const char* msg3 = "does not support sliding when interval is natural month/year";
const static int32_t INTERVAL_SLIDING_FACTOR = 100;
@@ -673,21 +679,27 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
SStrToken* pSliding = &pQuerySql->sliding;
- if (pSliding->n != 0) {
- getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->slidingTime);
- if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
- pQueryInfo->slidingTime /= 1000;
- }
+ if (pSliding->n == 0) {
+ pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
+ pQueryInfo->slidingTime = pQueryInfo->intervalTime;
+ return TSDB_CODE_SUCCESS;
+ }
- if (pQueryInfo->slidingTime < tsMinSlidingTime) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
- }
+ if (pQueryInfo->intervalTimeUnit == 'n' || pQueryInfo->intervalTimeUnit == 'y') {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ }
- if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
- }
- } else {
- pQueryInfo->slidingTime = pQueryInfo->intervalTime;
+ getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->slidingTime);
+ if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
+ pQueryInfo->slidingTime /= 1000;
+ }
+
+ if (pQueryInfo->slidingTime < tsMinSlidingTime) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ }
+
+ if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if ((pQueryInfo->intervalTime != 0) && (pQueryInfo->intervalTime/pQueryInfo->slidingTime > INTERVAL_SLIDING_FACTOR)) {
@@ -1303,7 +1315,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, PRIMARYKEY_TIMESTAMP_COL_INDEX);
// add the timestamp column into the output columns
- int32_t numOfCols = tscSqlExprNumOfExprs(pQueryInfo);
+ int32_t numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
tscAddSpecialColumnForSelect(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL);
SFieldSupInfo* pSupInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, numOfCols);
@@ -1501,13 +1513,13 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
return TSDB_CODE_SUCCESS;
}
-static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSchema* pSchema, int32_t functionID, char* aliasName,
+static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSchema* pSchema, SConvertFunc cvtFunc, char* aliasName,
int32_t resColIdx, SColumnIndex* pColIndex) {
int16_t type = 0;
int16_t bytes = 0;
-
char columnName[TSDB_COL_NAME_LEN] = {0};
const char* msg1 = "not support column types";
+ int32_t functionID = cvtFunc.execFuncId;
if (functionID == TSDB_FUNC_SPREAD) {
if (pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BINARY ||
@@ -1523,16 +1535,21 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
type = pSchema[pColIndex->columnIndex].type;
bytes = pSchema[pColIndex->columnIndex].bytes;
}
-
+
if (aliasName != NULL) {
tstrncpy(columnName, aliasName, sizeof(columnName));
} else {
- getRevisedName(columnName, functionID, sizeof(columnName) - 1, pSchema[pColIndex->columnIndex].name);
+ getRevisedName(columnName, cvtFunc.originFuncId, sizeof(columnName) - 1, pSchema[pColIndex->columnIndex].name);
}
+
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, bytes, false);
tstrncpy(pExpr->aliasName, columnName, sizeof(pExpr->aliasName));
+ if (cvtFunc.originFuncId == TSDB_FUNC_LAST_ROW && cvtFunc.originFuncId != functionID) {
+ pExpr->colInfo.flag |= TSDB_COL_NULL;
+ }
+
// set reverse order scan data blocks for last query
if (functionID == TSDB_FUNC_LAST) {
pExpr->numOfParams = 1;
@@ -1766,7 +1783,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
-
+ SConvertFunc cvtFunc = {.originFuncId = functionID, .execFuncId = functionID};
+ if (functionID == TSDB_FUNC_LAST_ROW && TSWINDOW_IS_EQUAL(pQueryInfo->window,TSWINDOW_INITIALIZER)) {
+ cvtFunc.execFuncId = TSDB_FUNC_LAST;
+ }
if (!requireAllFields) {
if (pItem->pNode->pParam->nExpr < 1) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
@@ -1798,7 +1818,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
for (int32_t j = 0; j < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++j) {
index.columnIndex = j;
- if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex++, &index) != 0) {
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, pItem->aliasName, colIndex++, &index) != 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
@@ -1815,8 +1835,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
if ((index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) || (index.columnIndex < 0)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
-
- if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex + i, &index) != 0) {
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, pItem->aliasName, colIndex + i, &index) != 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -1853,7 +1872,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
for (int32_t i = 0; i < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++i) {
SColumnIndex index = {.tableIndex = j, .columnIndex = i};
- if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex, &index) != 0) {
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, pItem->aliasName, colIndex, &index) != 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -4675,7 +4694,9 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg0 = "sample interval can not be less than 10ms.";
const char* msg1 = "functions not allowed in select clause";
- if (pQueryInfo->intervalTime != 0 && pQueryInfo->intervalTime < 10) {
+ if (pQueryInfo->intervalTime != 0 && pQueryInfo->intervalTime < 10 &&
+ pQueryInfo->intervalTimeUnit != 'n' &&
+ pQueryInfo->intervalTimeUnit != 'y') {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
@@ -5238,7 +5259,7 @@ static bool tagColumnInGroupby(SSqlGroupbyExpr* pGroupbyExpr, int16_t columnId)
for (int32_t j = 0; j < pGroupbyExpr->numOfGroupCols; ++j) {
SColIndex* pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, j);
- if (columnId == pColIndex->colId && pColIndex->flag == TSDB_COL_TAG) {
+ if (columnId == pColIndex->colId && TSDB_COL_IS_TAG(pColIndex->flag )) {
return true;
}
}
@@ -5537,7 +5558,6 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
return checkUpdateTagPrjFunctions(pQueryInfo, pCmd);
}
}
-
int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
const char* msg1 = "only one expression allowed";
const char* msg2 = "invalid expression in select clause";
@@ -6090,6 +6110,10 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
}
int32_t joinQuery = (pQuerySql->from != NULL && pQuerySql->from->nExpr > 2);
+
+ if (pQuerySql->pWhere) {
+ pQueryInfo->window = TSWINDOW_INITIALIZER;
+ }
if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -6173,7 +6197,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
- if (pQueryInfo->intervalTime > 0) {
+ if (pQueryInfo->intervalTime > 0 && pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y') {
int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey);
// number of result is not greater than 10,000,000
if ((timeRange == 0) || (timeRange / pQueryInfo->intervalTime) > MAX_INTERVAL_TIME_WINDOW) {
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index 6dcc7086b05e53c99c98863ae6cc7ed229344642..1f042b59d6ae09edf8eddd0a450fe1becd6be033 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -669,6 +669,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->numOfCols = htons((int16_t)taosArrayGetSize(pQueryInfo->colList));
pQueryMsg->intervalTime = htobe64(pQueryInfo->intervalTime);
pQueryMsg->slidingTime = htobe64(pQueryInfo->slidingTime);
+ pQueryMsg->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pQueryMsg->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols);
pQueryMsg->numOfTags = htonl(numOfTags);
diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c
index 9dd47888d21d7626646a377429a66beadda837ea..79e00110932682d60cd6f4fe561fc4eb41cc18f1 100644
--- a/src/client/src/tscStream.c
+++ b/src/client/src/tscStream.c
@@ -46,22 +46,23 @@ static bool isProjectStream(SQueryInfo* pQueryInfo) {
return true;
}
-static int64_t tscGetRetryDelayTime(int64_t slidingTime, int16_t prec) {
+static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, int16_t prec) {
float retryRangeFactor = 0.3f;
-
- // change to ms
- if (prec == TSDB_TIME_PRECISION_MICRO) {
- slidingTime = slidingTime / 1000;
- }
-
int64_t retryDelta = (int64_t)(tsStreamCompRetryDelay * retryRangeFactor);
retryDelta = ((rand() % retryDelta) + tsStreamCompRetryDelay) * 1000L;
- if (slidingTime < retryDelta) {
- return slidingTime;
- } else {
- return retryDelta;
+ if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
+ // change to ms
+ if (prec == TSDB_TIME_PRECISION_MICRO) {
+ slidingTime = slidingTime / 1000;
+ }
+
+ if (slidingTime < retryDelta) {
+ return slidingTime;
+ }
}
+
+ return retryDelta;
}
static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
@@ -86,7 +87,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
// failed to get meter/metric meta, retry in 10sec.
if (code != TSDB_CODE_SUCCESS) {
- int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
+ int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
tscDebug("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
tscSetRetryTimer(pStream, pSql, retryDelayTime);
@@ -131,13 +132,17 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
}
if (etime > pStream->etime) {
etime = pStream->etime;
+ } else if (pStream->intervalTimeUnit != 'y' && pStream->intervalTimeUnit != 'n') {
+ etime = pStream->stime + (etime - pStream->stime) / pStream->intervalTime * pStream->intervalTime;
} else {
- etime = pStream->stime + (etime - pStream->stime) / pStream->interval * pStream->interval;
+ etime = taosGetIntervalStartTimestamp(etime, pStream->slidingTime, pStream->intervalTime, pStream->slidingTimeUnit, pStream->precision);
}
pQueryInfo->window.ekey = etime;
if (pQueryInfo->window.skey >= pQueryInfo->window.ekey) {
int64_t timer = pStream->slidingTime;
- if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
+ if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
+ timer = 86400 * 1000l;
+ } else if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
timer /= 1000l;
}
tscSetRetryTimer(pStream, pSql, timer);
@@ -157,7 +162,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows) {
SSqlStream *pStream = (SSqlStream *)param;
if (tres == NULL || numOfRows < 0) {
- int64_t retryDelay = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
+ int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
retryDelay);
@@ -218,7 +223,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
SSqlObj * pSql = (SSqlObj *)res;
if (pSql == NULL || numOfRows < 0) {
- int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
+ int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
@@ -241,7 +246,11 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
}
if (!pStream->isProject) {
- pStream->stime += pStream->slidingTime;
+ if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
+ pStream->stime = taosAddNatualInterval(pStream->stime, pStream->slidingTime, pStream->slidingTimeUnit, pStream->precision);
+ } else {
+ pStream->stime += pStream->slidingTime;
+ }
}
// actually only one row is returned. this following is not necessary
taos_fetch_rows_a(res, tscProcessStreamRetrieveResult, pStream);
@@ -301,7 +310,7 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
now + timer, timer, delay, pStream->stime, etime);
} else {
tscDebug("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream,
- pStream->stime, timer, delay, pStream->stime - pStream->interval, pStream->stime - 1);
+ pStream->stime, timer, delay, pStream->stime - pStream->intervalTime, pStream->stime - 1);
}
pSql->cmd.command = TSDB_SQL_SELECT;
@@ -311,23 +320,26 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
}
static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
- int64_t delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio);
-
int64_t maxDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
- if (delayDelta > maxDelay) {
- delayDelta = maxDelay;
- }
-
- int64_t remainTimeWindow = pStream->slidingTime - delayDelta;
- if (maxDelay > remainTimeWindow) {
- maxDelay = (int64_t)(remainTimeWindow / 1.5f);
+ int64_t delayDelta = maxDelay;
+ if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
+ delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio);
+ if (delayDelta > maxDelay) {
+ delayDelta = maxDelay;
+ }
+ int64_t remainTimeWindow = pStream->slidingTime - delayDelta;
+ if (maxDelay > remainTimeWindow) {
+ maxDelay = (int64_t)(remainTimeWindow / 1.5f);
+ }
}
int64_t currentDelay = (rand() % maxDelay); // a random number
currentDelay += delayDelta;
- assert(currentDelay < pStream->slidingTime);
+ if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
+ assert(currentDelay < pStream->slidingTime);
+ }
return currentDelay;
}
@@ -354,7 +366,8 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
return;
}
} else {
- if ((pStream->stime - pStream->interval) >= pStream->etime) {
+ int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
+ if (stime >= pStream->etime) {
tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
// TODO : How to terminate stream here
@@ -387,24 +400,24 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
- if (pQueryInfo->intervalTime < minIntervalTime) {
+ if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->intervalTime < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->intervalTime, minIntervalTime);
pQueryInfo->intervalTime = minIntervalTime;
}
- pStream->interval = pQueryInfo->intervalTime; // it shall be derived from sql string
+ pStream->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
+ pStream->intervalTime = pQueryInfo->intervalTime; // it shall be derived from sql string
- if (pQueryInfo->slidingTime == 0) {
+ if (pQueryInfo->slidingTime <= 0) {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
+ pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
}
int64_t minSlidingTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
- if (pQueryInfo->slidingTime == -1) {
- pQueryInfo->slidingTime = pQueryInfo->intervalTime;
- } else if (pQueryInfo->slidingTime < minSlidingTime) {
+ if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->slidingTime < minSlidingTime) {
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->slidingTime, minSlidingTime);
@@ -418,6 +431,7 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
}
+ pStream->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pStream->slidingTime = pQueryInfo->slidingTime;
if (pStream->isProject) {
@@ -431,7 +445,7 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
if (pStream->isProject) {
// no data in table, flush all data till now to destination meter, 10sec delay
- pStream->interval = tsProjectExecInterval;
+ pStream->intervalTime = tsProjectExecInterval;
pStream->slidingTime = tsProjectExecInterval;
if (stime != 0) { // first projection start from the latest event timestamp
@@ -442,11 +456,15 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
}
} else { // timewindow based aggregation stream
if (stime == 0) { // no data in meter till now
- stime = ((int64_t)taosGetTimestamp(pStream->precision) / pStream->interval) * pStream->interval;
- stime -= pStream->interval;
- tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64, pSql, pStream, stime);
+ stime = pQueryInfo->window.skey;
+ if (stime == INT64_MIN) {
+ stime = (int64_t)taosGetTimestamp(pStream->precision);
+ stime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
+ stime = taosGetIntervalStartTimestamp(stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
+ tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64, pSql, pStream, stime);
+ }
} else {
- int64_t newStime = (stime / pStream->interval) * pStream->interval;
+ int64_t newStime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
if (newStime != stime) {
tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql, pStream, stime, newStime);
stime = newStime;
@@ -516,7 +534,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
- pStream, pTableMetaInfo->name, pStream->interval, pStream->slidingTime, starttime, pSql->sqlstr);
+ pStream, pTableMetaInfo->name, pStream->intervalTime, pStream->slidingTime, starttime, pSql->sqlstr);
}
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index f8c4d779510c6efb79daf074294ab2146b0d41ac..e264fa9b331c9f923a445ac2bbbc67c1be11072f 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -178,6 +178,8 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, in
pSupporter->subqueryIndex = index;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
+ pSupporter->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
+ pSupporter->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pSupporter->intervalTime = pQueryInfo->intervalTime;
pSupporter->slidingTime = pQueryInfo->slidingTime;
pSupporter->limit = pQueryInfo->limit;
@@ -309,6 +311,8 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
// set the second stage sub query for join process
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE);
+ pQueryInfo->intervalTimeUnit = pSupporter->intervalTimeUnit;
+ pQueryInfo->slidingTimeUnit = pSupporter->slidingTimeUnit;
pQueryInfo->intervalTime = pSupporter->intervalTime;
pQueryInfo->slidingTime = pSupporter->slidingTime;
pQueryInfo->groupbyExpr = pSupporter->groupbyExpr;
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 7b09ef5902f8ee11f88063f073020054b70cbc08..6aa620eed215487b146515ad94e082b734b40f07 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -1835,6 +1835,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pNewQueryInfo->command = pQueryInfo->command;
+ pNewQueryInfo->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pNewQueryInfo->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pNewQueryInfo->intervalTime = pQueryInfo->intervalTime;
pNewQueryInfo->slidingTime = pQueryInfo->slidingTime;
diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h
index 2a4ac3fc40d043ead8262b8b32b24287d88f09f1..beef9ff375b5339fb3335c60653ac465e40ce3f3 100644
--- a/src/common/inc/tname.h
+++ b/src/common/inc/tname.h
@@ -35,6 +35,8 @@ bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
+int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision);
+int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision);
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
#endif // TDENGINE_NAME_H
diff --git a/src/common/src/tname.c b/src/common/src/tname.c
index 965a548d26429432ac45d3e05376e61e02f72cc3..49c9e6b7263533777e04f7164fce7e6bb394b012 100644
--- a/src/common/src/tname.c
+++ b/src/common/src/tname.c
@@ -53,7 +53,7 @@ SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const
s.type = pVal->nType;
if (s.type == TSDB_DATA_TYPE_BINARY || s.type == TSDB_DATA_TYPE_NCHAR) {
- s.bytes = pVal->nLen + VARSTR_HEADER_SIZE;
+ s.bytes = (int16_t)(pVal->nLen + VARSTR_HEADER_SIZE);
} else {
s.bytes = tDataTypeDesc[pVal->nType].nSize;
}
@@ -100,36 +100,123 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO
return pFilter;
}
+int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision) {
+ key /= 1000;
+ if (precision == TSDB_TIME_PRECISION_MICRO) {
+ key /= 1000;
+ }
+
+ struct tm tm;
+ time_t t = (time_t)key;
+ localtime_r(&t, &tm);
+
+ if (timeUnit == 'y') {
+ intervalTime *= 12;
+ }
+
+ int mon = (int)(tm.tm_year * 12 + tm.tm_mon + intervalTime);
+ tm.tm_year = mon / 12;
+ tm.tm_mon = mon % 12;
+
+ key = mktime(&tm) * 1000L;
+
+ if (precision == TSDB_TIME_PRECISION_MICRO) {
+ key *= 1000L;
+ }
+
+ return key;
+}
+
+int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision) {
+ skey /= 1000;
+ ekey /= 1000;
+ if (precision == TSDB_TIME_PRECISION_MICRO) {
+ skey /= 1000;
+ ekey /= 1000;
+ }
+ if (ekey < skey) {
+ int64_t tmp = ekey;
+ ekey = skey;
+ skey = tmp;
+ }
+
+ struct tm tm;
+ time_t t = (time_t)skey;
+ localtime_r(&t, &tm);
+ int smon = tm.tm_year * 12 + tm.tm_mon;
+
+ t = (time_t)ekey;
+ localtime_r(&t, &tm);
+ int emon = tm.tm_year * 12 + tm.tm_mon;
+
+ if (timeUnit == 'y') {
+ intervalTime *= 12;
+ }
+
+ return (emon - smon) / (int32_t)intervalTime;
+}
+
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {
return startTime;
}
+ int64_t start = startTime;
+ if (timeUnit == 'n' || timeUnit == 'y') {
+ start /= 1000;
+ if (precision == TSDB_TIME_PRECISION_MICRO) {
+ start /= 1000;
+ }
+ struct tm tm;
+ time_t t = (time_t)start;
+ localtime_r(&t, &tm);
+ tm.tm_sec = 0;
+ tm.tm_min = 0;
+ tm.tm_hour = 0;
+ tm.tm_mday = 1;
+
+ if (timeUnit == 'y') {
+ tm.tm_mon = 0;
+ tm.tm_year = (int)(tm.tm_year / slidingTime * slidingTime);
+ } else {
+ int mon = tm.tm_year * 12 + tm.tm_mon;
+ mon = (int)(mon / slidingTime * slidingTime);
+ tm.tm_year = mon / 12;
+ tm.tm_mon = mon % 12;
+ }
- int64_t delta = startTime - intervalTime;
- int32_t factor = delta > 0? 1:-1;
-
- int64_t start = (delta / slidingTime + factor) * slidingTime;
- if (!(timeUnit == 'u' || timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h')) {
- /*
- * here we revised the start time of day according to the local time zone,
- * but in case of DST, the start time of one day need to be dynamically decided.
- */
- // todo refactor to extract function that is available for Linux/Windows/Mac platform
-#if defined(WINDOWS) && _MSC_VER >= 1900
- // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
- int64_t timezone = _timezone;
- int32_t daylight = _daylight;
- char** tzname = _tzname;
-#endif
+ start = mktime(&tm) * 1000L;
+ if (precision == TSDB_TIME_PRECISION_MICRO) {
+ start *= 1000L;
+ }
+ } else {
+ int64_t delta = startTime - intervalTime;
+ int32_t factor = delta > 0? 1:-1;
+
+ start = (delta / slidingTime + factor) * slidingTime;
+
+ if (timeUnit == 'd' || timeUnit == 'w') {
+ /*
+ * here we revised the start time of day according to the local time zone,
+ * but in case of DST, the start time of one day need to be dynamically decided.
+ */
+ // todo refactor to extract function that is available for Linux/Windows/Mac platform
+ #if defined(WINDOWS) && _MSC_VER >= 1900
+ // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
+ int64_t timezone = _timezone;
+ int32_t daylight = _daylight;
+ char** tzname = _tzname;
+ #endif
+
+ int64_t t = (precision == TSDB_TIME_PRECISION_MILLI) ? MILLISECOND_PER_SECOND : MILLISECOND_PER_SECOND * 1000L;
+ start += timezone * t;
+ }
- int64_t t = (precision == TSDB_TIME_PRECISION_MILLI) ? MILLISECOND_PER_SECOND : MILLISECOND_PER_SECOND * 1000L;
- start += timezone * t;
+ int64_t end = start + intervalTime - 1;
+ if (end < startTime) {
+ start += slidingTime;
+ }
}
- int64_t end = start + intervalTime - 1;
- if (end < startTime) {
- start += slidingTime;
- }
return start;
}
diff --git a/src/connector/jdbc/src/test/java/TestPreparedStatement.java b/src/connector/jdbc/src/test/java/TestPreparedStatement.java
index 2e387206a41997eb629ead85b55fef9320ea7d14..1edb957493183c407b668bd68532e83fa1a64dba 100644
--- a/src/connector/jdbc/src/test/java/TestPreparedStatement.java
+++ b/src/connector/jdbc/src/test/java/TestPreparedStatement.java
@@ -12,7 +12,7 @@ public class TestPreparedStatement {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
- connection = DriverManager.getConnection("jdbc:TAOS://localhost:0/?user=root&password=taosdata", properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://localhost:0/", properties);
String rawSql = "select * from test.log0601";
// String[] params = new String[]{"ts", "c1"};
PreparedStatement pstmt = (TSDBPreparedStatement) connection.prepareStatement(rawSql);
diff --git a/src/connector/jdbc/src/test/java/TestTSDBDatabaseMetaData.java b/src/connector/jdbc/src/test/java/TestTSDBDatabaseMetaData.java
index f7e0e78441a9c2457a57605fe71e70a5dee609c2..39a08f0fe98a279e62ca07f20047813edeab56db 100644
--- a/src/connector/jdbc/src/test/java/TestTSDBDatabaseMetaData.java
+++ b/src/connector/jdbc/src/test/java/TestTSDBDatabaseMetaData.java
@@ -13,7 +13,7 @@ public class TestTSDBDatabaseMetaData {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
- connection = DriverManager.getConnection("jdbc:TAOS://localhost:0/?user=root&password=taosdata", properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://localhost:0/", properties);
dbMetaData = connection.getMetaData();
resSet = dbMetaData.getCatalogs();
while(resSet.next()) {
diff --git a/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java b/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java
index df730efa69f5937b938f2556d39517a0aa15b3bb..47acb200649be4c8c6af5817c2ef4c074a2a3fa0 100644
--- a/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java
+++ b/src/connector/jdbc/src/test/java/TestTSDBSubscribe.java
@@ -3,7 +3,6 @@ import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBResultSet;
import com.taosdata.jdbc.TSDBSubscribe;
-import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
@@ -17,12 +16,10 @@ public class TestTSDBSubscribe {
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- String cs = String.format("jdbc:TAOS://%s:0/%s?user=root&password=taosdata", host, database);
+ String cs = String.format("jdbc:TAOS://%s:0/%s", host, database);
return (TSDBConnection)DriverManager.getConnection(cs, properties);
}
-
-
public static void main(String[] args) throws Exception {
String usage = "java -Djava.ext.dirs=../ TestTSDBSubscribe [-host host] <-db database> <-topic topic> <-sql sql>";
if (args.length < 2) {
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BatchInsertTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BatchInsertTest.java
index cb78a5ca0eea5d780392fca95c29fe364ffcdbc3..c49293c96b568d70644f548722cf83374c430949 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BatchInsertTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/BatchInsertTest.java
@@ -40,8 +40,7 @@ public class BatchInsertTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
- , properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ConnectionTest.java
index 3fc8913ca3d56b7fb492b0ff20ddd85d474c901a..a54ece4eadc54e930a4af4faddb58dfef3ceb25a 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ConnectionTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ConnectionTest.java
@@ -29,16 +29,14 @@ public class ConnectionTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
- , properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
assertTrue(null != connection);
statement = connection.createStatement();
assertTrue(null != statement);
// try reconnect
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
- , properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
try {
statement.execute("create database if not exists " + dbName);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java
index 4c4d56020d8ea64e415001be9604808c5418b2af..284af3dfe75f7c436ec2ce875714afc235c525d8 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java
@@ -26,8 +26,7 @@ public class DatabaseMetaDataTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata",
- properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
String sql = "drop database if exists " + dbName;
statement = (TSDBPreparedStatement) connection.prepareStatement(sql);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ImportTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ImportTest.java
index 016eff1a359ccdb629dc9c27618ed3bee6091511..dbe16d9fea33be117cfa93a3ecd9a1ff4bd12cd8 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ImportTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ImportTest.java
@@ -28,8 +28,7 @@ public class ImportTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
- , properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/PreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/PreparedStatementTest.java
index d391771269e57e602988e0857d49163f3a101372..0535214ac198685ad65edf338bc6d72f841731a8 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/PreparedStatementTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/PreparedStatementTest.java
@@ -33,8 +33,7 @@ public class PreparedStatementTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata",
- properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
String sql = "drop database if exists " + dbName;
statement = (TSDBPreparedStatement) connection.prepareStatement(sql);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java
index a0b9c051c6241a32a60e4b12aa85b1f52ec42672..8067c547df148587766d87e204f5abad2bfb659d 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java
@@ -35,8 +35,7 @@ public class ResultSetTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
- , properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java
index de21cc6195baeb41271b198ccbd1302635c0c030..1844a92b471b8c6faf593de269ca337cc01b2cec 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java
@@ -28,8 +28,7 @@ public class SelectTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
- , properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StableTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StableTest.java
index c1321d08fc7a46fc724c81bbd44d790edbe03265..6e01fb7c349bbab73fcc6878d85330ac39de2cfc 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StableTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StableTest.java
@@ -31,8 +31,7 @@ public class StableTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
- , properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("create database if not exists " + dbName);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java
index 4fe5c39486edfdbdf015cc0a44d7917dbdd79749..db7b8c8cb14cbc0d1ae5a5a3a3bf8df590c4bc2a 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java
@@ -30,8 +30,7 @@ public class StatementTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
- , properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
index 2dc27adab72f1664b951c658209809a258b3a730..07b43d1227a55538026e3c7d8d35d1e6f5c9a416 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
@@ -32,8 +32,7 @@ public class SubscribeTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + "?user=root&password=taosdata"
- , properties);
+ connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("create database if not exists " + dbName);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java
index 9ea5a431a5ef6dca232467aa80d430c6ac33dd69..a0981063a5052f04b849b2187a78352a2c2560be 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java
@@ -12,7 +12,7 @@ public class TSDBDriverTest {
@Test
public void urlParserTest() throws SQLException {
TSDBDriver driver = new TSDBDriver();
- String url = "jdbc:TSDB://127.0.0.1:0/db?user=root&password=your_password";
+ String url = "jdbc:TSDB://127.0.0.1:0/db";
Properties properties = new Properties();
driver.parseURL(url, properties);
diff --git a/src/connector/python/linux/python2/dist/taos-1.4.15.linux-x86_64.tar.gz b/src/connector/python/linux/python2/dist/taos-1.4.15.linux-x86_64.tar.gz
deleted file mode 100644
index b9c4e9e5718f5b35956fc46e37a5a67d41a1ae3f..0000000000000000000000000000000000000000
Binary files a/src/connector/python/linux/python2/dist/taos-1.4.15.linux-x86_64.tar.gz and /dev/null differ
diff --git a/src/connector/python/linux/python2/dist/taos-1.4.15.tar.gz b/src/connector/python/linux/python2/dist/taos-1.4.15.tar.gz
deleted file mode 100644
index c50a957a2a6440342d37ad57b31a4a86d15cc2d4..0000000000000000000000000000000000000000
Binary files a/src/connector/python/linux/python2/dist/taos-1.4.15.tar.gz and /dev/null differ
diff --git a/src/connector/python/linux/python2/taos.egg-info/PKG-INFO b/src/connector/python/linux/python2/taos.egg-info/PKG-INFO
deleted file mode 100644
index 96bf9059fd076214c159e8824c3e060bb7c683df..0000000000000000000000000000000000000000
--- a/src/connector/python/linux/python2/taos.egg-info/PKG-INFO
+++ /dev/null
@@ -1,13 +0,0 @@
-Metadata-Version: 2.1
-Name: taos
-Version: 2.0.0
-Summary: TDengine python client package
-Home-page: https://github.com/pypa/sampleproject
-Author: Taosdata Inc.
-Author-email: support@taosdata.com
-License: UNKNOWN
-Description: # TDengine python client interface
-Platform: UNKNOWN
-Classifier: Programming Language :: Python :: 2
-Classifier: Operating System :: Linux
-Description-Content-Type: text/markdown
diff --git a/src/connector/python/linux/python2/taos.egg-info/SOURCES.txt b/src/connector/python/linux/python2/taos.egg-info/SOURCES.txt
deleted file mode 100644
index 23a38056c07fbfca16754614dc0389d6893893ff..0000000000000000000000000000000000000000
--- a/src/connector/python/linux/python2/taos.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-README.md
-setup.py
-taos/__init__.py
-taos/cinterface.py
-taos/connection.py
-taos/constants.py
-taos/cursor.py
-taos/dbapi.py
-taos/error.py
-taos.egg-info/PKG-INFO
-taos.egg-info/SOURCES.txt
-taos.egg-info/dependency_links.txt
-taos.egg-info/top_level.txt
\ No newline at end of file
diff --git a/src/connector/python/linux/python2/taos.egg-info/dependency_links.txt b/src/connector/python/linux/python2/taos.egg-info/dependency_links.txt
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/src/connector/python/linux/python2/taos.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/src/connector/python/linux/python2/taos.egg-info/top_level.txt b/src/connector/python/linux/python2/taos.egg-info/top_level.txt
deleted file mode 100644
index 6b5f0c008b9a67f85944b090fb33fc84bfcaaf7b..0000000000000000000000000000000000000000
--- a/src/connector/python/linux/python2/taos.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-taos
diff --git a/src/connector/python/linux/python3/dist/taos-1.4.15.linux-x86_64.tar.gz b/src/connector/python/linux/python3/dist/taos-1.4.15.linux-x86_64.tar.gz
deleted file mode 100644
index 422fa8db5033ee3b1f3c586413a65ebeab22561d..0000000000000000000000000000000000000000
Binary files a/src/connector/python/linux/python3/dist/taos-1.4.15.linux-x86_64.tar.gz and /dev/null differ
diff --git a/src/connector/python/linux/python3/dist/taos-1.4.15.tar.gz b/src/connector/python/linux/python3/dist/taos-1.4.15.tar.gz
deleted file mode 100644
index 51622a16a53a5945ebc9566227ee8e59599c5783..0000000000000000000000000000000000000000
Binary files a/src/connector/python/linux/python3/dist/taos-1.4.15.tar.gz and /dev/null differ
diff --git a/src/connector/python/linux/python3/taos.egg-info/PKG-INFO b/src/connector/python/linux/python3/taos.egg-info/PKG-INFO
deleted file mode 100644
index 1e6c829ef1d8331852e89204596f60cd00a17dfc..0000000000000000000000000000000000000000
--- a/src/connector/python/linux/python3/taos.egg-info/PKG-INFO
+++ /dev/null
@@ -1,13 +0,0 @@
-Metadata-Version: 2.1
-Name: taos
-Version: 2.0.0
-Summary: TDengine python client package
-Home-page: https://github.com/pypa/sampleproject
-Author: Taosdata Inc.
-Author-email: support@taosdata.com
-License: UNKNOWN
-Description: # TDengine python client interface
-Platform: UNKNOWN
-Classifier: Programming Language :: Python :: 3
-Classifier: Operating System :: Linux
-Description-Content-Type: text/markdown
diff --git a/src/connector/python/linux/python3/taos.egg-info/SOURCES.txt b/src/connector/python/linux/python3/taos.egg-info/SOURCES.txt
deleted file mode 100644
index 23a38056c07fbfca16754614dc0389d6893893ff..0000000000000000000000000000000000000000
--- a/src/connector/python/linux/python3/taos.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-README.md
-setup.py
-taos/__init__.py
-taos/cinterface.py
-taos/connection.py
-taos/constants.py
-taos/cursor.py
-taos/dbapi.py
-taos/error.py
-taos.egg-info/PKG-INFO
-taos.egg-info/SOURCES.txt
-taos.egg-info/dependency_links.txt
-taos.egg-info/top_level.txt
\ No newline at end of file
diff --git a/src/connector/python/linux/python3/taos.egg-info/dependency_links.txt b/src/connector/python/linux/python3/taos.egg-info/dependency_links.txt
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/src/connector/python/linux/python3/taos.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/src/connector/python/linux/python3/taos.egg-info/top_level.txt b/src/connector/python/linux/python3/taos.egg-info/top_level.txt
deleted file mode 100644
index 6b5f0c008b9a67f85944b090fb33fc84bfcaaf7b..0000000000000000000000000000000000000000
--- a/src/connector/python/linux/python3/taos.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-taos
diff --git a/src/connector/python/linux/python3/test.py b/src/connector/python/linux/python3/test.py
deleted file mode 100644
index b11b2c76a05abb845600b22f7cb6036e4d38dcc3..0000000000000000000000000000000000000000
--- a/src/connector/python/linux/python3/test.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from taos.cinterface import CTaosInterface
-from taos.error import *
-from taos.subscription import TDengineSubscription
-from taos.connection import TDengineConnection
-
-
-if __name__ == '__main__':
- conn = TDengineConnection(
- host="127.0.0.1", user="root", password="taosdata", database="test")
-
- # Generate a cursor object to run SQL commands
- sub = conn.subscribe(False, "test", "select * from log0601;", 1000)
-
- for i in range(100):
- print(i)
- data = sub.consume()
- for d in data:
- print(d)
-
- sub.close()
- conn.close()
diff --git a/src/connector/python/windows/python2/dist/taos-1.4.15-py2-none-any.whl b/src/connector/python/windows/python2/dist/taos-1.4.15-py2-none-any.whl
deleted file mode 100644
index 6e97d3a5e40a62e80b633ab7e728839b4299b506..0000000000000000000000000000000000000000
Binary files a/src/connector/python/windows/python2/dist/taos-1.4.15-py2-none-any.whl and /dev/null differ
diff --git a/src/connector/python/windows/python2/dist/taos-1.4.15.tar.gz b/src/connector/python/windows/python2/dist/taos-1.4.15.tar.gz
deleted file mode 100644
index dcc20602426b475eaa88cd9aa9877a53cd72a62e..0000000000000000000000000000000000000000
Binary files a/src/connector/python/windows/python2/dist/taos-1.4.15.tar.gz and /dev/null differ
diff --git a/src/connector/python/windows/python2/taos.egg-info/PKG-INFO b/src/connector/python/windows/python2/taos.egg-info/PKG-INFO
deleted file mode 100644
index 9babb669a7570ca0dd2dcf2e808b9c8a07a61adc..0000000000000000000000000000000000000000
--- a/src/connector/python/windows/python2/taos.egg-info/PKG-INFO
+++ /dev/null
@@ -1,13 +0,0 @@
-Metadata-Version: 2.1
-Name: taos
-Version: 2.0.0
-Summary: TDengine python client package
-Home-page: https://github.com/pypa/sampleproject
-Author: Taosdata Inc.
-Author-email: support@taosdata.com
-License: UNKNOWN
-Description: # TDengine python client interface
-Platform: UNKNOWN
-Classifier: Programming Language :: Python :: 2
-Classifier: Operating System :: Windows
-Description-Content-Type: text/markdown
diff --git a/src/connector/python/windows/python2/taos.egg-info/SOURCES.txt b/src/connector/python/windows/python2/taos.egg-info/SOURCES.txt
deleted file mode 100644
index 23a38056c07fbfca16754614dc0389d6893893ff..0000000000000000000000000000000000000000
--- a/src/connector/python/windows/python2/taos.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-README.md
-setup.py
-taos/__init__.py
-taos/cinterface.py
-taos/connection.py
-taos/constants.py
-taos/cursor.py
-taos/dbapi.py
-taos/error.py
-taos.egg-info/PKG-INFO
-taos.egg-info/SOURCES.txt
-taos.egg-info/dependency_links.txt
-taos.egg-info/top_level.txt
\ No newline at end of file
diff --git a/src/connector/python/windows/python2/taos.egg-info/dependency_links.txt b/src/connector/python/windows/python2/taos.egg-info/dependency_links.txt
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/src/connector/python/windows/python2/taos.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/src/connector/python/windows/python2/taos.egg-info/top_level.txt b/src/connector/python/windows/python2/taos.egg-info/top_level.txt
deleted file mode 100644
index 6b5f0c008b9a67f85944b090fb33fc84bfcaaf7b..0000000000000000000000000000000000000000
--- a/src/connector/python/windows/python2/taos.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-taos
diff --git a/src/connector/python/windows/python3/dist/taos-1.4.15-py3-none-any.whl b/src/connector/python/windows/python3/dist/taos-1.4.15-py3-none-any.whl
deleted file mode 100644
index 3c4c8a34a6afd0121fe6c560f0d9499d8dbf5a9c..0000000000000000000000000000000000000000
Binary files a/src/connector/python/windows/python3/dist/taos-1.4.15-py3-none-any.whl and /dev/null differ
diff --git a/src/connector/python/windows/python3/dist/taos-1.4.15.tar.gz b/src/connector/python/windows/python3/dist/taos-1.4.15.tar.gz
deleted file mode 100644
index 6294828f6db7768f6b307b36f1cc3d43b4da0749..0000000000000000000000000000000000000000
Binary files a/src/connector/python/windows/python3/dist/taos-1.4.15.tar.gz and /dev/null differ
diff --git a/src/connector/python/windows/python3/taos.egg-info/PKG-INFO b/src/connector/python/windows/python3/taos.egg-info/PKG-INFO
deleted file mode 100644
index 6213b0d165ed4d807e35038807c9f20b33cd921e..0000000000000000000000000000000000000000
--- a/src/connector/python/windows/python3/taos.egg-info/PKG-INFO
+++ /dev/null
@@ -1,13 +0,0 @@
-Metadata-Version: 2.1
-Name: taos
-Version: 2.0.0
-Summary: TDengine python client package
-Home-page: https://github.com/pypa/sampleproject
-Author: Hongze Cheng
-Author-email: hzcheng@taosdata.com
-License: UNKNOWN
-Description: # TDengine python client interface
-Platform: UNKNOWN
-Classifier: Programming Language :: Python :: 3
-Classifier: Operating System :: Windows
-Description-Content-Type: text/markdown
diff --git a/src/connector/python/windows/python3/taos.egg-info/SOURCES.txt b/src/connector/python/windows/python3/taos.egg-info/SOURCES.txt
deleted file mode 100644
index 0dbf14e5b66927ae3af1e99843c3d5ab84a222f7..0000000000000000000000000000000000000000
--- a/src/connector/python/windows/python3/taos.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-README.md
-setup.py
-taos/__init__.py
-taos/cinterface.py
-taos/connection.py
-taos/constants.py
-taos/cursor.py
-taos/dbapi.py
-taos/error.py
-taos.egg-info/PKG-INFO
-taos.egg-info/SOURCES.txt
-taos.egg-info/dependency_links.txt
-taos.egg-info/top_level.txt
\ No newline at end of file
diff --git a/src/connector/python/windows/python3/taos.egg-info/dependency_links.txt b/src/connector/python/windows/python3/taos.egg-info/dependency_links.txt
deleted file mode 100644
index d3f5a12faa99758192ecc4ed3fc22c9249232e86..0000000000000000000000000000000000000000
--- a/src/connector/python/windows/python3/taos.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/src/connector/python/windows/python3/taos.egg-info/top_level.txt b/src/connector/python/windows/python3/taos.egg-info/top_level.txt
deleted file mode 100644
index bb442feabdde9fc9c6e9bfca85ca25a1ba0e42ab..0000000000000000000000000000000000000000
--- a/src/connector/python/windows/python3/taos.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-taos
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index b61890347f8af8e37cf0664c83c02e55f829a27d..cd25ddcc5596b98e7ca93c15a9dcaf4d6b3a9608 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -85,7 +85,11 @@ extern const int32_t TYPE_BYTES[11];
#define TSDB_DATA_NULL_STR_L "null"
#define TSDB_DEFAULT_USER "root"
+#ifdef _TD_POWER_
+#define TSDB_DEFAULT_PASS "powerdb"
+#else
#define TSDB_DEFAULT_PASS "taosdata"
+#endif
#define TSDB_TRUE 1
#define TSDB_FALSE 0
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index a789d002bfd03587d2567924422287dd5a3ab0a0..951c511022e5df1bf7e385cde526b177a12812ad 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -140,6 +140,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_READY, 0, 0x033C, "Cluster no
TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_ALREADY_EXIST, 0, 0x0340, "Account already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT, 0, 0x0341, "Invalid account")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT_OPTION, 0, 0x0342, "Invalid account options")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_EXPIRED, 0, 0x0343, "Account authorization has expired")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_USER_ALREADY_EXIST, 0, 0x0350, "User already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_USER, 0, 0x0351, "Invalid user")
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index 761a267ce53f9b2c54e0ea993ebd346564feee29..e2df886320b89060920b0c691451ea480467b8e6 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -170,6 +170,13 @@ enum _mgmt_table {
#define TSDB_COL_NORMAL 0x0u // the normal column of the table
#define TSDB_COL_TAG 0x1u // the tag column type
#define TSDB_COL_UDC 0x2u // the user specified normal string column, it is a dummy column
+#define TSDB_COL_NULL 0x4u // the column filter NULL or not
+
+#define TSDB_COL_IS_TAG(f) (((f&(~(TSDB_COL_NULL)))&TSDB_COL_TAG) != 0)
+#define TSDB_COL_IS_NORMAL_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_NORMAL)
+#define TSDB_COL_IS_UD_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_UDC)
+#define TSDB_COL_REQ_NULL(f) (((f)&TSDB_COL_NULL) != 0)
+
extern char *taosMsg[];
@@ -456,6 +463,7 @@ typedef struct {
int64_t intervalTime; // time interval for aggregation, in million second
int64_t intervalOffset; // start offset for interval query
int64_t slidingTime; // value for sliding window
+ char intervalTimeUnit;
char slidingTimeUnit; // time interval type, for revisement of interval(1d)
uint16_t tagCondLen; // tag length in current query
int16_t numOfGroupCols; // num of group by columns
diff --git a/src/kit/CMakeLists.txt b/src/kit/CMakeLists.txt
index 3bacd426f3fc92b18d50e06033f3ad913375540f..77db79e22003d04701bf7417cc9ebc06b202533e 100644
--- a/src/kit/CMakeLists.txt
+++ b/src/kit/CMakeLists.txt
@@ -3,5 +3,3 @@ PROJECT(TDengine)
ADD_SUBDIRECTORY(shell)
ADD_SUBDIRECTORY(taosdemo)
-#ADD_SUBDIRECTORY(taosClusterTest)
-ADD_SUBDIRECTORY(taosnetwork)
diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt
index 0305d9f1cc082fb668b9b674d8dd180db08f1c1d..c86cac281c0283aac691555927d679b0ef4f4f64 100644
--- a/src/kit/shell/CMakeLists.txt
+++ b/src/kit/shell/CMakeLists.txt
@@ -24,7 +24,12 @@ ELSEIF (TD_WINDOWS)
LIST(APPEND SRC ./src/shellWindows.c)
ADD_EXECUTABLE(shell ${SRC})
TARGET_LINK_LIBRARIES(shell taos_static)
- SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
+
+ IF (TD_POWER)
+ SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME power)
+ ELSE ()
+ SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
+ ENDIF ()
ELSEIF (TD_DARWIN)
LIST(APPEND SRC ./src/shellEngine.c)
LIST(APPEND SRC ./src/shellMain.c)
diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h
index d47f87965b6494da79f9aab52f9d8b91d6169106..dd62df170a7c87f127eb7c52c1f580b7f460b445 100644
--- a/src/kit/shell/inc/shell.h
+++ b/src/kit/shell/inc/shell.h
@@ -50,6 +50,9 @@ typedef struct SShellArguments {
char* commands;
int abort;
int port;
+ int endPort;
+ int pktLen;
+ char* netTestRole;
} SShellArguments;
/**************** Function declarations ****************/
diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c
index 8f7f4d9324d01778daef9958db2ce97634d2f223..c8df17d6aef78403221aba12248836572f1cfc9f 100644
--- a/src/kit/shell/src/shellEngine.c
+++ b/src/kit/shell/src/shellEngine.c
@@ -30,11 +30,22 @@
#include
/**************** Global variables ****************/
+#ifdef _TD_POWER_
+char CLIENT_VERSION[] = "Welcome to the PowerDB shell from %s, Client Version:%s\n"
+ "Copyright (c) 2017 by PowerDB, Inc. All rights reserved.\n\n";
+char PROMPT_HEADER[] = "power> ";
+
+char CONTINUE_PROMPT[] = " -> ";
+int prompt_size = 7;
+#else
char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
"Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.\n\n";
char PROMPT_HEADER[] = "taos> ";
+
char CONTINUE_PROMPT[] = " -> ";
int prompt_size = 6;
+#endif
+
TAOS_RES *result = NULL;
SShellHistory history;
diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c
index c74eeb7f59de5d1b813072ec5b7f952d3c2ce632..6c09d5c9d06c1182bdd45b04fec551d8ecf99e4d 100644
--- a/src/kit/shell/src/shellLinux.c
+++ b/src/kit/shell/src/shellLinux.c
@@ -46,6 +46,9 @@ static struct argp_option options[] = {
{"thread", 'T', "THREADNUM", 0, "Number of threads when using multi-thread to import data."},
{"database", 'd', "DATABASE", 0, "Database to use when connecting to the server."},
{"timezone", 't', "TIMEZONE", 0, "Time zone of the shell, default is local."},
+ {"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is NULL, valid option: client | server."},
+ {"endport", 'e', "ENDPORT", 0, "Net test end port, default is 6042."},
+ {"pktlen", 'l', "PKTLEN", 0, "Packet length used for net test, default is 1000 bytes."},
{0}};
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
@@ -65,6 +68,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'P':
if (arg) {
tsDnodeShellPort = atoi(arg);
+ arguments->port = atoi(arg);
} else {
fprintf(stderr, "Invalid port\n");
return -1;
@@ -126,6 +130,29 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'd':
arguments->database = arg;
break;
+
+ case 'n':
+ arguments->netTestRole = arg;
+ break;
+
+ case 'e':
+ if (arg) {
+ arguments->endPort = atoi(arg);
+ } else {
+ fprintf(stderr, "Invalid end port\n");
+ return -1;
+ }
+ break;
+
+ case 'l':
+ if (arg) {
+ arguments->pktLen = atoi(arg);
+ } else {
+ fprintf(stderr, "Invalid packet length\n");
+ return -1;
+ }
+ break;
+
case OPT_ABORT:
arguments->abort = 1;
break;
diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c
index 44de6641f62c67a47660bbca6099f9bcf96cac8e..6cb7c669cc7a08434b2558588067d007b51b3595 100644
--- a/src/kit/shell/src/shellMain.c
+++ b/src/kit/shell/src/shellMain.c
@@ -15,6 +15,7 @@
#include "os.h"
#include "shell.h"
+#include "tnettest.h"
pthread_t pid;
@@ -60,7 +61,10 @@ SShellArguments args = {
.file = "\0",
.dir = "\0",
.threadNum = 5,
- .commands = NULL
+ .commands = NULL,
+ .endPort = 6042,
+ .pktLen = 1000,
+ .netTestRole = NULL
};
/*
@@ -75,6 +79,11 @@ int main(int argc, char* argv[]) {
shellParseArgument(argc, argv, &args);
+ if (args.netTestRole && args.netTestRole[0] != 0) {
+ taosNetTest(args.host, (uint16_t)args.port, (uint16_t)args.endPort, args.pktLen, args.netTestRole);
+ exit(0);
+ }
+
/* Initialize the shell */
TAOS* con = shellInit(&args);
if (con == NULL) {
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 6469990c780bac6b9d7ec1161459dd234bacff19..699e96428e201e998c1a4bb9cb8cd3c835e61364 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -89,7 +89,11 @@ typedef struct DemoArguments {
{0, 'h', "host", 0, "The host to connect to TDengine. Default is localhost.", 0},
{0, 'p', "port", 0, "The TCP/IP port number to use for the connection. Default is 0.", 1},
{0, 'u', "user", 0, "The TDengine user name to use when connecting to the server. Default is 'root'.", 2},
+ #ifdef _TD_POWER_
+ {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'powerdb'.", 3},
+ #else
{0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3},
+ #endif
{0, 'd', "database", 0, "Destination database. Default is 'test'.", 3},
{0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 3},
{0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 3},
@@ -104,7 +108,11 @@ typedef struct DemoArguments {
{0, 'r', "num_of_records_per_req", 0, "The number of records per request. Default is 1000.", 10},
{0, 't', "num_of_tables", 0, "The number of tables. Default is 10000.", 11},
{0, 'n', "num_of_records_per_table", 0, "The number of records per table. Default is 100000.", 12},
+ #ifdef _TD_POWER_
+ {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/power/'.", 14},
+ #else
{0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 14},
+ #endif
{0, 'x', 0, 0, "Insert only flag.", 13},
{0, 'O', "order", 0, "Insert mode--0: In order, 1: Out of order. Default is in order.", 14},
{0, 'R', "rate", 0, "Out of order data's rate--if order=1 Default 10, min: 0, max: 50.", 14},
@@ -279,7 +287,11 @@ typedef struct DemoArguments {
printf("%s%s\n", indent, "-u");
printf("%s%s%s\n", indent, indent, "user, The user name to use when connecting to the server. Default is 'root'.");
printf("%s%s\n", indent, "-p");
+ #ifdef _TD_POWER_
+ printf("%s%s%s\n", indent, indent, "password, The password to use when connecting to the server. Default is 'powerdb'.");
+ #else
printf("%s%s%s\n", indent, indent, "password, The password to use when connecting to the server. Default is 'taosdata'.");
+ #endif
printf("%s%s\n", indent, "-d");
printf("%s%s%s\n", indent, indent, "database, Destination database. Default is 'test'.");
printf("%s%s\n", indent, "-a");
@@ -309,7 +321,11 @@ typedef struct DemoArguments {
printf("%s%s\n", indent, "-n");
printf("%s%s%s\n", indent, indent, "num_of_records_per_table, The number of records per table. Default is 100000.");
printf("%s%s\n", indent, "-c");
+ #ifdef _TD_POWER_
+ printf("%s%s%s\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/power/'.");
+ #else
printf("%s%s%s\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/taos/'.");
+ #endif
printf("%s%s\n", indent, "-x");
printf("%s%s%s\n", indent, indent, "flag, Insert only flag.");
printf("%s%s\n", indent, "-O");
@@ -513,7 +529,11 @@ int main(int argc, char *argv[]) {
SDemoArguments arguments = { NULL, // host
0, // port
"root", // user
+ #ifdef _TD_POWER_
+ "powerdb", // password
+ #else
"taosdata", // password
+ #endif
"test", // database
1, // replica
"t", // tb_prefix
diff --git a/src/kit/taosnetwork/CMakeLists.txt b/src/kit/taosnetwork/CMakeLists.txt
deleted file mode 100644
index 9d2a5ba3f8622e9623083ee0c9788fe8716b3058..0000000000000000000000000000000000000000
--- a/src/kit/taosnetwork/CMakeLists.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
-PROJECT(TDengine)
-
-IF (TD_LINUX)
- AUX_SOURCE_DIRECTORY(. SRC)
- ADD_EXECUTABLE(taosClient client.c)
- ADD_EXECUTABLE(taosServer server.c)
- TARGET_LINK_LIBRARIES( taosServer -lpthread -lm -lrt )
- TARGET_LINK_LIBRARIES( taosClient -lpthread -lm -lrt )
-ENDIF ()
diff --git a/src/kit/taosnetwork/client.c b/src/kit/taosnetwork/client.c
deleted file mode 100644
index b7db2ba0a2ec607de1dde010fc89be1cda1c9c9a..0000000000000000000000000000000000000000
--- a/src/kit/taosnetwork/client.c
+++ /dev/null
@@ -1,313 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define MAX_PKG_LEN (64*1000)
-#define BUFFER_SIZE (MAX_PKG_LEN + 1024)
-#define TEST_FQDN_LEN 128
-#define TEST_IPv4ADDR_LEN 16
-
-typedef struct {
- uint16_t port;
- uint32_t hostIp;
- char fqdn[TEST_FQDN_LEN];
- uint16_t pktLen;
-} info_s;
-
-typedef struct Arguments {
- char host[TEST_IPv4ADDR_LEN];
- char fqdn[TEST_FQDN_LEN];
- uint16_t port;
- uint16_t max_port;
- uint16_t pktLen;
-} SArguments;
-
-static struct argp_option options[] = {
- {0, 'h', "host ip", 0, "The host ip to connect to TDEngine. Default is localhost.", 0},
- {0, 'p', "port", 0, "The TCP or UDP port number to use for the connection. Default is 6030.", 1},
- {0, 'm', "max port", 0, "The max TCP or UDP port number to use for the connection. Default is 6042.", 2},
- {0, 'f', "host fqdn", 0, "The host fqdn to connect to TDEngine.", 3},
- {0, 'l', "test pkg len", 0, "The len of pkg for test. Default is 1000 Bytes, max not greater than 64k Bytes.\nNotes: This parameter must be consistent between the client and the server.", 3}};
-
-static error_t parse_opt(int key, char *arg, struct argp_state *state) {
- wordexp_t full_path;
- SArguments *arguments = state->input;
- switch (key) {
- case 'h':
- if (wordexp(arg, &full_path, 0) != 0) {
- fprintf(stderr, "Invalid host ip %s\n", arg);
- return -1;
- }
- strcpy(arguments->host, full_path.we_wordv[0]);
- wordfree(&full_path);
- break;
- case 'p':
- arguments->port = atoi(arg);
- break;
- case 'm':
- arguments->max_port = atoi(arg);
- break;
- case 'l':
- arguments->pktLen = atoi(arg);
- break;
- case 'f':
- if (wordexp(arg, &full_path, 0) != 0) {
- fprintf(stderr, "Invalid host fqdn %s\n", arg);
- return -1;
- }
- strcpy(arguments->fqdn, full_path.we_wordv[0]);
- wordfree(&full_path);
- break;
-
- default:
- return ARGP_ERR_UNKNOWN;
- }
- return 0;
-}
-
-static struct argp argp = {options, parse_opt, 0, 0};
-
-int checkTcpPort(info_s *info) {
- int clientSocket;
-
- struct sockaddr_in serverAddr;
- char sendbuf[BUFFER_SIZE];
- char recvbuf[BUFFER_SIZE];
- int iDataNum = 0;
- if ((clientSocket = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
- printf("socket() fail: %s\n", strerror(errno));
- return -1;
- }
-
- // set send and recv overtime
- struct timeval timeout;
- timeout.tv_sec = 2; //s
- timeout.tv_usec = 0; //us
- if (setsockopt(clientSocket, SOL_SOCKET,SO_SNDTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
- perror("setsockopt send timer failed:");
- }
- if (setsockopt(clientSocket, SOL_SOCKET,SO_RCVTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
- perror("setsockopt recv timer failed:");
- }
-
- serverAddr.sin_family = AF_INET;
- serverAddr.sin_port = htons(info->port);
-
- serverAddr.sin_addr.s_addr = info->hostIp;
-
- //printf("=================================\n");
- if (connect(clientSocket, (struct sockaddr *)&serverAddr, sizeof(serverAddr)) < 0) {
- printf("connect() fail: %s\t", strerror(errno));
- return -1;
- }
- //printf("Connect to: %s:%d...success\n", host, port);
- memset(sendbuf, 0, BUFFER_SIZE);
- memset(recvbuf, 0, BUFFER_SIZE);
-
- struct in_addr ipStr;
- memcpy(&ipStr, &info->hostIp, 4);
- sprintf(sendbuf, "client send tcp pkg to %s:%d, content: 1122334455", inet_ntoa(ipStr), info->port);
- sprintf(sendbuf + info->pktLen - 16, "1122334455667788");
-
- send(clientSocket, sendbuf, info->pktLen, 0);
-
- memset(recvbuf, 0, BUFFER_SIZE);
- int nleft, nread;
- char *ptr = recvbuf;
- nleft = info->pktLen;
- while (nleft > 0) {
- nread = recv(clientSocket, ptr, BUFFER_SIZE, 0);;
-
- if (nread == 0) {
- break;
- } else if (nread < 0) {
- if (errno == EINTR) {
- continue;
- } else {
- printf("recv ack pkg from TCP port: %d fail:%s.\n", info->port, strerror(errno));
- close(clientSocket);
- return -1;
- }
- } else {
- nleft -= nread;
- ptr += nread;
- iDataNum += nread;
- }
- }
-
- if (iDataNum < info->pktLen) {
- printf("recv ack pkg len: %d, less than req pkg len: %d from tcp port: %d\n", iDataNum, info->pktLen, info->port);
- return -1;
- }
- //printf("Read ack pkg len:%d from tcp port: %d, buffer: %s %s\n", info->pktLen, port, recvbuf, recvbuf+iDataNum-8);
-
- close(clientSocket);
- return 0;
-}
-
-int checkUdpPort(info_s *info) {
- int clientSocket;
-
- struct sockaddr_in serverAddr;
- char sendbuf[BUFFER_SIZE];
- char recvbuf[BUFFER_SIZE];
- int iDataNum = 0;
- if ((clientSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) {
- perror("socket");
- return -1;
- }
-
- // set overtime
- struct timeval timeout;
- timeout.tv_sec = 2; //s
- timeout.tv_usec = 0; //us
- if (setsockopt(clientSocket, SOL_SOCKET,SO_SNDTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
- perror("setsockopt send timer failed:");
- }
- if (setsockopt(clientSocket, SOL_SOCKET,SO_RCVTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
- perror("setsockopt recv timer failed:");
- }
-
- serverAddr.sin_family = AF_INET;
- serverAddr.sin_port = htons(info->port);
- serverAddr.sin_addr.s_addr = info->hostIp;
-
- memset(sendbuf, 0, BUFFER_SIZE);
- memset(recvbuf, 0, BUFFER_SIZE);
-
- struct in_addr ipStr;
- memcpy(&ipStr, &info->hostIp, 4);
- sprintf(sendbuf, "client send udp pkg to %s:%d, content: 1122334455", inet_ntoa(ipStr), info->port);
- sprintf(sendbuf + info->pktLen - 16, "1122334455667788");
-
- socklen_t sin_size = sizeof(*(struct sockaddr *)&serverAddr);
-
- int code = sendto(clientSocket, sendbuf, info->pktLen, 0, (struct sockaddr *)&serverAddr, (int)sin_size);
- if (code < 0) {
- perror("sendto");
- return -1;
- }
-
- iDataNum = recvfrom(clientSocket, recvbuf, BUFFER_SIZE, 0, (struct sockaddr *)&serverAddr, &sin_size);
-
- if (iDataNum < info->pktLen) {
- printf("Read ack pkg len: %d, less than req pkg len: %d from udp port: %d\t\t", iDataNum, info->pktLen, info->port);
- return -1;
- }
-
- //printf("Read ack pkg len:%d from udp port: %d, buffer: %s %s\n", info->pktLen, port, recvbuf, recvbuf+iDataNum-8);
- close(clientSocket);
- return 0;
-}
-
-int32_t getIpFromFqdn(const char *fqdn, uint32_t* ip) {
- struct addrinfo hints = {0};
- hints.ai_family = AF_UNSPEC;
- hints.ai_socktype = SOCK_STREAM;
-
- struct addrinfo *result = NULL;
-
- int32_t ret = getaddrinfo(fqdn, NULL, &hints, &result);
- if (result) {
- struct sockaddr *sa = result->ai_addr;
- struct sockaddr_in *si = (struct sockaddr_in*)sa;
- struct in_addr ia = si->sin_addr;
- *ip = ia.s_addr;
- freeaddrinfo(result);
- return 0;
- } else {
- printf("Failed get the ip address from fqdn:%s, code:%d, reason:%s", fqdn, ret, gai_strerror(ret));
- return -1;
- }
-}
-
-void checkPort(uint32_t hostIp, uint16_t startPort, uint16_t maxPort, uint16_t pktLen) {
- int ret;
- info_s info;
- memset(&info, 0, sizeof(info_s));
- info.hostIp = hostIp;
- info.pktLen = pktLen;
-
- for (uint16_t port = startPort; port <= maxPort; port++) {
- //printf("test: %s:%d\n", info.host, port);
- printf("\n");
-
- info.port = port;
- ret = checkTcpPort(&info);
- if (ret != 0) {
- printf("tcp port:%d test fail.\t\n", port);
- } else {
- printf("tcp port:%d test ok.\t\t", port);
- }
-
- ret = checkUdpPort(&info);
- if (ret != 0) {
- printf("udp port:%d test fail.\t\n", port);
- } else {
- printf("udp port:%d test ok.\t\t", port);
- }
- }
-
- printf("\n");
- return ;
-}
-
-int main(int argc, char *argv[]) {
- SArguments arguments = {"127.0.0.1", "", 6030, 6042, 1000};
- int ret;
-
- argp_parse(&argp, argc, argv, 0, 0, &arguments);
- if (arguments.pktLen > MAX_PKG_LEN) {
- printf("test pkg len overflow: %d, max len not greater than %d bytes\n", arguments.pktLen, MAX_PKG_LEN);
- exit(0);
- }
-
- printf("host ip: %s\thost fqdn: %s\tport: %d\tmax_port: %d\tpkgLen: %d\n", arguments.host, arguments.fqdn, arguments.port, arguments.max_port, arguments.pktLen);
-
- if (arguments.host[0] != 0) {
- printf("\nstart connect to %s test:\n", arguments.host);
- checkPort(inet_addr(arguments.host), arguments.port, arguments.max_port, arguments.pktLen);
- printf("\n");
- }
-
- if (arguments.fqdn[0] != 0) {
- uint32_t hostIp = 0;
- ret = getIpFromFqdn(arguments.fqdn, &hostIp);
- if (ret) {
- printf("\n");
- return 0;
- }
- printf("\nstart connetc to %s test:\n", arguments.fqdn);
- checkPort(hostIp, arguments.port, arguments.max_port, arguments.pktLen);
- printf("\n");
- }
-
- return 0;
-}
diff --git a/src/kit/taosnetwork/server.c b/src/kit/taosnetwork/server.c
deleted file mode 100644
index 97be1d3b63369b26dedb0707d3a096b044f982ef..0000000000000000000000000000000000000000
--- a/src/kit/taosnetwork/server.c
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define MAX_PKG_LEN (64*1000)
-#define BUFFER_SIZE (MAX_PKG_LEN + 1024)
-
-typedef struct {
- int port;
- uint16_t pktLen;
-} info_s;
-
-typedef struct Arguments {
- char * host;
- uint16_t port;
- uint16_t max_port;
- uint16_t pktLen;
-} SArguments;
-
-static struct argp_option options[] = {
- {0, 'h', "host", 0, "The host to connect to TDEngine. Default is localhost.", 0},
- {0, 'p', "port", 0, "The TCP or UDP port number to use for the connection. Default is 6041.", 1},
- {0, 'm', "max port", 0, "The max TCP or UDP port number to use for the connection. Default is 6060.", 2},
- {0, 'l', "test pkg len", 0, "The len of pkg for test. Default is 1000 Bytes, max not greater than 64k Bytes.\nNotes: This parameter must be consistent between the client and the server.", 3}};
-
-static error_t parse_opt(int key, char *arg, struct argp_state *state) {
-
- SArguments *arguments = state->input;
- switch (key) {
- case 'h':
- arguments->host = arg;
- break;
- case 'p':
- arguments->port = atoi(arg);
- break;
- case 'm':
- arguments->max_port = atoi(arg);
- break;
- case 'l':
- arguments->pktLen = atoi(arg);
- break;
-
- default:
- return ARGP_ERR_UNKNOWN;
- }
- return 0;
-}
-
-static struct argp argp = {options, parse_opt, 0, 0};
-
-static void *bindTcpPort(void *sarg) {
- info_s *pinfo = (info_s *)sarg;
- int port = pinfo->port;
- int serverSocket;
-
- struct sockaddr_in server_addr;
- struct sockaddr_in clientAddr;
- int addr_len = sizeof(clientAddr);
- int client;
- char buffer[BUFFER_SIZE];
- int iDataNum = 0;
-
- if ((serverSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) {
- printf("socket() fail: %s", strerror(errno));
- return NULL;
- }
-
- bzero(&server_addr, sizeof(server_addr));
- server_addr.sin_family = AF_INET;
- server_addr.sin_port = htons(port);
- server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
-
- if (bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) {
- printf("port:%d bind() fail: %s", port, strerror(errno));
- return NULL;
- }
-
- if (listen(serverSocket, 5) < 0) {
- printf("listen() fail: %s", strerror(errno));
- return NULL;
- }
-
- //printf("Bind port: %d success\n", port);
- while (1) {
- client = accept(serverSocket, (struct sockaddr *)&clientAddr, (socklen_t *)&addr_len);
- if (client < 0) {
- printf("accept() fail: %s", strerror(errno));
- continue;
- }
-
- memset(buffer, 0, BUFFER_SIZE);
- int nleft, nread;
- char *ptr = buffer;
- nleft = pinfo->pktLen;
- while (nleft > 0) {
- nread = recv(client, ptr, BUFFER_SIZE, 0);
-
- if (nread == 0) {
- break;
- } else if (nread < 0) {
- if (errno == EINTR) {
- continue;
- } else {
- printf("recv Client: %s pkg from TCP port: %d fail:%s.\n", inet_ntoa(clientAddr.sin_addr), port, strerror(errno));
- close(serverSocket);
- return NULL;
- }
- } else {
- nleft -= nread;
- ptr += nread;
- iDataNum += nread;
- }
- }
-
- printf("recv Client: %s pkg from TCP port: %d, pkg len: %d\n", inet_ntoa(clientAddr.sin_addr), port, iDataNum);
- if (iDataNum > 0) {
- send(client, buffer, iDataNum, 0);
- }
- }
-
- close(serverSocket);
- return NULL;
-}
-
-static void *bindUdpPort(void *sarg) {
- info_s *pinfo = (info_s *)sarg;
- int port = pinfo->port;
- int serverSocket;
-
- struct sockaddr_in server_addr;
- struct sockaddr_in clientAddr;
- char buffer[BUFFER_SIZE];
- int iDataNum;
-
- if ((serverSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) {
- perror("socket");
- return NULL;
- }
-
- bzero(&server_addr, sizeof(server_addr));
- server_addr.sin_family = AF_INET;
- server_addr.sin_port = htons(port);
- server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
-
- if (bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) {
- perror("connect");
- return NULL;
- }
-
- socklen_t sin_size;
-
- while (1) {
- memset(buffer, 0, BUFFER_SIZE);
-
- sin_size = sizeof(*(struct sockaddr *)&server_addr);
-
- iDataNum = recvfrom(serverSocket, buffer, BUFFER_SIZE, 0, (struct sockaddr *)&clientAddr, &sin_size);
-
- if (iDataNum < 0) {
- perror("recvfrom null");
- continue;
- }
- if (iDataNum > 0) {
- printf("recv Client: %s pkg from UDP port: %d, pkg len: %d\n", inet_ntoa(clientAddr.sin_addr), port, iDataNum);
- //printf("Read msg from udp:%s ... %s\n", buffer, buffer+iDataNum-16);
-
- sendto(serverSocket, buffer, iDataNum, 0, (struct sockaddr *)&clientAddr, (int)sin_size);
- }
- }
-
- close(serverSocket);
- return NULL;
-}
-
-
-int main(int argc, char *argv[]) {
- SArguments arguments = {"127.0.0.1", 6030, 6042, 1000};
- argp_parse(&argp, argc, argv, 0, 0, &arguments);
- if (arguments.pktLen > MAX_PKG_LEN) {
- printf("test pkg len overflow: %d, max len not greater than %d bytes\n", arguments.pktLen, MAX_PKG_LEN);
- exit(0);
- }
-
- int port = arguments.port;
-
- int num = arguments.max_port - arguments.port + 1;
-
- if (num < 0) {
- num = 1;
- }
- pthread_t *pids = malloc(2 * num * sizeof(pthread_t));
- info_s * tinfos = malloc(num * sizeof(info_s));
- info_s * uinfos = malloc(num * sizeof(info_s));
-
- for (size_t i = 0; i < num; i++) {
- info_s *tcpInfo = tinfos + i;
- tcpInfo->port = port + i;
- tcpInfo->pktLen = arguments.pktLen;
-
- if (pthread_create(pids + i, NULL, bindTcpPort, tcpInfo) != 0)
- {
- printf("create thread fail, port:%d.\n", port);
- exit(-1);
- }
-
- info_s *udpInfo = uinfos + i;
- udpInfo->port = port + i;
- if (pthread_create(pids + num + i, NULL, bindUdpPort, udpInfo) != 0)
- {
- printf("create thread fail, port:%d.\n", port);
- exit(-1);
- }
- }
-
- for (int i = 0; i < num; i++) {
- pthread_join(pids[i], NULL);
- pthread_join(pids[(num + i)], NULL);
- }
-}
diff --git a/src/mnode/src/mnodeMain.c b/src/mnode/src/mnodeMain.c
index 53f3474c3c0564288556e34da4c17d893d05a40c..d63a5758686f5db46926e1b3c1bc97becd53d8ae 100644
--- a/src/mnode/src/mnodeMain.c
+++ b/src/mnode/src/mnodeMain.c
@@ -21,6 +21,7 @@
#include "tgrant.h"
#include "ttimer.h"
#include "tglobal.h"
+#include "mnode.h"
#include "dnode.h"
#include "mnodeDef.h"
#include "mnodeInt.h"
@@ -107,13 +108,18 @@ int32_t mnodeStartSystem() {
tsMgmtIsRunning = true;
mInfo("mnode is initialized successfully");
+
+ sdbUpdateSync();
+
return 0;
}
int32_t mnodeInitSystem() {
mnodeInitTimer();
- if (!mnodeNeedStart()) return 0;
- return mnodeStartSystem();
+ if (mnodeNeedStart()) {
+ return mnodeStartSystem();
+ }
+ return 0;
}
void mnodeCleanupSystem() {
@@ -159,14 +165,19 @@ static void mnodeCleanupTimer() {
static bool mnodeNeedStart() {
struct stat dirstat;
- bool fileExist = (stat(tsMnodeDir, &dirstat) == 0);
+ char mnodeFileName[TSDB_FILENAME_LEN * 2] = {0};
+ sprintf(mnodeFileName, "%s/wal/wal0", tsMnodeDir);
+
+ bool fileExist = (stat(mnodeFileName, &dirstat) == 0);
bool asMaster = (strcmp(tsFirst, tsLocalEp) == 0);
if (asMaster || fileExist) {
+ mDebug("mnode module start, asMaster:%d fileExist:%d", asMaster, fileExist);
return true;
+ } else {
+ mDebug("mnode module won't start, asMaster:%d fileExist:%d", asMaster, fileExist);
+ return false;
}
-
- return false;
}
bool mnodeIsRunning() {
diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c
index 9efb804734558278b9d6e9efeb29c4d03b65e21d..e3ed7daf8cf51a172aa2d1ef4581c07fb80121d7 100644
--- a/src/mnode/src/mnodeSdb.c
+++ b/src/mnode/src/mnodeSdb.c
@@ -291,6 +291,11 @@ static void sdbConfirmForward(void *ahandle, void *param, int32_t code) {
}
void sdbUpdateSync() {
+ if (!mnodeIsRunning()) {
+ mDebug("mnode not start yet, update sync info later");
+ return;
+ }
+
SSyncCfg syncCfg = {0};
int32_t index = 0;
diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c
index e553853658640b68339dce309e10286f35292d37..f66ef6b7a36ac785459b9093f54b8e33ba6cc622 100644
--- a/src/mnode/src/mnodeShow.c
+++ b/src/mnode/src/mnodeShow.c
@@ -313,6 +313,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
code = TSDB_CODE_MND_DB_IN_DROPPING;
+ mnodeDecDbRef(pDb);
goto connect_over;
}
mnodeDecDbRef(pDb);
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index 794958f7f04722b74e5b6a3139bb8c72f775cee3..03b1399ea75029f60d4026a1a2099288c805a2b2 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -294,6 +294,7 @@ static int32_t mnodeChildTableActionRestored() {
SSdbOper desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .table = tsChildTableSdb};
sdbDeleteRow(&desc);
mnodeDecTableRef(pTable);
+ mnodeDecDbRef(pDb);
continue;
}
mnodeDecDbRef(pDb);
@@ -1259,6 +1260,7 @@ static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow,
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return TSDB_CODE_MND_DB_IN_DROPPING;
}
@@ -1323,6 +1325,7 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows,
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return 0;
}
@@ -2495,6 +2498,7 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return TSDB_CODE_MND_DB_IN_DROPPING;
}
@@ -2548,6 +2552,7 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return 0;
}
@@ -2716,6 +2721,7 @@ static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, vo
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return TSDB_CODE_MND_DB_IN_DROPPING;
}
@@ -2768,6 +2774,7 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return 0;
}
diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c
index ff253c1935376e337125ab8ae5e5ef702651c208..aa6631ff837010e9fffb3663b5b50a57c61b557c 100644
--- a/src/mnode/src/mnodeVgroup.c
+++ b/src/mnode/src/mnodeVgroup.c
@@ -89,6 +89,7 @@ static int32_t mnodeVgroupActionInsert(SSdbOper *pOper) {
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("vgId:%d, db:%s status:%d, in dropping", pVgroup->vgId, pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return TSDB_CODE_MND_DB_IN_DROPPING;
}
@@ -617,6 +618,7 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return TSDB_CODE_MND_DB_IN_DROPPING;
}
@@ -708,6 +710,7 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v
if (pDb->status != TSDB_DB_STATUS_READY) {
mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
+ mnodeDecDbRef(pDb);
return 0;
}
@@ -784,7 +787,10 @@ void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable) {
if (pTable->sid >= 1) {
taosIdPoolMarkStatus(pVgroup->idPool, pTable->sid);
pVgroup->numOfTables++;
- mnodeIncVgroupRef(pVgroup);
+ // The create vgroup message may be received later than the create table message
+ // and the writing order in sdb is therefore uncertain
+ // which will cause the reference count of the vgroup to be incorrect when restarting
+ // mnodeIncVgroupRef(pVgroup);
}
}
@@ -792,7 +798,10 @@ void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable) {
if (pTable->sid >= 1) {
taosFreeId(pVgroup->idPool, pTable->sid);
pVgroup->numOfTables--;
- mnodeDecVgroupRef(pVgroup);
+ // The create vgroup message may be received later than the create table message
+ // and the writing order in sdb is therefore uncertain
+ // which will cause the reference count of the vgroup to be incorrect when restarting
+ // mnodeDecVgroupRef(pVgroup);
}
}
diff --git a/src/os/inc/osTime.h b/src/os/inc/osTime.h
index cd2553f75336bc00225b4292638b3eb96f082ae9..97432ca24112805b1352b47e1ba2e34503477fb3 100644
--- a/src/os/inc/osTime.h
+++ b/src/os/inc/osTime.h
@@ -64,6 +64,7 @@ static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
}
int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts);
+int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);
int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
void deltaToUtcInitOnce();
diff --git a/src/os/src/detail/osTime.c b/src/os/src/detail/osTime.c
index 57634e468a87c9e482794fe1937a108d646cfaa1..9d8328a71b19c5a37eaa29fba8f2844c95dd77ca 100644
--- a/src/os/src/detail/osTime.c
+++ b/src/os/src/detail/osTime.c
@@ -319,6 +319,8 @@ int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) {
*time = factor * seconds + fraction;
return 0;
}
+
+
static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* result) {
*result = val;
@@ -384,6 +386,23 @@ int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
return getTimestampInUsFromStrImpl(timestamp, token[tokenlen - 1], ts);
}
+int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
+ errno = 0;
+
+ /* get the basic numeric value */
+ *duration = strtoll(token, NULL, 10);
+ if (errno != 0) {
+ return -1;
+ }
+
+ *unit = token[tokenLen - 1];
+ if (*unit == 'n' || *unit == 'y') {
+ return 0;
+ }
+
+ return getTimestampInUsFromStrImpl(*duration, *unit, duration);
+}
+
// internal function, when program is paused in debugger,
// one can call this function from debugger to print a
// timestamp as human readable string, for example (gdb):
diff --git a/src/os/src/linux/linuxEnv.c b/src/os/src/linux/linuxEnv.c
index 811d98ad7f8c34851e70ccbd190a13b74d865703..14b40a1f1857ca7789829cb64f596ff349b7c997 100644
--- a/src/os/src/linux/linuxEnv.c
+++ b/src/os/src/linux/linuxEnv.c
@@ -18,15 +18,25 @@
#include "tglobal.h"
void osInit() {
+
+#ifdef _TD_POWER_
+ if (configDir[0] == 0) {
+ strcpy(configDir, "/etc/power");
+ }
+ strcpy(tsDataDir, "/var/lib/power");
+ strcpy(tsLogDir, "/var/log/power");
+ strcpy(tsScriptDir, "/etc/power");
+#else
if (configDir[0] == 0) {
strcpy(configDir, "/etc/taos");
}
+ strcpy(tsDataDir, "/var/lib/taos");
+ strcpy(tsLogDir, "/var/log/taos");
+ strcpy(tsScriptDir, "/etc/taos");
+#endif
strcpy(tsVnodeDir, "");
strcpy(tsDnodeDir, "");
strcpy(tsMnodeDir, "");
- strcpy(tsDataDir, "/var/lib/taos");
- strcpy(tsLogDir, "/var/log/taos");
- strcpy(tsScriptDir, "/etc/taos");
strcpy(tsOsName, "Linux");
}
\ No newline at end of file
diff --git a/src/os/src/windows/wEnv.c b/src/os/src/windows/wEnv.c
index 5544c4ba39249edfee12ee10fa07d7aaa33813c9..8110a194904bbbc1166ac57de187de284db34a6b 100644
--- a/src/os/src/windows/wEnv.c
+++ b/src/os/src/windows/wEnv.c
@@ -22,16 +22,29 @@ extern void taosWinSocketInit();
void osInit() {
taosSetCoreDump();
+#ifdef _TD_POWER_
+ if (configDir[0] == 0) {
+ strcpy(configDir, "C:/PowerDB/cfg");
+ }
+
+ strcpy(tsVnodeDir, "C:/PowerDB/data");
+ strcpy(tsDataDir, "C:/PowerDB/data");
+ strcpy(tsLogDir, "C:/PowerDB/log");
+ strcpy(tsScriptDir, "C:/PowerDB/script");
+
+#else
if (configDir[0] == 0) {
strcpy(configDir, "C:/TDengine/cfg");
}
strcpy(tsVnodeDir, "C:/TDengine/data");
- strcpy(tsDnodeDir, "");
- strcpy(tsMnodeDir, "");
strcpy(tsDataDir, "C:/TDengine/data");
strcpy(tsLogDir, "C:/TDengine/log");
strcpy(tsScriptDir, "C:/TDengine/script");
+#endif
+
+ strcpy(tsDnodeDir, "");
+ strcpy(tsMnodeDir, "");
strcpy(tsOsName, "Windows");
taosWinSocketInit();
-}
\ No newline at end of file
+}
diff --git a/src/plugins/http/inc/httpQueue.h b/src/plugins/http/inc/httpQueue.h
new file mode 100644
index 0000000000000000000000000000000000000000..a4590719ff24d48eee875b2f2c4ff2f28a0a31f6
--- /dev/null
+++ b/src/plugins/http/inc/httpQueue.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef TDENGINE_HTTP_QUEUE_H
+#define TDENGINE_HTTP_QUEUE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include
+
+bool httpInitResultQueue();
+void httpCleanupResultQueue();
+void httpDispatchToResultQueue();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/plugins/http/src/httpQueue.c b/src/plugins/http/src/httpQueue.c
new file mode 100644
index 0000000000000000000000000000000000000000..9625102f7450daf409d35aa532267f3f999d80ab
--- /dev/null
+++ b/src/plugins/http/src/httpQueue.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#define _DEFAULT_SOURCE
+#include "os.h"
+#include "tqueue.h"
+#include "tnote.h"
+#include "taos.h"
+#include "tsclient.h"
+#include "httpInt.h"
+#include "httpContext.h"
+#include "httpSql.h"
+#include "httpResp.h"
+#include "httpAuth.h"
+#include "httpSession.h"
+
+typedef struct {
+ pthread_t thread;
+ int32_t workerId;
+} SHttpWorker;
+
+typedef struct {
+ int32_t num;
+ SHttpWorker *httpWorker;
+} SHttpWorkerPool;
+
+typedef struct {
+ void *param;
+ void *result;
+ int numOfRows;
+ void (*fp)(void *param, void *result, int numOfRows);
+} SHttpResult;
+
+static SHttpWorkerPool tsHttpPool;
+static taos_qset tsHttpQset;
+static taos_queue tsHttpQueue;
+
+void httpDispatchToResultQueue(void *param, TAOS_RES *result, int numOfRows, void (*fp)(void *param, void *result, int numOfRows)) {
+ if (tsHttpQueue != NULL) {
+ SHttpResult *pMsg = (SHttpResult *)taosAllocateQitem(sizeof(SHttpResult));
+ pMsg->param = param;
+ pMsg->result = result;
+ pMsg->numOfRows = numOfRows;
+ pMsg->fp = fp;
+ taosWriteQitem(tsHttpQueue, TAOS_QTYPE_RPC, pMsg);
+ } else {
+ (*fp)(param, result, numOfRows);
+ }
+}
+
+static void *httpProcessResultQueue(void *param) {
+ SHttpResult *pMsg;
+ int32_t type;
+ void *unUsed;
+
+ while (1) {
+ if (taosReadQitemFromQset(tsHttpQset, &type, (void **)&pMsg, &unUsed) == 0) {
+ httpDebug("httpResultQueue: got no message from qset, exiting...");
+ break;
+ }
+
+ httpDebug("context:%p, res:%p will be processed in result queue", pMsg->param, pMsg->result);
+ (*pMsg->fp)(pMsg->param, pMsg->result, pMsg->numOfRows);
+ taosFreeQitem(pMsg);
+ }
+
+ return NULL;
+}
+
+static bool httpAllocateResultQueue() {
+ tsHttpQueue = taosOpenQueue();
+ if (tsHttpQueue == NULL) return false;
+
+ taosAddIntoQset(tsHttpQset, tsHttpQueue, NULL);
+
+ for (int32_t i = 0; i < tsHttpPool.num; ++i) {
+ SHttpWorker *pWorker = tsHttpPool.httpWorker + i;
+ pWorker->workerId = i;
+
+ pthread_attr_t thAttr;
+ pthread_attr_init(&thAttr);
+ pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE);
+
+ if (pthread_create(&pWorker->thread, &thAttr, httpProcessResultQueue, pWorker) != 0) {
+ httpError("failed to create thread to process http result queue, reason:%s", strerror(errno));
+ }
+
+ pthread_attr_destroy(&thAttr);
+ httpDebug("http result worker:%d is launched, total:%d", pWorker->workerId, tsHttpPool.num);
+ }
+
+ httpInfo("http result queue is opened");
+ return true;
+}
+
+static void httpFreeResultQueue() {
+ taosCloseQueue(tsHttpQueue);
+ tsHttpQueue = NULL;
+}
+
+bool httpInitResultQueue() {
+ tsHttpQset = taosOpenQset();
+
+ tsHttpPool.num = tsHttpMaxThreads;
+ tsHttpPool.httpWorker = (SHttpWorker *)calloc(sizeof(SHttpWorker), tsHttpPool.num);
+
+ if (tsHttpPool.httpWorker == NULL) return -1;
+ for (int32_t i = 0; i < tsHttpPool.num; ++i) {
+ SHttpWorker *pWorker = tsHttpPool.httpWorker + i;
+ pWorker->workerId = i;
+ }
+
+ return httpAllocateResultQueue();
+}
+
+void httpCleanupResultQueue() {
+ httpFreeResultQueue();
+
+ for (int32_t i = 0; i < tsHttpPool.num; ++i) {
+ SHttpWorker *pWorker = tsHttpPool.httpWorker + i;
+ if (pWorker->thread) {
+ taosQsetThreadResume(tsHttpQset);
+ }
+ }
+
+ for (int32_t i = 0; i < tsHttpPool.num; ++i) {
+ SHttpWorker *pWorker = tsHttpPool.httpWorker + i;
+ if (pWorker->thread) {
+ pthread_join(pWorker->thread, NULL);
+ }
+ }
+
+ taosCloseQset(tsHttpQset);
+ free(tsHttpPool.httpWorker);
+
+ httpInfo("http result queue is closed");
+}
diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c
index 041fbdb92a6af689f14a71ff22e7537be64daa99..07cdea1380aaf40febc66f6d0e3891f7a577101d 100644
--- a/src/plugins/http/src/httpSql.c
+++ b/src/plugins/http/src/httpSql.c
@@ -24,12 +24,15 @@
#include "httpResp.h"
#include "httpAuth.h"
#include "httpSession.h"
+#include "httpQueue.h"
void *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
void *param, void **taos);
void httpProcessMultiSql(HttpContext *pContext);
-void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) {
+void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows);
+
+void httpProcessMultiSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int numOfRows) {
HttpContext *pContext = (HttpContext *)param;
if (pContext == NULL) return;
@@ -75,7 +78,11 @@ void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numO
}
}
-void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int code) {
+void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) {
+ httpDispatchToResultQueue(param, result, numOfRows, httpProcessMultiSqlRetrieveCallBackImp);
+}
+
+void httpProcessMultiSqlCallBackImp(void *param, TAOS_RES *result, int code) {
HttpContext *pContext = (HttpContext *)param;
if (pContext == NULL) return;
@@ -154,6 +161,10 @@ void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int code) {
}
}
+void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int unUsedCode) {
+ httpDispatchToResultQueue(param, result, unUsedCode, httpProcessMultiSqlCallBackImp);
+}
+
void httpProcessMultiSql(HttpContext *pContext) {
HttpSqlCmds * multiCmds = pContext->multiCmds;
HttpEncodeMethod *encode = pContext->encodeMethod;
@@ -196,7 +207,9 @@ void httpProcessMultiSqlCmd(HttpContext *pContext) {
httpProcessMultiSql(pContext);
}
-void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) {
+void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows);
+
+void httpProcessSingleSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int numOfRows) {
HttpContext *pContext = (HttpContext *)param;
if (pContext == NULL) return;
@@ -243,7 +256,11 @@ void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int num
}
}
-void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int unUsedCode) {
+void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) {
+ httpDispatchToResultQueue(param, result, numOfRows, httpProcessSingleSqlRetrieveCallBackImp);
+}
+
+void httpProcessSingleSqlCallBackImp(void *param, TAOS_RES *result, int unUsedCode) {
HttpContext *pContext = (HttpContext *)param;
if (pContext == NULL) return;
@@ -306,6 +323,10 @@ void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int unUsedCode)
}
}
+void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int unUsedCode) {
+ httpDispatchToResultQueue(param, result, unUsedCode, httpProcessSingleSqlCallBackImp);
+}
+
void httpProcessSingleSqlCmd(HttpContext *pContext) {
HttpSqlCmd * cmd = &pContext->singleCmd;
char * sql = cmd->nativSql;
diff --git a/src/plugins/http/src/httpSystem.c b/src/plugins/http/src/httpSystem.c
index 38bd8624b212d7c2004bf2eb89986be5e07cda5c..e51c8dd4f773397862483d9284e765a51d49c923 100644
--- a/src/plugins/http/src/httpSystem.c
+++ b/src/plugins/http/src/httpSystem.c
@@ -26,6 +26,7 @@
#include "httpServer.h"
#include "httpResp.h"
#include "httpHandle.h"
+#include "httpQueue.h"
#include "gcHandle.h"
#include "restHandle.h"
#include "tgHandle.h"
@@ -67,6 +68,11 @@ int httpStartSystem() {
return -1;
}
+ if (!httpInitResultQueue()) {
+ httpError("http init result queue failed");
+ return -1;
+ }
+
if (!httpInitContexts()) {
httpError("http init contexts failed");
return -1;
@@ -98,6 +104,8 @@ void httpCleanUpSystem() {
httpCleanUpConnect();
httpCleanupContexts();
httpCleanUpSessions();
+ httpCleanupResultQueue();
+
pthread_mutex_destroy(&tsHttpServer.serverMutex);
taosTFree(tsHttpServer.pThreads);
tsHttpServer.pThreads = NULL;
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index 7093495763a76ac4b9d9f86f413fbbc27d273b2a..25fb04fb9a56bd9218b37f10cb5577d00a7fbd51 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -132,11 +132,12 @@ typedef struct SQueryCostInfo {
typedef struct SQuery {
int16_t numOfCols;
int16_t numOfTags;
+ char intervalTimeUnit;
+ char slidingTimeUnit; // interval data type, used for daytime revise
SOrderVal order;
STimeWindow window;
int64_t intervalTime;
int64_t slidingTime; // sliding time for sliding window query
- char slidingTimeUnit; // interval data type, used for daytime revise
int16_t precision;
int16_t numOfOutput;
int16_t fillType;
diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h
index 65ab82883b0a4a280e85bbaf66929081615795d3..384d8079a7533e68dea39ff7549f71ad4b748879 100644
--- a/src/query/inc/tsqlfunction.h
+++ b/src/query/inc/tsqlfunction.h
@@ -168,6 +168,7 @@ typedef struct SQLFunctionCtx {
int16_t outputType;
int16_t outputBytes; // size of results, determined by function and input column data type
bool hasNull; // null value exist in current block
+ bool requireNull; // require null in some function
int16_t functionId; // function id
void * aInputElemBuf;
char * aOutputBuf; // final result output buffer, point to sdata->data
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 6d74583a2cd82eceffa6df41685119b6343f4276..08bc21c109b661bf0133bc1b8b13acdc5bc59f14 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -35,9 +35,7 @@
* forced to load primary column explicitly.
*/
#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0)
-#define TSDB_COL_IS_TAG(f) (((f)&TSDB_COL_TAG) != 0)
-#define TSDB_COL_IS_NORMAL_COL(f) ((f) == TSDB_COL_NORMAL)
-#define TSDB_COL_IS_UD_COL(f) ((f) == TSDB_COL_UDC)
+
#define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP)
@@ -137,13 +135,44 @@ static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv);
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->intervalTime > 0)
-// previous time window may not be of the same size of pQuery->intervalTime
-#define GET_NEXT_TIMEWINDOW(_q, tw) \
- do { \
- int32_t factor = GET_FORWARD_DIRECTION_FACTOR((_q)->order.order); \
- (tw)->skey += ((_q)->slidingTime * factor); \
- (tw)->ekey = (tw)->skey + ((_q)->intervalTime - 1); \
- } while (0)
+static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) {
+ int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
+ if (pQuery->intervalTimeUnit != 'n' && pQuery->intervalTimeUnit != 'y') {
+ tw->skey += pQuery->slidingTime * factor;
+ tw->ekey = tw->skey + pQuery->intervalTime - 1;
+ return;
+ }
+
+ int64_t key = tw->skey / 1000, interval = pQuery->intervalTime;
+ if (pQuery->precision == TSDB_TIME_PRECISION_MICRO) {
+ key /= 1000;
+ }
+ if (pQuery->intervalTimeUnit == 'y') {
+ interval *= 12;
+ }
+
+ struct tm tm;
+ time_t t = (time_t)key;
+ localtime_r(&t, &tm);
+
+ int mon = (int)(tm.tm_year * 12 + tm.tm_mon + interval * factor);
+ tm.tm_year = mon / 12;
+ tm.tm_mon = mon % 12;
+ tw->skey = mktime(&tm) * 1000L;
+
+ mon = (int)(mon + interval);
+ tm.tm_year = mon / 12;
+ tm.tm_mon = mon % 12;
+ tw->ekey = mktime(&tm) * 1000L;
+
+ if (pQuery->precision == TSDB_TIME_PRECISION_MICRO) {
+ tw->skey *= 1000L;
+ tw->ekey *= 1000L;
+ }
+ tw->ekey -= 1;
+}
+
+#define GET_NEXT_TIMEWINDOW(_q, tw) getNextTimeWindow((_q), (tw))
#define SET_STABLE_QUERY_OVER(_q) ((_q)->tableIndex = (int32_t)((_q)->tableqinfoGroupInfo.numOfTables))
#define IS_STASBLE_QUERY_OVER(_q) ((_q)->tableIndex >= (int32_t)((_q)->tableqinfoGroupInfo.numOfTables))
@@ -254,7 +283,7 @@ bool isGroupbyNormalCol(SSqlGroupbyExpr *pGroupbyExpr) {
for (int32_t i = 0; i < pGroupbyExpr->numOfGroupCols; ++i) {
SColIndex *pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, i);
- if (pColIndex->flag == TSDB_COL_NORMAL) {
+ if (TSDB_COL_IS_NORMAL_COL(pColIndex->flag)) {
//make sure the normal column locates at the second position if tbname exists in group by clause
if (pGroupbyExpr->numOfGroupCols > 1) {
assert(pColIndex->colIndex > 0);
@@ -275,7 +304,7 @@ int16_t getGroupbyColumnType(SQuery *pQuery, SSqlGroupbyExpr *pGroupbyExpr) {
for (int32_t i = 0; i < pGroupbyExpr->numOfGroupCols; ++i) {
SColIndex *pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, i);
- if (pColIndex->flag == TSDB_COL_NORMAL) {
+ if (TSDB_COL_IS_NORMAL_COL(pColIndex->flag)) {
colId = pColIndex->colId;
break;
}
@@ -467,9 +496,13 @@ static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWin
static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t ts, SQuery *pQuery) {
STimeWindow w = {0};
- if (pWindowResInfo->curIndex == -1) { // the first window, from the previous stored value
+ if (pWindowResInfo->curIndex == -1) { // the first window, from the previous stored value
w.skey = pWindowResInfo->prevSKey;
- w.ekey = w.skey + pQuery->intervalTime - 1;
+ if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
+ w.ekey = taosAddNatualInterval(w.skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+ } else {
+ w.ekey = w.skey + pQuery->intervalTime - 1;
+ }
} else {
int32_t slot = curTimeWindowIndex(pWindowResInfo);
SWindowResult* pWindowRes = getWindowResult(pWindowResInfo, slot);
@@ -477,19 +510,24 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t
}
if (w.skey > ts || w.ekey < ts) {
- int64_t st = w.skey;
+ if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
+ w.skey = taosGetIntervalStartTimestamp(ts, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
+ w.ekey = taosAddNatualInterval(w.skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+ } else {
+ int64_t st = w.skey;
- if (st > ts) {
- st -= ((st - ts + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
- }
+ if (st > ts) {
+ st -= ((st - ts + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
+ }
- int64_t et = st + pQuery->intervalTime - 1;
- if (et < ts) {
- st += ((ts - et + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
- }
+ int64_t et = st + pQuery->intervalTime - 1;
+ if (et < ts) {
+ st += ((ts - et + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
+ }
- w.skey = st;
- w.ekey = w.skey + pQuery->intervalTime - 1;
+ w.skey = st;
+ w.ekey = w.skey + pQuery->intervalTime - 1;
+ }
}
/*
@@ -814,14 +852,22 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
*/
if (QUERY_IS_ASC_QUERY(pQuery) && primaryKeys[startPos] > pNext->ekey) {
TSKEY next = primaryKeys[startPos];
-
- pNext->ekey += ((next - pNext->ekey + pQuery->slidingTime - 1)/pQuery->slidingTime) * pQuery->slidingTime;
- pNext->skey = pNext->ekey - pQuery->intervalTime + 1;
+ if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
+ pNext->skey = taosGetIntervalStartTimestamp(next, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
+ pNext->ekey = taosAddNatualInterval(pNext->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+ } else {
+ pNext->ekey += ((next - pNext->ekey + pQuery->slidingTime - 1)/pQuery->slidingTime) * pQuery->slidingTime;
+ pNext->skey = pNext->ekey - pQuery->intervalTime + 1;
+ }
} else if ((!QUERY_IS_ASC_QUERY(pQuery)) && primaryKeys[startPos] < pNext->skey) {
TSKEY next = primaryKeys[startPos];
-
- pNext->skey -= ((pNext->skey - next + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
- pNext->ekey = pNext->skey + pQuery->intervalTime - 1;
+ if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
+ pNext->skey = taosGetIntervalStartTimestamp(next, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
+ pNext->ekey = taosAddNatualInterval(pNext->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
+ } else {
+ pNext->skey -= ((pNext->skey - next + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
+ pNext->ekey = pNext->skey + pQuery->intervalTime - 1;
+ }
}
return startPos;
@@ -1085,7 +1131,7 @@ static char *getGroupbyColumnData(SQuery *pQuery, int16_t *type, int16_t *bytes,
for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) {
SColIndex* pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, k);
- if (pColIndex->flag == TSDB_COL_TAG) {
+ if (TSDB_COL_IS_TAG(pColIndex->flag)) {
continue;
}
@@ -1555,6 +1601,13 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
SColIndex* pIndex = &pSqlFuncMsg->colInfo;
+ if (TSDB_COL_REQ_NULL(pIndex->flag)) {
+ pCtx->requireNull = true;
+ pIndex->flag &= ~(TSDB_COL_NULL);
+ } else {
+ pCtx->requireNull = false;
+ }
+
int32_t index = pSqlFuncMsg->colInfo.colIndex;
if (TSDB_COL_IS_TAG(pIndex->flag)) {
if (pIndex->colId == TSDB_TBNAME_COLUMN_INDEX) { // todo refactor
@@ -1574,6 +1627,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
pCtx->inputType = pQuery->colList[index].type;
}
+
assert(isValidDataType(pCtx->inputType));
pCtx->ptsOutputBuf = NULL;
@@ -1783,7 +1837,7 @@ static bool onlyQueryTags(SQuery* pQuery) {
if (functionId != TSDB_FUNC_TAGPRJ &&
functionId != TSDB_FUNC_TID_TAG &&
(!(functionId == TSDB_FUNC_COUNT && pExprInfo->base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX)) &&
- (!(functionId == TSDB_FUNC_PRJ && pExprInfo->base.colInfo.flag == TSDB_COL_UDC))) {
+ (!(functionId == TSDB_FUNC_PRJ && TSDB_COL_IS_UD_COL(pExprInfo->base.colInfo.flag)))) {
return false;
}
}
@@ -1804,7 +1858,8 @@ void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int6
if (keyFirst > (INT64_MAX - pQuery->intervalTime)) {
assert(keyLast - keyFirst < pQuery->intervalTime);
win->ekey = INT64_MAX;
- return;
+ } else if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
+ win->ekey = taosAddNatualInterval(win->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
} else {
win->ekey = win->skey + pQuery->intervalTime - 1;
}
@@ -2782,14 +2837,14 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
tFilePage* pData = getResBufPage(pResultBuf, pi->pageId);
assert(pData->num > 0 && pData->num <= pRuntimeEnv->numOfRowsPerPage && pGroupResInfo->pos.rowId < pData->num);
- int32_t numOfRes = pData->num - pGroupResInfo->pos.rowId;
+ int32_t numOfRes = (int32_t)(pData->num - pGroupResInfo->pos.rowId);
if (numOfRes > pQuery->rec.capacity - offset) {
- numOfCopiedRows = pQuery->rec.capacity - offset;
+ numOfCopiedRows = (int32_t)(pQuery->rec.capacity - offset);
pGroupResInfo->pos.rowId += numOfCopiedRows;
done = true;
} else {
- numOfCopiedRows = pData->num;
+ numOfCopiedRows = (int32_t)pData->num;
pGroupResInfo->pos.pageId += 1;
pGroupResInfo->pos.rowId = 0;
@@ -2882,7 +2937,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
SGroupResInfo* pGroupResInfo = &pQInfo->groupResInfo;
- pGroupResInfo->numOfDataPages = taosArrayGetSize(pageList);
+ pGroupResInfo->numOfDataPages = (int32_t)taosArrayGetSize(pageList);
pGroupResInfo->groupId = tid;
pGroupResInfo->pos.pageId = 0;
pGroupResInfo->pos.rowId = 0;
@@ -3047,7 +3102,7 @@ int32_t flushFromResultBuf(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupR
char* output = buf->data + pRuntimeEnv->offset[i] * pRuntimeEnv->numOfRowsPerPage;
char* src = ((char *) pQuery->sdata[i]->data) + offset * bytes;
- memcpy(output, src, buf->num * bytes);
+ memcpy(output, src, (size_t)(buf->num * bytes));
}
offset += rows;
@@ -3222,7 +3277,7 @@ void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) {
pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf;
}
- memset(pQuery->sdata[i]->data, 0, (size_t)pQuery->pSelectExpr[i].bytes * pQuery->rec.capacity);
+ memset(pQuery->sdata[i]->data, 0, (size_t)(pQuery->pSelectExpr[i].bytes * pQuery->rec.capacity));
}
initCtxOutputBuf(pRuntimeEnv);
@@ -3937,7 +3992,7 @@ static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv) {
continue;
}
- pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes);
+ pResult->numOfRows = (uint16_t)(MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes));
}
}
}
@@ -4800,7 +4855,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
SWindowResult *pResult = &pWindowResInfo->pResult[i];
for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
- pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes);
+ pResult->numOfRows = (uint16_t)(MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes));
}
}
@@ -5364,7 +5419,7 @@ static int32_t getColumnIndexInSource(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pE
j += 1;
}
- } else if (pExprMsg->colInfo.flag == TSDB_COL_UDC) { // user specified column data
+ } else if (TSDB_COL_IS_UD_COL(pExprMsg->colInfo.flag)) { // user specified column data
return TSDB_UD_COLUMN_INDEX;
} else {
while (j < pQueryMsg->numOfCols) {
@@ -5572,7 +5627,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
int16_t functionId = pExprMsg->functionId;
if (functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG_DUMMY) {
- if (pExprMsg->colInfo.flag != TSDB_COL_TAG) { // ignore the column index check for arithmetic expression.
+ if (!TSDB_COL_IS_TAG(pExprMsg->colInfo.flag)) { // ignore the column index check for arithmetic expression.
code = TSDB_CODE_QRY_INVALID_MSG;
goto _cleanup;
}
@@ -6027,6 +6082,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
pQuery->pGroupbyExpr = pGroupbyExpr;
pQuery->intervalTime = pQueryMsg->intervalTime;
pQuery->slidingTime = pQueryMsg->slidingTime;
+ pQuery->intervalTimeUnit = pQueryMsg->intervalTimeUnit;
pQuery->slidingTimeUnit = pQueryMsg->slidingTimeUnit;
pQuery->fillType = pQueryMsg->fillType;
pQuery->numOfTags = pQueryMsg->numOfTags;
@@ -6370,7 +6426,7 @@ static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) {
qDebug("QInfo:%p ts comp data return, file:%s, size:%"PRId64, pQInfo, pQuery->sdata[0]->data, s);
if (lseek(fd, 0, SEEK_SET) >= 0) {
- size_t sz = read(fd, data, s);
+ size_t sz = read(fd, data, (uint32_t)s);
if(sz < s) { // todo handle error
assert(0);
}
@@ -6857,7 +6913,7 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
int16_t type = 0, bytes = 0;
for(int32_t j = 0; j < pQuery->numOfOutput; ++j) {
// not assign value in case of user defined constant output column
- if (pExprInfo[j].base.colInfo.flag == TSDB_COL_UDC) {
+ if (TSDB_COL_IS_UD_COL(pExprInfo[j].base.colInfo.flag)) {
continue;
}
diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c
index c1cfab3ea2c2ad9ed4d258f15930af8646d6f840..ddb63c5012318b9069e5b343803a4fc55012197f 100644
--- a/src/query/src/qFill.c
+++ b/src/query/src/qFill.c
@@ -55,7 +55,7 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_
SFillColInfo* pColInfo = &pFillInfo->pFillCol[i];
pFillInfo->pData[i] = calloc(1, pColInfo->col.bytes * capacity);
- if (pColInfo->flag == TSDB_COL_TAG) {
+ if (TSDB_COL_IS_TAG(pColInfo->flag)) {
bool exists = false;
for(int32_t j = 0; j < k; ++j) {
if (pFillInfo->pTags[j].col.colId == pColInfo->col.colId) {
@@ -155,7 +155,7 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInpu
char* data = pInput->data + pCol->col.offset * pInput->num;
memcpy(pFillInfo->pData[i], data, (size_t)(pInput->num * pCol->col.bytes));
- if (pCol->flag == TSDB_COL_TAG) { // copy the tag value to tag value buffer
+ if (TSDB_COL_IS_TAG(pCol->flag)) { // copy the tag value to tag value buffer
for (int32_t j = 0; j < pFillInfo->numOfTags; ++j) {
SFillTagColInfo* pTag = &pFillInfo->pTags[j];
if (pTag->col.colId == pCol->col.colId) {
@@ -179,14 +179,22 @@ int64_t getFilledNumOfRes(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows
if (numOfRows > 0) { // still fill gap within current data block, not generating data after the result set.
TSKEY lastKey = tsList[pFillInfo->numOfRows - 1];
- numOfRes = (int64_t)(ABS(lastKey - pFillInfo->start) / pFillInfo->slidingTime) + 1;
+ if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
+ numOfRes = (int64_t)(ABS(lastKey - pFillInfo->start) / pFillInfo->slidingTime) + 1;
+ } else {
+ numOfRes = taosCountNatualInterval(lastKey, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
+ }
assert(numOfRes >= numOfRows);
} else { // reach the end of data
if ((ekey1 < pFillInfo->start && FILL_IS_ASC_FILL(pFillInfo)) ||
(ekey1 > pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo))) {
return 0;
- } else { // the numOfRes rows are all filled with specified policy
+ }
+ // the numOfRes rows are all filled with specified policy
+ if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
numOfRes = (ABS(ekey1 - pFillInfo->start) / pFillInfo->slidingTime) + 1;
+ } else {
+ numOfRes = taosCountNatualInterval(ekey1, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
}
}
@@ -251,7 +259,7 @@ int taosDoLinearInterpolation(int32_t type, SPoint* point1, SPoint* point2, SPoi
static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t num) {
for(int32_t j = 0; j < pFillInfo->numOfCols; ++j) {
SFillColInfo* pCol = &pFillInfo->pFillCol[j];
- if (pCol->flag == TSDB_COL_NORMAL) {
+ if (TSDB_COL_IS_NORMAL_COL(pCol->flag)) {
continue;
}
@@ -366,7 +374,12 @@ static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* nu
setTagsValue(pFillInfo, data, *num);
}
- pFillInfo->start += (pFillInfo->slidingTime * step);
+// TODO natural sliding time
+ if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
+ pFillInfo->start += (pFillInfo->slidingTime * step);
+ } else {
+ pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
+ }
pFillInfo->numOfCurrent++;
(*num) += 1;
@@ -446,7 +459,7 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu
// assign rows to dst buffer
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
- if (pCol->flag == TSDB_COL_TAG) {
+ if (TSDB_COL_IS_TAG(pCol->flag)) {
continue;
}
@@ -473,7 +486,12 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu
// set the tag value for final result
setTagsValue(pFillInfo, data, num);
- pFillInfo->start += (pFillInfo->slidingTime * step);
+  // TODO natural sliding time
+ if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
+ pFillInfo->start += (pFillInfo->slidingTime * step);
+ } else {
+ pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
+ }
pFillInfo->rowIdx += 1;
pFillInfo->numOfCurrent +=1;
diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c
index c09e244643a1222982d37811ca0cc1f4d924c600..9629f24cc2d9ebb58b28bad70b8ec6c2661ea80b 100644
--- a/src/query/src/qParserImpl.c
+++ b/src/query/src/qParserImpl.c
@@ -182,7 +182,7 @@ tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) {
if (pRight != NULL && pLeft != NULL) {
char* endPos = pRight->token.z + pRight->token.n;
pExpr->token.z = pLeft->token.z;
- pExpr->token.n = endPos - pExpr->token.z;
+ pExpr->token.n = (uint32_t)(endPos - pExpr->token.z);
pExpr->token.type = pLeft->token.type;
}
diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c
index 8d7730a75a00fb6625cbdf9bd8ab4683d8d93a51..b3e97459d36c68d48c04447b159473bd179a5dbb 100644
--- a/src/query/src/qResultbuf.c
+++ b/src/query/src/qResultbuf.c
@@ -142,7 +142,7 @@ static char* doFlushPageToDisk(SDiskbasedResultBuf* pResultBuf, SPageInfo* pg) {
}
- ret = fwrite(t, size, 1, pResultBuf->file);
+ ret = (int32_t)fwrite(t, size, 1, pResultBuf->file);
if (ret != size) { // todo handle the error case
}
diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c
index 518bb4083b71bffbdc52f8cc6f8b3cdaba2b35f5..b264f6cdc9d815a12cc5a3ab0e5c09c0d670bcdb 100644
--- a/src/query/src/qTsbuf.c
+++ b/src/query/src/qTsbuf.c
@@ -242,13 +242,13 @@ static void writeDataToDisk(STSBuf* pTSBuf) {
* both side has the compressed length is used to support load data forwards/backwords.
*/
int32_t metaLen = 0;
- metaLen += fwrite(&pBlock->tag.nType, 1, sizeof(pBlock->tag.nType), pTSBuf->f);
- metaLen += fwrite(&pBlock->tag.nLen, 1, sizeof(pBlock->tag.nLen), pTSBuf->f);
+ metaLen += (int32_t)fwrite(&pBlock->tag.nType, 1, sizeof(pBlock->tag.nType), pTSBuf->f);
+ metaLen += (int32_t)fwrite(&pBlock->tag.nLen, 1, sizeof(pBlock->tag.nLen), pTSBuf->f);
if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR) {
- metaLen += fwrite(pBlock->tag.pz, 1, (size_t)pBlock->tag.nLen, pTSBuf->f);
+ metaLen += (int32_t)fwrite(pBlock->tag.pz, 1, (size_t)pBlock->tag.nLen, pTSBuf->f);
} else if (pBlock->tag.nType != TSDB_DATA_TYPE_NULL) {
- metaLen += fwrite(&pBlock->tag.i64Key, 1, sizeof(int64_t), pTSBuf->f);
+ metaLen += (int32_t)fwrite(&pBlock->tag.i64Key, 1, sizeof(int64_t), pTSBuf->f);
}
fwrite(&pBlock->numOfElem, sizeof(pBlock->numOfElem), 1, pTSBuf->f);
diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c
index 4c6d75ec144af9b412f8128f9ae3c63de9807bef..84f22918ecb8776bf52c262be913b72253dc688f 100644
--- a/src/tsdb/src/tsdbRWHelper.c
+++ b/src/tsdb/src/tsdbRWHelper.c
@@ -1130,8 +1130,15 @@ static int tsdbCheckAndDecodeColumnData(SDataCol *pDataCol, char *content, int32
// Decode the data
if (comp) {
// // Need to decompress
- pDataCol->len = (*(tDataTypeDesc[pDataCol->type].decompFunc))(
- content, len - sizeof(TSCKSUM), numOfRows, pDataCol->pData, pDataCol->spaceSize, comp, buffer, bufferSize);
+ int tlen = (*(tDataTypeDesc[pDataCol->type].decompFunc))(content, len - sizeof(TSCKSUM), numOfRows, pDataCol->pData,
+ pDataCol->spaceSize, comp, buffer, bufferSize);
+ if (tlen <= 0) {
+ tsdbError("Failed to decompress column, file corrupted, len:%d comp:%d numOfRows:%d maxPoints:%d bufferSize:%d",
+ len, comp, numOfRows, maxPoints, bufferSize);
+ terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
+ return -1;
+ }
+ pDataCol->len = tlen;
if (pDataCol->type == TSDB_DATA_TYPE_BINARY || pDataCol->type == TSDB_DATA_TYPE_NCHAR) {
dataColSetOffset(pDataCol, numOfRows);
}
diff --git a/src/util/inc/tnettest.h b/src/util/inc/tnettest.h
new file mode 100644
index 0000000000000000000000000000000000000000..3fe1dfa9204fbbf85f193078b17e0bb4f9643848
--- /dev/null
+++ b/src/util/inc/tnettest.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TDENGINE_TNETTEST_H
+#define TDENGINE_TNETTEST_H
+
+#include <stdint.h>  // uint16_t used in the declaration below; keeps this header self-contained
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Runs the built-in network test against <host> over the port range
+// [port, endPort], using test packets of pktLen bytes. netTestRole selects
+// client vs. server behavior (the accepted role strings are defined in
+// tnettest.c — confirm there).
+void taosNetTest(const char* host, uint16_t port, uint16_t endPort, int pktLen, const char* netTestRole);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // TDENGINE_TNETTEST_H
diff --git a/src/util/inc/tscompression.h b/src/util/inc/tscompression.h
index bd1ccf3ca5fd52cb8e6f596ff955579389cd249f..37d1e7b5909cf68f4dc7a25803f94442dc2cf7b0 100644
--- a/src/util/inc/tscompression.h
+++ b/src/util/inc/tscompression.h
@@ -65,7 +65,7 @@ static FORCE_INLINE int tsDecompressTinyint(const char *const input, int compres
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressINTImp(input, nelements, output, TSDB_DATA_TYPE_TINYINT);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressINTImp(buffer, nelements, output, TSDB_DATA_TYPE_TINYINT);
} else {
assert(0);
@@ -91,7 +91,7 @@ static FORCE_INLINE int tsDecompressSmallint(const char *const input, int compre
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressINTImp(input, nelements, output, TSDB_DATA_TYPE_SMALLINT);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressINTImp(buffer, nelements, output, TSDB_DATA_TYPE_SMALLINT);
} else {
assert(0);
@@ -117,7 +117,7 @@ static FORCE_INLINE int tsDecompressInt(const char *const input, int compressedS
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressINTImp(input, nelements, output, TSDB_DATA_TYPE_INT);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressINTImp(buffer, nelements, output, TSDB_DATA_TYPE_INT);
} else {
assert(0);
@@ -143,7 +143,7 @@ static FORCE_INLINE int tsDecompressBigint(const char *const input, int compress
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressINTImp(input, nelements, output, TSDB_DATA_TYPE_BIGINT);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressINTImp(buffer, nelements, output, TSDB_DATA_TYPE_BIGINT);
} else {
assert(0);
@@ -169,7 +169,7 @@ static FORCE_INLINE int tsDecompressBool(const char *const input, int compressed
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressBoolImp(input, nelements, output);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressBoolImp(buffer, nelements, output);
} else {
assert(0);
@@ -205,7 +205,7 @@ static FORCE_INLINE int tsDecompressFloat(const char *const input, int compresse
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressFloatImp(input, nelements, output);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressFloatImp(buffer, nelements, output);
} else {
assert(0);
@@ -231,7 +231,7 @@ static FORCE_INLINE int tsDecompressDouble(const char *const input, int compress
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressDoubleImp(input, nelements, output);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressDoubleImp(buffer, nelements, output);
} else {
assert(0);
@@ -257,7 +257,7 @@ static FORCE_INLINE int tsDecompressTimestamp(const char *const input, int compr
if (algorithm == ONE_STAGE_COMP) {
return tsDecompressTimestampImp(input, nelements, output);
} else if (algorithm == TWO_STAGE_COMP) {
- tsDecompressStringImp(input, compressedSize, buffer, bufferSize);
+ if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1;
return tsDecompressTimestampImp(buffer, nelements, output);
} else {
assert(0);
diff --git a/src/util/src/hash.c b/src/util/src/hash.c
index 2912b0a891b1e2f34d79cbb071ff782108dc4fbf..cc96f83f445a3b8e0ff1047b8fb0fd1d7721f7ce 100644
--- a/src/util/src/hash.c
+++ b/src/util/src/hash.c
@@ -376,6 +376,12 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe
}
}
+ if (pe->num == 0) {
+ assert(pe->next == NULL);
+ } else {
+ assert(pe->next != NULL);
+ }
+
if (pHashObj->type == HASH_ENTRY_LOCK) {
taosWUnLockLatch(&pe->latch);
}
@@ -389,22 +395,8 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe
if (pRes != NULL) {
atomic_sub_fetch_64(&pHashObj->size, 1);
FREE_HASH_NODE(pHashObj, pRes);
-
- if (pe->num == 0) {
- assert(pe->next == NULL);
- } else {
- assert(pe->next != NULL);
- }
-
return 0;
} else {
-
- if (pe->num == 0) {
- assert(pe->next == NULL);
- } else {
- assert(pe->next != NULL);
- }
-
return -1;
}
}
diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c
index 9df05a0ee89128122241ee52a4bf49611031490a..3b14254fff5cd5757b350242884c75565ef0353c 100644
--- a/src/util/src/tcache.c
+++ b/src/util/src/tcache.c
@@ -266,7 +266,12 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen
}
SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGetCB(pCacheObj->pHashTable, key, keyLen, incRefFn);
+ if (ptNode != NULL) {
+ assert ((*ptNode) != NULL && (int64_t) ((*ptNode)->data) != 0x40);
+ }
+
void* pData = (ptNode != NULL)? (*ptNode)->data:NULL;
+ assert((int64_t)pData != 0x40);
if (pData != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
@@ -349,7 +354,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
char* d = pNode->data;
int32_t ref = T_REF_VAL_GET(pNode);
- uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, key, d, ref - 1);
+ uDebug("cache:%s, key:%p, %p is released, refcnt:%d, intrash:%d", pCacheObj->name, key, d, ref - 1, inTrashCan);
/*
* If it is not referenced by other users, remove it immediately. Otherwise move this node to trashcan wait for all users
@@ -373,14 +378,23 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
} else {
// NOTE: remove it from hash in the first place, otherwise, the pNode may have been released by other thread
// when reaches here.
- int32_t ret = taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize);
+ SCacheDataNode* p = NULL;
+ int32_t ret = taosHashRemoveWithData(pCacheObj->pHashTable, pNode->key, pNode->keySize, &p, sizeof(void*));
ref = T_REF_DEC(pNode);
// successfully remove from hash table, if failed, this node must have been move to trash already, do nothing.
// note that the remove operation can be executed only once.
if (ret == 0) {
+ if (p != pNode) {
+ uDebug("cache:%s, key:%p, successfully removed a new entry:%p, refcnt:%d, prev entry:%p has been removed by others already", pCacheObj->name, pNode->key, p->data, T_REF_VAL_GET(p), pNode->data);
+ assert(p->pTNodeHeader == NULL);
+ taosAddToTrash(pCacheObj, p);
+ } else {
+
+ uDebug("cache:%s, key:%p, %p successfully removed from hash table, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, ref);
if (ref > 0) {
assert(pNode->pTNodeHeader == NULL);
+
taosAddToTrash(pCacheObj, pNode);
} else { // ref == 0
atomic_sub_fetch_64(&pCacheObj->totalSize, pNode->size);
@@ -395,6 +409,9 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
free(pNode);
}
+ }
+ } else {
+ uDebug("cache:%s, key:%p, %p has been removed from hash table by other thread already, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, ref);
}
}
@@ -483,12 +500,10 @@ void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
STrashElem *pElem = calloc(1, sizeof(STrashElem));
pElem->pData = pNode;
pElem->prev = NULL;
-
- pNode->pTNodeHeader = pElem;
pNode->inTrashCan = true;
+ pNode->pTNodeHeader = pElem;
__cache_wr_lock(pCacheObj);
-
pElem->next = pCacheObj->pTrash;
if (pCacheObj->pTrash) {
pCacheObj->pTrash->prev = pElem;
diff --git a/src/util/src/tcompression.c b/src/util/src/tcompression.c
index 8c5828d32d2323cbdc7a05687c3e64e4e61a29af..1a5d28625f7061a4eec1ca41e654c2683c3ba9d5 100644
--- a/src/util/src/tcompression.c
+++ b/src/util/src/tcompression.c
@@ -47,10 +47,11 @@
*
*/
-#include "os.h"
#include "lz4.h"
-#include "tscompression.h"
+#include "os.h"
#include "taosdef.h"
+#include "tscompression.h"
+#include "tulog.h"
static const int TEST_NUMBER = 1;
#define is_bigendian() ((*(char *)&TEST_NUMBER) == 0)
@@ -88,7 +89,7 @@ int tsCompressINTImp(const char *const input, const int nelements, char *const o
word_length = CHAR_BYTES;
break;
default:
- perror("Wrong integer types.\n");
+ uError("Invalid compress integer type:%d", type);
return -1;
}
@@ -209,7 +210,7 @@ int tsDecompressINTImp(const char *const input, const int nelements, char *const
word_length = CHAR_BYTES;
break;
default:
- perror("Wrong integer types.\n");
+ uError("Invalid decompress integer type:%d", type);
return -1;
}
@@ -307,7 +308,7 @@ int tsCompressBoolImp(const char *const input, const int nelements, char *const
/* t = (~((( uint8_t)1) << (7-i%BITS_PER_BYTE))); */
output[pos] |= t;
} else {
- perror("Wrong bool value.\n");
+ uError("Invalid compress bool value:%d", output[pos]);
return -1;
}
}
@@ -363,7 +364,7 @@ int tsCompressBoolRLEImp(const char *const input, const int nelements, char *con
} else if (num == 0) {
output[_pos++] = (counter << 1) | INT8MASK(0);
} else {
- perror("Wrong bool value!\n");
+ uError("Invalid compress bool value:%d", output[_pos]);
return -1;
}
}
@@ -413,9 +414,7 @@ int tsDecompressStringImp(const char *const input, int compressedSize, char *con
/* It is compressed by LZ4 algorithm */
const int decompressed_size = LZ4_decompress_safe(input + 1, output, compressedSize - 1, outputSize);
if (decompressed_size < 0) {
- char msg[128] = {0};
- sprintf(msg, "decomp_size:%d, Error decompress in LZ4 algorithm!\n", decompressed_size);
- perror(msg);
+ uError("Failed to decompress string with LZ4 algorithm, decompressed size:%d", decompressed_size);
return -1;
}
@@ -425,7 +424,7 @@ int tsDecompressStringImp(const char *const input, int compressedSize, char *con
memcpy(output, input + 1, compressedSize - 1);
return compressedSize - 1;
} else {
- perror("Wrong compressed string indicator!\n");
+ uError("Invalid decompress string indicator:%d", input[0]);
return -1;
}
}
diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c
index 704af2017e4acae1992b645fe6c60ba3157e5bcc..875c597008f19f6114bb6f6dbff8071925480afc 100644
--- a/src/util/src/tconfig.c
+++ b/src/util/src/tconfig.c
@@ -260,8 +260,13 @@ void taosReadGlobalLogCfg() {
}
strcpy(configDir, full_path.we_wordv[0]);
} else {
+ #ifdef _TD_POWER_
+ printf("configDir:%s not there, use default value: /etc/power", configDir);
+ strcpy(configDir, "/etc/power");
+ #else
printf("configDir:%s not there, use default value: /etc/taos", configDir);
strcpy(configDir, "/etc/taos");
+ #endif
}
wordfree(&full_path);
diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c
index cf6ccdcbce8c25751bf9014fa696bae5e62ab825..766301914a60f347eeb71937d0806602f690ce7c 100644
--- a/src/util/src/tlog.c
+++ b/src/util/src/tlog.c
@@ -66,7 +66,11 @@ int32_t tsAsyncLog = 1;
float tsTotalLogDirGB = 0;
float tsAvailLogDirGB = 0;
float tsMinimalLogDirGB = 0.1f;
+#ifdef _TD_POWER_
+char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/power";
+#else
char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/taos";
+#endif
static SLogObj tsLogObj = { .fileNum = 1 };
static void * taosAsyncOutputLog(void *param);
diff --git a/src/util/src/tnettest.c b/src/util/src/tnettest.c
new file mode 100644
index 0000000000000000000000000000000000000000..5a1430baede3cbcb2106f355420572a09d4c22cd
--- /dev/null
+++ b/src/util/src/tnettest.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "os.h"
+#include "taosdef.h"
+#include "taoserror.h"
+#include "tulog.h"
+#include "tconfig.h"
+#include "tglobal.h"
+#include "tsocket.h"
+
+#define MAX_PKG_LEN (64*1000)
+#define BUFFER_SIZE (MAX_PKG_LEN + 1024)
+
+typedef struct {
+ uint32_t hostIp;
+ uint16_t port;
+ uint16_t pktLen;
+} info_s;
+
+static char serverFqdn[TSDB_FQDN_LEN];
+static uint16_t g_startPort = 0;
+static uint16_t g_endPort = 6042;
+
+static void *bindUdpPort(void *sarg) {
+ info_s *pinfo = (info_s *)sarg;
+ int port = pinfo->port;
+ SOCKET serverSocket;
+
+ struct sockaddr_in server_addr;
+ struct sockaddr_in clientAddr;
+ char buffer[BUFFER_SIZE];
+ int iDataNum;
+
+ if ((serverSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) {
+ perror("socket");
+ return NULL;
+ }
+
+ bzero(&server_addr, sizeof(server_addr));
+ server_addr.sin_family = AF_INET;
+ server_addr.sin_port = htons(port);
+ server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
+
+ if (bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) {
+ perror("connect");
+ return NULL;
+ }
+
+ socklen_t sin_size;
+
+ while (1) {
+ memset(buffer, 0, BUFFER_SIZE);
+
+ sin_size = sizeof(*(struct sockaddr *)&server_addr);
+
+ iDataNum = recvfrom(serverSocket, buffer, BUFFER_SIZE, 0, (struct sockaddr *)&clientAddr, &sin_size);
+
+ if (iDataNum < 0) {
+ perror("recvfrom null");
+ continue;
+ }
+ if (iDataNum > 0) {
+ printf("recv Client: %s pkg from UDP port: %d, pkg len: %d\n", inet_ntoa(clientAddr.sin_addr), port, iDataNum);
+ //printf("Read msg from udp:%s ... %s\n", buffer, buffer+iDataNum-16);
+
+ sendto(serverSocket, buffer, iDataNum, 0, (struct sockaddr *)&clientAddr, (int)sin_size);
+ }
+ }
+
+ taosCloseSocket(serverSocket);
+ return NULL;
+}
+
+static void *bindTcpPort(void *sarg) {
+ info_s *pinfo = (info_s *)sarg;
+ int port = pinfo->port;
+ SOCKET serverSocket;
+
+ struct sockaddr_in server_addr;
+ struct sockaddr_in clientAddr;
+ int addr_len = sizeof(clientAddr);
+ SOCKET client;
+ char buffer[BUFFER_SIZE];
+ int iDataNum = 0;
+
+ if ((serverSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) {
+ printf("socket() fail: %s", strerror(errno));
+ return NULL;
+ }
+
+ bzero(&server_addr, sizeof(server_addr));
+ server_addr.sin_family = AF_INET;
+ server_addr.sin_port = htons(port);
+ server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
+
+ if (bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) {
+ printf("port:%d bind() fail: %s", port, strerror(errno));
+ return NULL;
+ }
+
+ if (listen(serverSocket, 5) < 0) {
+ printf("listen() fail: %s", strerror(errno));
+ return NULL;
+ }
+
+ //printf("Bind port: %d success\n", port);
+ while (1) {
+ client = accept(serverSocket, (struct sockaddr *)&clientAddr, (socklen_t *)&addr_len);
+ if (client < 0) {
+ printf("accept() fail: %s", strerror(errno));
+ continue;
+ }
+
+ iDataNum = 0;
+ memset(buffer, 0, BUFFER_SIZE);
+ int nleft, nread;
+ char *ptr = buffer;
+ nleft = pinfo->pktLen;
+ while (nleft > 0) {
+ nread = recv(client, ptr, BUFFER_SIZE, 0);
+
+ if (nread == 0) {
+ break;
+ } else if (nread < 0) {
+ if (errno == EINTR) {
+ continue;
+ } else {
+ printf("recv Client: %s pkg from TCP port: %d fail:%s.\n", inet_ntoa(clientAddr.sin_addr), port, strerror(errno));
+ taosCloseSocket(serverSocket);
+ return NULL;
+ }
+ } else {
+ nleft -= nread;
+ ptr += nread;
+ iDataNum += nread;
+ }
+ }
+
+ printf("recv Client: %s pkg from TCP port: %d, pkg len: %d\n", inet_ntoa(clientAddr.sin_addr), port, iDataNum);
+ if (iDataNum > 0) {
+ send(client, buffer, iDataNum, 0);
+ }
+ }
+
+ taosCloseSocket(serverSocket);
+ return NULL;
+}
+
+static int checkTcpPort(info_s *info) {
+ struct sockaddr_in serverAddr;
+ SOCKET clientSocket;
+ char sendbuf[BUFFER_SIZE];
+ char recvbuf[BUFFER_SIZE];
+ int iDataNum = 0;
+ if ((clientSocket = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
+ printf("socket() fail: %s\n", strerror(errno));
+ return -1;
+ }
+
+ // set send and recv timeouts
+ struct timeval timeout;
+ timeout.tv_sec = 2; //s
+ timeout.tv_usec = 0; //us
+ if (setsockopt(clientSocket, SOL_SOCKET,SO_SNDTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
+ perror("setsockopt send timer failed:");
+ }
+ if (setsockopt(clientSocket, SOL_SOCKET,SO_RCVTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
+ perror("setsockopt recv timer failed:");
+ }
+
+ serverAddr.sin_family = AF_INET;
+ serverAddr.sin_port = htons(info->port);
+
+ serverAddr.sin_addr.s_addr = info->hostIp;
+
+ //printf("=================================\n");
+ if (connect(clientSocket, (struct sockaddr *)&serverAddr, sizeof(serverAddr)) < 0) {
+ printf("connect() fail: %s\t", strerror(errno));
+ return -1;
+ }
+ //printf("Connect to: %s:%d...success\n", host, port);
+ memset(sendbuf, 0, BUFFER_SIZE);
+ memset(recvbuf, 0, BUFFER_SIZE);
+
+ struct in_addr ipStr;
+ memcpy(&ipStr, &info->hostIp, 4);
+ sprintf(sendbuf, "client send tcp pkg to %s:%d, content: 1122334455", inet_ntoa(ipStr), info->port);
+ sprintf(sendbuf + info->pktLen - 16, "1122334455667788");
+
+ send(clientSocket, sendbuf, info->pktLen, 0);
+
+ memset(recvbuf, 0, BUFFER_SIZE);
+ int nleft, nread;
+ char *ptr = recvbuf;
+ nleft = info->pktLen;
+ while (nleft > 0) {
+ nread = recv(clientSocket, ptr, BUFFER_SIZE, 0);;
+
+ if (nread == 0) {
+ break;
+ } else if (nread < 0) {
+ if (errno == EINTR) {
+ continue;
+ } else {
+ printf("recv ack pkg from TCP port: %d fail:%s.\n", info->port, strerror(errno));
+ taosCloseSocket(clientSocket);
+ return -1;
+ }
+ } else {
+ nleft -= nread;
+ ptr += nread;
+ iDataNum += nread;
+ }
+ }
+
+ if (iDataNum < info->pktLen) {
+ printf("recv ack pkg len: %d, less than req pkg len: %d from tcp port: %d\n", iDataNum, info->pktLen, info->port);
+ return -1;
+ }
+ //printf("Read ack pkg len:%d from tcp port: %d, buffer: %s %s\n", info->pktLen, port, recvbuf, recvbuf+iDataNum-8);
+
+ taosCloseSocket(clientSocket);
+ return 0;
+}
+
+static int checkUdpPort(info_s *info) {
+ struct sockaddr_in serverAddr;
+ SOCKET clientSocket;
+ char sendbuf[BUFFER_SIZE];
+ char recvbuf[BUFFER_SIZE];
+ int iDataNum = 0;
+ if ((clientSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) {
+ perror("socket");
+ return -1;
+ }
+
+ // set timeouts
+ struct timeval timeout;
+ timeout.tv_sec = 2; //s
+ timeout.tv_usec = 0; //us
+ if (setsockopt(clientSocket, SOL_SOCKET,SO_SNDTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
+ perror("setsockopt send timer failed:");
+ }
+ if (setsockopt(clientSocket, SOL_SOCKET,SO_RCVTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
+ perror("setsockopt recv timer failed:");
+ }
+
+ serverAddr.sin_family = AF_INET;
+ serverAddr.sin_port = htons(info->port);
+ serverAddr.sin_addr.s_addr = info->hostIp;
+
+ memset(sendbuf, 0, BUFFER_SIZE);
+ memset(recvbuf, 0, BUFFER_SIZE);
+
+ struct in_addr ipStr;
+ memcpy(&ipStr, &info->hostIp, 4);
+ sprintf(sendbuf, "client send udp pkg to %s:%d, content: 1122334455", inet_ntoa(ipStr), info->port);
+ sprintf(sendbuf + info->pktLen - 16, "1122334455667788");
+
+ socklen_t sin_size = sizeof(*(struct sockaddr *)&serverAddr);
+
+ int code = sendto(clientSocket, sendbuf, info->pktLen, 0, (struct sockaddr *)&serverAddr, (int)sin_size);
+ if (code < 0) {
+ perror("sendto");
+ return -1;
+ }
+
+ iDataNum = recvfrom(clientSocket, recvbuf, BUFFER_SIZE, 0, (struct sockaddr *)&serverAddr, &sin_size);
+
+ if (iDataNum < info->pktLen) {
+ printf("Read ack pkg len: %d, less than req pkg len: %d from udp port: %d\t\t", iDataNum, info->pktLen, info->port);
+ return -1;
+ }
+
+ //printf("Read ack pkg len:%d from udp port: %d, buffer: %s %s\n", info->pktLen, port, recvbuf, recvbuf+iDataNum-8);
+ taosCloseSocket(clientSocket);
+ return 0;
+}
+
+static void checkPort(uint32_t hostIp, uint16_t startPort, uint16_t maxPort, uint16_t pktLen) {
+ int ret;
+ info_s info;
+ memset(&info, 0, sizeof(info_s));
+ info.hostIp = hostIp;
+ info.pktLen = pktLen;
+
+ for (uint16_t port = startPort; port <= maxPort; port++) {
+ //printf("test: %s:%d\n", info.host, port);
+ printf("\n");
+
+ info.port = port;
+ ret = checkTcpPort(&info);
+ if (ret != 0) {
+ printf("tcp port:%d test fail.\t\n", port);
+ } else {
+ printf("tcp port:%d test ok.\t\t", port);
+ }
+
+ ret = checkUdpPort(&info);
+ if (ret != 0) {
+ printf("udp port:%d test fail.\t\n", port);
+ } else {
+ printf("udp port:%d test ok.\t\t", port);
+ }
+ }
+
+ printf("\n");
+ return ;
+}
+
+static void taosNetTestClient(const char* serverFqdn, uint16_t startPort, uint16_t endPort, int pktLen) {
+ uint32_t serverIp = taosGetIpFromFqdn(serverFqdn);
+ if (serverIp == 0xFFFFFFFF) {
+ printf("Failed to resolve FQDN:%s", serverFqdn);
+ exit(-1);
+ }
+
+ checkPort(serverIp, startPort, endPort, pktLen);
+
+ return;
+}
+
+
+
+static void taosNetTestServer(uint16_t startPort, uint16_t endPort, int pktLen) {
+
+ int port = startPort;
+ int num = endPort - startPort + 1;
+
+ if (num < 0) {
+ num = 1;
+ }
+
+ pthread_t *pids = malloc(2 * num * sizeof(pthread_t));
+ info_s * tinfos = malloc(num * sizeof(info_s));
+ info_s * uinfos = malloc(num * sizeof(info_s));
+
+ for (size_t i = 0; i < num; i++) {
+ info_s *tcpInfo = tinfos + i;
+ tcpInfo->port = (uint16_t)(port + i);
+ tcpInfo->pktLen = pktLen;
+
+ if (pthread_create(pids + i, NULL, bindTcpPort, tcpInfo) != 0)
+ {
+ printf("create thread fail, port:%d.\n", port);
+ exit(-1);
+ }
+
+ info_s *udpInfo = uinfos + i;
+ udpInfo->port = (uint16_t)(port + i);
+ if (pthread_create(pids + num + i, NULL, bindUdpPort, udpInfo) != 0)
+ {
+ printf("create thread fail, port:%d.\n", port);
+ exit(-1);
+ }
+ }
+
+ for (int i = 0; i < num; i++) {
+ pthread_join(pids[i], NULL);
+ pthread_join(pids[(num + i)], NULL);
+ }
+}
+
+
+void taosNetTest(const char* host, uint16_t port, uint16_t endPort, int pktLen, const char* netTestRole) {
+ if (pktLen > MAX_PKG_LEN) {
+ printf("test packet len overflow: %d, max len not greater than %d bytes\n", pktLen, MAX_PKG_LEN);
+ exit(-1);
+ }
+
+ if (port && endPort) {
+ if (port > endPort) {
+ printf("endPort[%d] must not be less than port[%d]\n", endPort, port);
+ exit(-1);
+ }
+ }
+
+ if (host && host[0] != 0) {
+ if (strlen(host) >= TSDB_EP_LEN) {
+ printf("host invalid: %s\n", host);
+ exit(-1);
+ }
+
+ taosGetFqdnPortFromEp(host, serverFqdn, &g_startPort);
+ } else {
+ tstrncpy(serverFqdn, "127.0.0.1", TSDB_IPv4ADDR_LEN);
+ g_startPort = tsServerPort;
+ }
+
+ if (port) {
+ g_startPort = port;
+ }
+
+ if (endPort) {
+ g_endPort = endPort;
+ }
+
+ if (port > endPort) {
+ printf("endPort[%d] must not be less than port[%d]\n", g_endPort, g_startPort);
+ exit(-1);
+ }
+
+ if (0 == strcmp("client", netTestRole)) {
+ printf("host: %s\tstart port: %d\tend port: %d\tpacket len: %d\n", serverFqdn, g_startPort, g_endPort, pktLen);
+ taosNetTestClient(serverFqdn, g_startPort, g_endPort, pktLen);
+ } else if (0 == strcmp("server", netTestRole)) {
+ taosNetTestServer(g_startPort, g_endPort, pktLen);
+ }
+}
+
diff --git a/src/util/src/version.c.in b/src/util/src/version.c.in
index c7aea2afb1fb40ea3ce1902cd47b8d2b76fee650..21c78a0eb4cad28ea1cdffcd58dc795ebd1000e5 100644
--- a/src/util/src/version.c.in
+++ b/src/util/src/version.c.in
@@ -1,7 +1,7 @@
-char version[12] = "${TD_VER_1}.${TD_VER_2}.${TD_VER_3}.${TD_VER_4}";
+char version[12] = "${TD_VER_NUMBER}";
char compatible_version[12] = "${TD_VER_COMPATIBLE}";
char gitinfo[48] = "${TD_VER_GIT}";
char gitinfoOfInternal[48] = "${TD_VER_GIT_INTERNAL}";
char buildinfo[64] = "Built at ${TD_VER_DATE}";
-void libtaos_${TD_VER_1}_${TD_VER_2}_${TD_VER_3}_${TD_VER_4}_${TD_VER_OSTYPE}_${TD_VER_CPUTYPE}_${TD_VER_VERTYPE}() {};
+void libtaos_${TD_LIB_VER_NUMBER}_${TD_VER_OSTYPE}_${TD_VER_CPUTYPE}_${TD_VER_VERTYPE}() {};
diff --git a/tests/examples/lua/lua_connector.c b/tests/examples/lua/lua_connector.c
index f4065bb27452fba0e4cccbd8d7024e68579f5637..143f16a799244d4b77f3d01c4d2189fa6519b0d9 100644
--- a/tests/examples/lua/lua_connector.c
+++ b/tests/examples/lua/lua_connector.c
@@ -58,8 +58,10 @@ static int l_query(lua_State *L){
int table_index = lua_gettop(L);
// printf("receive command:%s\r\n",s);
- if(taos_query(taos, s)!=0){
- printf("failed, reason:%s\n", taos_errstr(taos));
+ result = taos_query(taos,s);
+ int32_t code = taos_errno(result);
+ if( code != 0){
+ printf("failed, reason:%s\n", taos_errstr(result));
lua_pushnumber(L, -1);
lua_setfield(L, table_index, "code");
lua_pushstring(L, taos_errstr(taos));
@@ -69,24 +71,13 @@ static int l_query(lua_State *L){
}else{
//printf("success to query.\n");
- result = taos_use_result(taos);
-
- if (result == NULL) {
- printf("failed to get result, reason:%s\n", taos_errstr(taos));
- lua_pushnumber(L, -2);
- lua_setfield(L, table_index, "code");
- lua_pushstring(L, taos_errstr(taos));
- lua_setfield(L, table_index, "error");
- return 1;
- }
-
TAOS_ROW row;
int rows = 0;
- int num_fields = taos_field_count(taos);
+ int num_fields = taos_field_count(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
char temp[256];
- int affectRows = taos_affected_rows(taos);
+ int affectRows = taos_affected_rows(result);
// printf(" affect rows:%d\r\n", affectRows);
lua_pushnumber(L, 0);
lua_setfield(L, table_index, "code");
@@ -155,15 +146,13 @@ static int l_query(lua_State *L){
}
void stream_cb(void *param, TAOS_RES *result, TAOS_ROW row){
-
struct cb_param* p = (struct cb_param*) param;
TAOS_FIELD *fields = taos_fetch_fields(result);
int numFields = taos_num_fields(result);
+ printf("\nnumfields:%d\n", numFields);
printf("\n\r-----------------------------------------------------------------------------------\n");
- // printf("r:%d, L:%d\n",p->callback, p->state);
-
lua_State *L = p->state;
lua_rawgeti(L, LUA_REGISTRYINDEX, p->callback);
diff --git a/tests/examples/lua/test.lua b/tests/examples/lua/test.lua
index 38ae1c82f2b1bbcb473257200241fb20ab44878b..4d5f9fe7d36f99f9a9452e8137bf298361a05685 100644
--- a/tests/examples/lua/test.lua
+++ b/tests/examples/lua/test.lua
@@ -15,7 +15,7 @@ else
conn = res.conn
end
-local res = driver.query(conn,"drop database demo")
+local res = driver.query(conn,"drop database if exists demo")
res = driver.query(conn,"create database demo")
if res.code ~=0 then
@@ -106,7 +106,7 @@ end
--From now on we begin continous query in an definite (infinite if you want) loop.
local loop_index = 0
-while loop_index < 20 do
+while loop_index < 10 do
local t = os.time()*1000
local v = loop_index
res = driver.query(conn,string.format("INSERT INTO therm1 VALUES (%d, %d)",t,v))
diff --git a/tests/pytest/crash_gen.py b/tests/pytest/crash_gen.py
index 7d3eb959c06e8c3f57b6b7a738d487f7bf04aab7..7588e03e17cd04afa615adc7274dd363421f8112 100755
--- a/tests/pytest/crash_gen.py
+++ b/tests/pytest/crash_gen.py
@@ -161,6 +161,21 @@ class WorkerThread:
logger.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...")
break
+ # Before we fetch the task and run it, let's ensure we properly "use" the database
+ try:
+ if (gConfig.per_thread_db_connection): # most likely TRUE
+ if not self._dbConn.isOpen: # might have been closed during server auto-restart
+ self._dbConn.open()
+ self.useDb() # might encounter exceptions. TODO: catch
+ except taos.error.ProgrammingError as err:
+ errno = Helper.convertErrno(err.errno)
+ if errno in [0x383, 0x386, 0x00B, 0x014] : # invalid database, dropping, Unable to establish connection, Database not ready
+ # ignore
+ dummy = 0
+ else:
+ print("\nCaught programming error. errno=0x{:X}, msg={} ".format(errno, err.msg))
+ raise
+
# Fetch a task from the Thread Coordinator
logger.debug( "[TRD] Worker thread [{}] about to fetch task".format(self._tid))
task = tc.fetchTask()
@@ -324,10 +339,12 @@ class ThreadCoordinator:
logger.debug("[STT] transition ended")
# Due to limitation (or maybe not) of the Python library,
# we cannot share connections across threads
- if sm.hasDatabase():
- for t in self._pool.threadList:
- logger.debug("[DB] use db for all worker threads")
- t.useDb()
+ # Here we are in main thread, we cannot operate the connections created in workers
+ # Moving below to task loop
+ # if sm.hasDatabase():
+ # for t in self._pool.threadList:
+ # logger.debug("[DB] use db for all worker threads")
+ # t.useDb()
# t.execSql("use db") # main thread executing "use
# db" on behalf of every worker thread
except taos.error.ProgrammingError as err:
@@ -387,7 +404,7 @@ class ThreadCoordinator:
transitionFailed = self._doTransition() # To start, we end step -1 first
except taos.error.ProgrammingError as err:
transitionFailed = True
- errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno # correct error scheme
+ errno2 = Helper.convertErrno(err.errno) # correct error scheme
errMsg = "Transition failed: errno=0x{:X}, msg: {}".format(errno2, err)
logger.info(errMsg)
self._execStats.registerFailure(errMsg)
@@ -468,6 +485,10 @@ class ThreadCoordinator:
# We define a class to run a number of threads in locking steps.
+class Helper:
+ @classmethod
+ def convertErrno(cls, errno):
+ return errno if (errno > 0) else 0x80000000 + errno
class ThreadPool:
def __init__(self, numThreads, maxSteps):
@@ -613,8 +634,7 @@ class DbConn:
def resetDb(self): # reset the whole database, etc.
if (not self.isOpen):
- raise RuntimeError(
- "Cannot reset database until connection is open")
+ raise RuntimeError("Cannot reset database until connection is open")
# self._tdSql.prepare() # Recreate database, etc.
self.execute('drop database if exists db')
@@ -681,8 +701,7 @@ class DbConnRest(DbConn):
def close(self):
if (not self.isOpen):
- raise RuntimeError(
- "Cannot clean up database until connection is open")
+ raise RuntimeError("Cannot clean up database until connection is open")
# Do nothing for REST
logger.debug("[DB] REST Database connection closed")
self.isOpen = False
@@ -747,27 +766,32 @@ class DbConnRest(DbConn):
class MyTDSql:
- def __init__(self):
+ def __init__(self, hostAddr, cfgPath):
+ # Make the DB connection
+ self._conn = taos.connect(host=hostAddr, config=cfgPath)
+ self._cursor = self._conn.cursor()
+
self.queryRows = 0
self.queryCols = 0
self.affectedRows = 0
- def init(self, cursor, log=True):
- self.cursor = cursor
+ # def init(self, cursor, log=True):
+ # self.cursor = cursor
# if (log):
# caller = inspect.getframeinfo(inspect.stack()[1][0])
# self.cursor.log(caller.filename + ".sql")
def close(self):
- self.cursor.close()
+ self._conn.close() # TODO: very important, cursor close does NOT close DB connection!
+ self._cursor.close()
def query(self, sql):
self.sql = sql
try:
- self.cursor.execute(sql)
- self.queryResult = self.cursor.fetchall()
+ self._cursor.execute(sql)
+ self.queryResult = self._cursor.fetchall()
self.queryRows = len(self.queryResult)
- self.queryCols = len(self.cursor.description)
+ self.queryCols = len(self._cursor.description)
except Exception as e:
# caller = inspect.getframeinfo(inspect.stack()[1][0])
# args = (caller.filename, caller.lineno, sql, repr(e))
@@ -778,7 +802,7 @@ class MyTDSql:
def execute(self, sql):
self.sql = sql
try:
- self.affectedRows = self.cursor.execute(sql)
+ self.affectedRows = self._cursor.execute(sql)
except Exception as e:
# caller = inspect.getframeinfo(inspect.stack()[1][0])
# args = (caller.filename, caller.lineno, sql, repr(e))
@@ -791,13 +815,13 @@ class DbConnNative(DbConn):
# Class variables
_lock = threading.Lock()
_connInfoDisplayed = False
+ totalConnections = 0 # Not private
def __init__(self):
super().__init__()
self._type = self.TYPE_NATIVE
self._conn = None
- self._cursor = None
-
+ # self._cursor = None
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -814,7 +838,8 @@ class DbConnNative(DbConn):
buildPath = root[:len(root) - len("/build/bin")]
break
if buildPath == None:
- raise RuntimeError("Failed to determine buildPath, selfPath={}".format(selfPath))
+ raise RuntimeError("Failed to determine buildPath, selfPath={}, projPath={}"
+ .format(selfPath, projPath))
return buildPath
@@ -822,33 +847,40 @@ class DbConnNative(DbConn):
cfgPath = self.getBuildPath() + "/test/cfg"
hostAddr = "127.0.0.1"
- with self._lock: # force single threading for opening DB connections
- if not self._connInfoDisplayed:
- self.__class__._connInfoDisplayed = True # updating CLASS variable
- logger.info("Initiating TAOS native connection to {}, using config at {}".format(hostAddr, cfgPath))
-
- self._conn = taos.connect(host=hostAddr, config=cfgPath) # TODO: make configurable
- self._cursor = self._conn.cursor()
+ cls = self.__class__ # Get the class, to access class variables
+ with cls._lock: # force single threading for opening DB connections. # TODO: whaaat??!!!
+ if not cls._connInfoDisplayed:
+ cls._connInfoDisplayed = True # updating CLASS variable
+ logger.info("Initiating TAOS native connection to {}, using config at {}".format(hostAddr, cfgPath))
+ # Make the connection
+ # self._conn = taos.connect(host=hostAddr, config=cfgPath) # TODO: make configurable
+ # self._cursor = self._conn.cursor()
+ # Record the count in the class
+ self._tdSql = MyTDSql(hostAddr, cfgPath) # making DB connection
+ cls.totalConnections += 1
- self._cursor.execute('reset query cache')
+ self._tdSql.execute('reset query cache')
# self._cursor.execute('use db') # do this at the beginning of every
# Open connection
- self._tdSql = MyTDSql()
- self._tdSql.init(self._cursor)
-
+ # self._tdSql = MyTDSql()
+ # self._tdSql.init(self._cursor)
+
def close(self):
if (not self.isOpen):
- raise RuntimeError(
- "Cannot clean up database until connection is open")
+ raise RuntimeError("Cannot clean up database until connection is open")
self._tdSql.close()
+ # Decrement the class wide counter
+ cls = self.__class__ # Get the class, to access class variables
+ with cls._lock:
+ cls.totalConnections -= 1
+
logger.debug("[DB] Database connection closed")
self.isOpen = False
def execute(self, sql):
if (not self.isOpen):
- raise RuntimeError(
- "Cannot execute database commands until connection is open")
+ raise RuntimeError("Cannot execute database commands until connection is open")
logger.debug("[SQL] Executing SQL: {}".format(sql))
self._lastSql = sql
nRows = self._tdSql.execute(sql)
@@ -1528,7 +1560,7 @@ class Task():
try:
self._executeInternal(te, wt) # TODO: no return value?
except taos.error.ProgrammingError as err:
- errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno # correct error scheme
+ errno2 = Helper.convertErrno(err.errno)
if (gConfig.continue_on_exception): # user choose to continue
self.logDebug("[=] Continue after TAOS exception: errno=0x{:X}, msg: {}, SQL: {}".format(
errno2, err, wt.getDbConn().getLastSql()))
@@ -1678,9 +1710,8 @@ class ExecutionStats:
logger.info(
"| Total Elapsed Time (from wall clock): {:.3f} seconds".format(
self._elapsedTime))
- logger.info(
- "| Top numbers written: {}".format(
- TaskExecutor.getBoundedList()))
+ logger.info("| Top numbers written: {}".format(TaskExecutor.getBoundedList()))
+ logger.info("| Total Number of Active DB Native Connections: {}".format(DbConnNative.totalConnections))
logger.info(
"----------------------------------------------------------------------")
@@ -1789,7 +1820,7 @@ class TdSuperTable:
try:
dbc.query("select TBNAME from db.{}".format(self._stName)) # TODO: analyze result set later
except taos.error.ProgrammingError as err:
- errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno
+ errno2 = Helper.convertErrno(err.errno)
logger.debug("[=] Failed to get tables from super table: errno=0x{:X}, msg: {}".format(errno2, err))
raise
@@ -1891,7 +1922,7 @@ class TaskReadData(StateTransitionTask):
if aggExpr not in ['stddev(speed)']: #TODO: STDDEV not valid for super tables?!
dbc.execute("select {} from db.{}".format(aggExpr, sTable.getName()))
except taos.error.ProgrammingError as err:
- errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno
+ errno2 = Helper.convertErrno(err.errno)
logger.debug("[=] Read Failure: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, dbc.getLastSql()))
raise
@@ -1920,9 +1951,8 @@ class TaskDropSuperTable(StateTransitionTask):
self.execWtSql(wt, "drop table {}".format(
regTableName)) # nRows always 0, like MySQL
except taos.error.ProgrammingError as err:
- # correcting for strange error number scheme
- errno2 = err.errno if (
- err.errno > 0) else 0x80000000 + err.errno
+ # correcting for strange error number scheme
+ errno2 = Helper.convertErrno(err.errno)
if (errno2 in [0x362]): # mnode invalid table name
isSuccess = False
logger.debug(
@@ -2429,7 +2459,11 @@ class ServiceManagerThread:
for line in iter(out.readline, b''):
# print("Finished reading a line: {}".format(line))
# print("Adding item to queue...")
- line = line.decode("utf-8").rstrip()
+ try:
+ line = line.decode("utf-8").rstrip()
+ except UnicodeError:
+ print("\nNon-UTF8 server output: {}\n".format(line))
+
# This might block, and then causing "out" buffer to block
queue.put(line)
self._printProgress("_i")
@@ -2455,7 +2489,7 @@ class ServiceManagerThread:
def svcErrorReader(self, err: IO, queue):
for line in iter(err.readline, b''):
- print("\nTD Svc STDERR: {}".format(line))
+ print("\nTDengine Service (taosd) ERROR (from stderr): {}".format(line))
class TdeSubProcess:
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index fd5aa4ecf06b16ed6faa9d020cc4c598c65ffb9e..d600a003b08ab296245c06c0193552e628b2710d 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -12,7 +12,7 @@ python3 ./test.py -f insert/tinyint.py
python3 ./test.py -f insert/date.py
python3 ./test.py -f insert/binary.py
python3 ./test.py -f insert/nchar.py
-python3 ./test.py -f insert/nchar-boundary.py
+#python3 ./test.py -f insert/nchar-boundary.py
python3 ./test.py -f insert/nchar-unicode.py
python3 ./test.py -f insert/multi.py
python3 ./test.py -f insert/randomNullCommit.py
@@ -20,7 +20,7 @@ python3 ./test.py -f insert/randomNullCommit.py
python3 ./test.py -f table/column_name.py
python3 ./test.py -f table/column_num.py
python3 ./test.py -f table/db_table.py
-python3 ./test.py -f table/tablename-boundary.py
+#python3 ./test.py -f table/tablename-boundary.py
# tag
python3 ./test.py -f tag_lite/filter.py
@@ -52,7 +52,7 @@ python3 ./test.py -f tag_lite/set.py
python3 ./test.py -f tag_lite/smallint.py
python3 ./test.py -f tag_lite/tinyint.py
-python3 ./test.py -f dbmgmt/database-name-boundary.py
+#python3 ./test.py -f dbmgmt/database-name-boundary.py
python3 ./test.py -f import_merge/importBlock1HO.py
python3 ./test.py -f import_merge/importBlock1HPO.py
@@ -145,6 +145,8 @@ python3 ./test.py -f query/queryJoin.py
python3 ./test.py -f query/select_last_crash.py
python3 ./test.py -f query/queryNullValueTest.py
python3 ./test.py -f query/queryInsertValue.py
+python3 ./test.py -f query/queryConnection.py
+python3 ./test.py -f query/natualInterval.py
#stream
python3 ./test.py -f stream/metric_1.py
@@ -182,7 +184,7 @@ python3 ./test.py -f functions/function_spread.py
python3 ./test.py -f functions/function_stddev.py
python3 ./test.py -f functions/function_sum.py
python3 ./test.py -f functions/function_top.py
-python3 ./test.py -f functions/function_twa.py
+#python3 ./test.py -f functions/function_twa.py
# tools
python3 test.py -f tools/taosdemo.py
diff --git a/tests/pytest/insert/nchar.py b/tests/pytest/insert/nchar.py
index b8e365f143abfca2eb39f258078fb59120c168e1..3319aa3c565b76d4d7fb9c33b04549e090c8062b 100644
--- a/tests/pytest/insert/nchar.py
+++ b/tests/pytest/insert/nchar.py
@@ -35,6 +35,8 @@ class TDTestCase:
tdSql.checkRows(2)
tdSql.checkData(1, 1, '涛思数据')
+ tdSql.error("insert into tb values (now, 'taosdata001')")
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/query/filterOtherTypes.py b/tests/pytest/query/filterOtherTypes.py
index 5033ffdb48237e6a758d67162af5757cce04252d..85d5a67bef04564741ae7c17bd12ee992fcc09f8 100644
--- a/tests/pytest/query/filterOtherTypes.py
+++ b/tests/pytest/query/filterOtherTypes.py
@@ -365,6 +365,54 @@ class TDTestCase:
# _ for binary type on tag case 5
tdSql.query("select * from st where tagcol3 like '_据'")
tdSql.checkRows(0)
+
+ # test case for https://jira.taosdata.com:18080/browse/TD-857
+ tdSql.execute("create database test")
+ tdSql.execute("use test")
+ tdSql.execute("create table meters(ts timestamp, voltage int) tags(tag1 binary(20))")
+ tdSql.execute("create table t1 using meters tags('beijing')")
+ tdSql.execute("create table t2 using meters tags('nanjing')")
+
+ tdSql.execute("insert into t1 values(1538548685000, 1) (1538548685001, 2) (1538548685002, 3)")
+ tdSql.execute("insert into t2 values(1538548685000, 4) (1538548685001, 5) (1538548685002, 6)")
+
+ tdSql.query("select * from t1 where tag1 like '%g'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from t2 where tag1 like '%g'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from meters where tag1 like '%g'")
+ tdSql.checkRows(6)
+
+ tdSql.execute("create table meters1(ts timestamp, voltage int) tags(tag1 nchar(20))")
+ tdSql.execute("create table t3 using meters1 tags('北京')")
+ tdSql.execute("create table t4 using meters1 tags('南京')")
+ tdSql.execute("create table t5 using meters1 tags('beijing')")
+ tdSql.execute("create table t6 using meters1 tags('nanjing')")
+
+ tdSql.execute("insert into t3 values(1538548685000, 1) (1538548685001, 2) (1538548685002, 3)")
+ tdSql.execute("insert into t4 values(1538548685000, 4) (1538548685001, 5) (1538548685002, 6)")
+ tdSql.execute("insert into t5 values(1538548685000, 1) (1538548685001, 2) (1538548685002, 3)")
+ tdSql.execute("insert into t6 values(1538548685000, 1) (1538548685001, 2) (1538548685002, 3)")
+
+ tdSql.query("select * from t3 where tag1 like '%京'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from t4 where tag1 like '%京'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from meters1 where tag1 like '%京'")
+ tdSql.checkRows(6)
+
+ tdSql.query("select * from t5 where tag1 like '%g'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from t6 where tag1 like '%g'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from meters1 where tag1 like '%g'")
+ tdSql.checkRows(6)
def stop(self):
tdSql.close()
diff --git a/tests/pytest/query/natualInterval.py b/tests/pytest/query/natualInterval.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ed91e1c6812a93bd5995cb0249781611fa210b4
--- /dev/null
+++ b/tests/pytest/query/natualInterval.py
@@ -0,0 +1,170 @@
+###################################################################
+# Copyright (c) 2020 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+
+ def singleTable(self):
+ tdSql.execute("create table car(ts timestamp, s int)")
+ tdSql.execute("insert into car values('2019-01-01 00:00:00', 1)")
+ tdSql.execute("insert into car values('2019-05-13 12:00:00', 1)")
+ tdSql.execute("insert into car values('2019-12-31 23:59:59', 1)")
+ tdSql.execute("insert into car values('2020-01-01 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-01-02 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-01-03 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-01-04 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-01-05 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-01-31 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-02-01 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-02-02 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-02-29 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-03-01 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-03-02 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-03-15 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-03-31 12:00:00', 1)")
+ tdSql.execute("insert into car values('2020-05-01 12:00:00', 1)")
+
+ tdSql.query("select count(*) from car interval(1n)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 1)
+ tdSql.checkData(3, 1, 6)
+ tdSql.checkData(4, 1, 3)
+ tdSql.checkData(5, 1, 4)
+ tdSql.checkData(6, 1, 1)
+
+ tdSql.query("select count(*) from car interval(1n) order by ts desc")
+ tdSql.checkData(6, 1, 1)
+ tdSql.checkData(5, 1, 1)
+ tdSql.checkData(4, 1, 1)
+ tdSql.checkData(3, 1, 6)
+ tdSql.checkData(2, 1, 3)
+ tdSql.checkData(1, 1, 4)
+ tdSql.checkData(0, 1, 1)
+
+ tdSql.query("select count(*) from car interval(2n)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 1)
+ tdSql.checkData(3, 1, 9)
+ tdSql.checkData(4, 1, 4)
+ tdSql.checkData(5, 1, 1)
+
+ tdSql.query("select count(*) from car interval(2n) order by ts desc")
+ tdSql.checkData(5, 1, 1)
+ tdSql.checkData(4, 1, 1)
+ tdSql.checkData(3, 1, 1)
+ tdSql.checkData(2, 1, 9)
+ tdSql.checkData(1, 1, 4)
+ tdSql.checkData(0, 1, 1)
+
+ tdSql.query("select count(*) from car interval(1y)")
+ tdSql.checkData(0, 1, 3)
+ tdSql.checkData(1, 1, 14)
+
+ tdSql.query("select count(*) from car interval(2y)")
+ tdSql.checkData(0, 1, 3)
+ tdSql.checkData(1, 1, 14)
+
+
+ def superTable(self):
+ tdSql.execute("create table cars(ts timestamp, s int) tags(id int)")
+ tdSql.execute("create table car0 using cars tags(0)")
+ tdSql.execute("create table car1 using cars tags(0)")
+ tdSql.execute("create table car2 using cars tags(0)")
+ tdSql.execute("create table car3 using cars tags(0)")
+ tdSql.execute("create table car4 using cars tags(0)")
+
+ tdSql.execute("insert into car0 values('2019-01-01 00:00:00', 1)")
+ tdSql.execute("insert into car1 values('2019-05-13 12:00:00', 1)")
+ tdSql.execute("insert into car2 values('2019-12-31 23:59:59', 1)")
+ tdSql.execute("insert into car1 values('2020-01-01 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-01-02 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-01-03 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-01-04 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-01-05 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-01-31 12:00:00', 1)")
+ tdSql.execute("insert into car1 values('2020-02-01 12:00:00', 1)")
+ tdSql.execute("insert into car2 values('2020-02-02 12:00:00', 1)")
+ tdSql.execute("insert into car2 values('2020-02-29 12:00:00', 1)")
+ tdSql.execute("insert into car3 values('2020-03-01 12:00:00', 1)")
+ tdSql.execute("insert into car3 values('2020-03-02 12:00:00', 1)")
+ tdSql.execute("insert into car3 values('2020-03-15 12:00:00', 1)")
+ tdSql.execute("insert into car4 values('2020-03-31 12:00:00', 1)")
+ tdSql.execute("insert into car3 values('2020-05-01 12:00:00', 1)")
+
+ tdSql.query("select count(*) from cars interval(1n)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 1)
+ tdSql.checkData(3, 1, 6)
+ tdSql.checkData(4, 1, 3)
+ tdSql.checkData(5, 1, 4)
+ tdSql.checkData(6, 1, 1)
+
+ tdSql.query("select count(*) from cars interval(1n) order by ts desc")
+ tdSql.checkData(6, 1, 1)
+ tdSql.checkData(5, 1, 1)
+ tdSql.checkData(4, 1, 1)
+ tdSql.checkData(3, 1, 6)
+ tdSql.checkData(2, 1, 3)
+ tdSql.checkData(1, 1, 4)
+ tdSql.checkData(0, 1, 1)
+
+ tdSql.query("select count(*) from cars interval(2n)")
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 1)
+ tdSql.checkData(2, 1, 1)
+ tdSql.checkData(3, 1, 9)
+ tdSql.checkData(4, 1, 4)
+ tdSql.checkData(5, 1, 1)
+
+ tdSql.query("select count(*) from cars interval(2n) order by ts desc")
+ tdSql.checkData(5, 1, 1)
+ tdSql.checkData(4, 1, 1)
+ tdSql.checkData(3, 1, 1)
+ tdSql.checkData(2, 1, 9)
+ tdSql.checkData(1, 1, 4)
+ tdSql.checkData(0, 1, 1)
+
+ tdSql.query("select count(*) from cars interval(1y)")
+ tdSql.checkData(0, 1, 3)
+ tdSql.checkData(1, 1, 14)
+
+ tdSql.query("select count(*) from cars interval(2y)")
+ tdSql.checkData(0, 1, 3)
+ tdSql.checkData(1, 1, 14)
+
+
+ def run(self):
+ tdSql.prepare()
+ self.singleTable()
+ self.superTable()
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/queryConnection.py b/tests/pytest/query/queryConnection.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed05b5e6bda9d60701d6f957b00311e77aebe356
--- /dev/null
+++ b/tests/pytest/query/queryConnection.py
@@ -0,0 +1,52 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def run(self):
+ tdSql.prepare()
+
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, tagtype int) tags(dev nchar(50))")
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_001 using st tags("dev_01")')
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags("dev_02")')
+
+ tdSql.execute(
+ """INSERT INTO dev_001(ts, tagtype) VALUES('2020-05-13 10:00:00.000', 1),
+ ('2020-05-13 10:00:00.001', 1)
+ dev_002 VALUES('2020-05-13 10:00:00.001', 1)""")
+
+ for i in range(10):
+ for j in range(1000):
+ tdSql.query("select * from db.st")
+ tdLog.sleep(10)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/queryJoin.py b/tests/pytest/query/queryJoin.py
index 6ea240a334b4f4baf267aaacf53cfa0940c6c388..17027cf498ff9e87b558866cd4d1e6a8c865afc0 100644
--- a/tests/pytest/query/queryJoin.py
+++ b/tests/pytest/query/queryJoin.py
@@ -114,6 +114,32 @@ class TDTestCase:
tdSql.error("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.pid, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
+ # test case for https://jira.taosdata.com:18080/browse/TD-1250
+
+ tdSql.execute("create table meters1(ts timestamp, voltage int) tags(tag1 binary(20), tag2 nchar(20))")
+ tdSql.execute("create table t1 using meters1 tags('beijing', 'chaoyang')")
+ tdSql.execute("create table t2 using meters1 tags('shanghai', 'xuhui')")
+ tdSql.execute("insert into t1 values(1538548685000, 1) (1538548685001, 2) (1538548685002, 3)")
+ tdSql.execute("insert into t1 values(1538548685004, 4) (1538548685004, 5) (1538548685005, 6)")
+
+ tdSql.execute("create table meters2(ts timestamp, voltage int) tags(tag1 binary(20), tag2 nchar(20))")
+ tdSql.execute("create table t3 using meters2 tags('beijing', 'chaoyang')")
+ tdSql.execute("create table t4 using meters2 tags('shenzhen', 'nanshan')")
+ tdSql.execute("insert into t3 values(1538548685000, 7) (1538548685001, 8) (1538548685002, 9)")
+ tdSql.execute("insert into t4 values(1538548685000, 10) (1538548685001, 11) (1538548685002, 12)")
+
+ tdSql.execute("create table meters3(ts timestamp, voltage int) tags(tag1 binary(20), tag2 nchar(20))")
+
+ tdSql.query("select * from meters1, meters2 where meters1.ts = meters2.ts and meters1.tag1 = meters2.tag1")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from meters1, meters2 where meters1.ts = meters2.ts and meters1.tag2 = meters2.tag2")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from meters1, meters3 where meters1.ts = meters3.ts and meters1.tag1 = meters3.tag1")
+ tdSql.checkRows(0)
+
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/tag_lite/bigint.py b/tests/pytest/tag_lite/bigint.py
index ecc6fad44d9a66acb2aaf4e47560eff5204a6cfe..f83961aaad401f337a1dc4f0d305391f22006b9a 100644
--- a/tests/pytest/tag_lite/bigint.py
+++ b/tests/pytest/tag_lite/bigint.py
@@ -575,6 +575,20 @@ class TDTestCase:
# TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT
# convert end
+ tdSql.execute("create database db")
+ tdSql.execute("use db")
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, tagtype int) tags(dev bigint)")
+
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 63) - 1))
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags(%d)' % (-1 * pow(2, 63) + 1))
+
+ print("==============step2")
+ tdSql.query("show tables")
+ tdSql.checkRows(2)
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/tag_lite/binary.py b/tests/pytest/tag_lite/binary.py
index a5757dc5cd664114da066cbfde485ca680e9a6d8..4cbae63bec1ec6a885f015a69bb4192e93cc9629 100644
--- a/tests/pytest/tag_lite/binary.py
+++ b/tests/pytest/tag_lite/binary.py
@@ -579,6 +579,20 @@ class TDTestCase:
# TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT
# convert end
+ tdSql.execute("create database db")
+ tdSql.execute("use db")
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, tagtype int) tags(dev binary(5))")
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags("dev_001")')
+
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags("dev")')
+
+ print("==============step2")
+ tdSql.query("show tables")
+ tdSql.checkRows(1)
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/tag_lite/int.py b/tests/pytest/tag_lite/int.py
index 99d4a69624812e853bfaa2ffad89c9d9e88879a5..d5a69173893c4e5c8341b6235c5dd8465f913111 100644
--- a/tests/pytest/tag_lite/int.py
+++ b/tests/pytest/tag_lite/int.py
@@ -574,6 +574,24 @@ class TDTestCase:
# TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT
# convert end
+ tdSql.execute("create database db")
+ tdSql.execute("use db")
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, tagtype int) tags(dev int)")
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % pow(2, 31))
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (-1 * pow(2, 31)))
+
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 31) - 1))
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags(%d)' % (-1 * pow(2, 31) + 1))
+
+ print("==============step2")
+ tdSql.query("show tables")
+ tdSql.checkRows(2)
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/tag_lite/nchar.py b/tests/pytest/tag_lite/nchar.py
new file mode 100644
index 0000000000000000000000000000000000000000..851cc32b56c9104e2f87d5c8ea58ffa0d56ac1a6
--- /dev/null
+++ b/tests/pytest/tag_lite/nchar.py
@@ -0,0 +1,48 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def run(self):
+ tdSql.prepare()
+
+ print("==============step1")
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, tagtype int) tags(dev nchar(5))")
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags("dev_001")')
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags("dev")')
+
+
+ print("==============step2")
+ tdSql.query("show tables")
+ tdSql.checkRows(1)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tag_lite/smallint.py b/tests/pytest/tag_lite/smallint.py
index 089af55a3496bfaf59e833c18d5f7d34175c3212..c060e3f82b6b5f2b6d32fd50e5ea8181f655ad9b 100644
--- a/tests/pytest/tag_lite/smallint.py
+++ b/tests/pytest/tag_lite/smallint.py
@@ -574,6 +574,23 @@ class TDTestCase:
# TSIM:
# TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT
# convert end
+ tdSql.execute("create database db")
+ tdSql.execute("use db")
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, tagtype int) tags(dev smallint)")
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % pow(2, 15))
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (-1 * pow(2, 15)))
+
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 15) - 1))
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags(%d)' % (-1 * pow(2, 15) + 1))
+
+ print("==============step2")
+ tdSql.query("show tables")
+ tdSql.checkRows(2)
def stop(self):
tdSql.close()
diff --git a/tests/pytest/tag_lite/tinyint.py b/tests/pytest/tag_lite/tinyint.py
index 55e33d013beaddccf769aec4b28719d1e5cb9db8..089dd46569578c1ca33d73639cb36a0bba03e0f0 100644
--- a/tests/pytest/tag_lite/tinyint.py
+++ b/tests/pytest/tag_lite/tinyint.py
@@ -575,6 +575,24 @@ class TDTestCase:
# TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT
# convert end
+ tdSql.execute("create database db")
+ tdSql.execute("use db")
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, tagtype int) tags(dev tinyint)")
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % pow(2, 7))
+ tdSql.error(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (-1 * pow(2, 7)))
+
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_001 using st tags(%d)' % (pow(2, 7) - 1))
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags(%d)' % (-1 * pow(2, 7) + 1))
+
+ print("==============step2")
+ tdSql.query("show tables")
+ tdSql.checkRows(2)
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim
index 15cc2954e87c7c6f2e23472e8e22d347844bc6cf..d137e53d2721842e4921b10450b3432589c15e37 100644
--- a/tests/script/fullGeneralSuite.sim
+++ b/tests/script/fullGeneralSuite.sim
@@ -24,6 +24,7 @@ run general/compute/diff2.sim
run general/compute/first.sim
run general/compute/interval.sim
run general/compute/last.sim
+run general/compute/last_row.sim
run general/compute/leastsquare.sim
run general/compute/max.sim
run general/compute/min.sim
diff --git a/tests/script/general/compute/last_row.sim b/tests/script/general/compute/last_row.sim
new file mode 100644
index 0000000000000000000000000000000000000000..cc5cc3edbbf47f57eec723eba007c79fc03f150b
--- /dev/null
+++ b/tests/script/general/compute/last_row.sim
@@ -0,0 +1,175 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+sql connect
+
+$dbPrefix = m_la_db
+$tbPrefix = m_la_tb
+$mtPrefix = m_la_mt
+$tbNum = 10
+$rowNum = 20
+$totalNum = 200
+
+print =============== step1
+$i = 0
+$db = $dbPrefix . $i
+$mt = $mtPrefix . $i
+
+sql drop database $db -x step1
+step1:
+sql create database $db
+sql use $db
+sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int)
+
+$i = 0
+while $i < $tbNum
+ $tb = $tbPrefix . $i
+ sql create table $tb using $mt tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ms = $x . m
+ sql insert into $tb values (now + $ms , $x )
+ $x = $x + 1
+ endw
+
+ $i = $i + 1
+endw
+
+sleep 100
+
+print =============== step2
+$i = 1
+$tb = $tbPrefix . $i
+
+sql select last_row(tbcol) from $tb
+print ===> $data00
+if $data00 != 19 then
+ return -1
+endi
+
+print =============== step3
+sql select last_row(tbcol) from $tb where ts < now + 4m
+print ===> $data00
+if $data00 != 4 then
+ return -1
+endi
+
+print =============== step4
+sql select last_row(tbcol) as b from $tb
+print ===> $data00
+if $data00 != 19 then
+ return -1
+endi
+
+
+
+print =============== step7
+sql select last_row(tbcol) from $mt
+print ===> $data00
+if $data00 != 19 then
+ return -1
+endi
+
+print =============== step8
+sql select last_row(tbcol) as c from $mt where ts < now + 4m
+print ===> $data00
+if $data00 != 4 then
+ return -1
+endi
+
+sql select last_row(tbcol) as c from $mt where tgcol < 5
+print ===> $data00
+if $data00 != 19 then
+ return -1
+endi
+
+sql select last_row(tbcol) as c from $mt where tgcol < 5 and ts < now + 4m
+print ===> $data00
+if $data00 != 4 then
+ return -1
+endi
+
+
+
+print =============== step10
+sql select last_row(tbcol) as b from $mt group by tgcol
+print ===> $data00
+if $data00 != 19 then
+ return -1
+endi
+
+if $rows != $tbNum then
+ return -1
+endi
+
+print =============== step11
+
+sql insert into $tb values(now + 1h, 10)
+sql insert into $tb values(now + 3h, null)
+sql insert into $tb values(now + 5h, -1)
+sql insert into $tb values(now + 7h, null)
+
+## for super table
+sql select last_row(*) from $mt where ts < now + 6h
+if $data01 != -1 then
+ return -1
+endi
+
+sql select last_row(*) from $mt where ts < now + 8h
+if $data01 != NULL then
+ return -1
+endi
+
+sql select last_row(*) from $mt
+if $data01 != NULL then
+ return -1
+endi
+
+sql select last_row(*) from $mt where ts < now + 4h
+if $data01 != NULL then
+ return -1
+endi
+
+sql select last_row(*) from $mt where ts > now + 1h and ts < now + 4h
+if $data01 != NULL then
+ return -1
+endi
+
+## for table
+sql select last_row(*) from $tb where ts < now + 6h
+if $data01 != -1 then
+ return -1
+endi
+
+sql select last_row(*) from $tb where ts < now + 8h
+if $data01 != NULL then
+ return -1
+endi
+
+sql select last_row(*) from $tb
+if $data01 != NULL then
+ return -1
+endi
+
+sql select last_row(*) from $tb where ts < now + 4h
+if $data01 != NULL then
+ return -1
+endi
+
+sql select last_row(*) from $tb where ts > now + 1h and ts < now + 4h
+if $data01 != NULL then
+ return -1
+endi
+
+print =============== clear
+sql drop database $db
+sql show databases
+if $rows != 0 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index c9e081a70b0691ee0b8f47c29fde4735c95cf78d..40598332d998191402cc411e0c6156f302d99267 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -293,6 +293,7 @@ cd ../../../debug; make
./test.sh -f unique/stable/replica3_dnode6.sim
./test.sh -f unique/stable/replica3_vnode3.sim
+./test.sh -f unique/mnode/mgmt21.sim
./test.sh -f unique/mnode/mgmt22.sim
./test.sh -f unique/mnode/mgmt23.sim
./test.sh -f unique/mnode/mgmt24.sim
diff --git a/tests/script/jenkins/unique.txt b/tests/script/jenkins/unique.txt
index 06edb8890a9f8b9c246703397cec3718313f637d..b271f5b726bd6e9e50265420ec6cfa6695b0f551 100644
--- a/tests/script/jenkins/unique.txt
+++ b/tests/script/jenkins/unique.txt
@@ -1,22 +1,11 @@
cd ../../../debug; cmake ..
cd ../../../debug; make
-./test.sh -f unique/account/account_create.sim
-./test.sh -f unique/account/account_delete.sim
-./test.sh -f unique/account/account_len.sim
-./test.sh -f unique/account/authority.sim
-./test.sh -f unique/account/basic.sim
-./test.sh -f unique/account/paras.sim
-./test.sh -f unique/account/pass_alter.sim
-./test.sh -f unique/account/pass_len.sim
-./test.sh -f unique/account/usage.sim
-./test.sh -f unique/account/user_create.sim
-./test.sh -f unique/account/user_len.sim
-
./test.sh -f unique/big/balance.sim
./test.sh -f unique/big/maxvnodes.sim
./test.sh -f unique/big/tcp.sim
+./test.sh -f unique/cluster/alter.sim
./test.sh -f unique/cluster/balance1.sim
./test.sh -f unique/cluster/balance2.sim
./test.sh -f unique/cluster/balance3.sim
@@ -61,6 +50,7 @@ cd ../../../debug; make
./test.sh -f unique/stable/replica3_dnode6.sim
./test.sh -f unique/stable/replica3_vnode3.sim
+./test.sh -f unique/mnode/mgmt21.sim
./test.sh -f unique/mnode/mgmt22.sim
./test.sh -f unique/mnode/mgmt23.sim
./test.sh -f unique/mnode/mgmt24.sim
@@ -77,31 +67,16 @@ cd ../../../debug; make
./test.sh -f unique/vnode/replica3_repeat.sim
./test.sh -f unique/vnode/replica3_vgroup.sim
-./test.sh -f general/stream/metrics_1.sim
-./test.sh -f general/stream/metrics_del.sim
-./test.sh -f general/stream/metrics_n.sim
-./test.sh -f general/stream/metrics_replica1_vnoden.sim
-#./test.sh -f general/stream/new_stream.sim
-./test.sh -f general/stream/restart_stream.sim
-./test.sh -f general/stream/stream_1.sim
-./test.sh -f general/stream/stream_2.sim
-./test.sh -f general/stream/stream_3.sim
-./test.sh -f general/stream/stream_restart.sim
-./test.sh -f general/stream/table_1.sim
-./test.sh -f general/stream/table_del.sim
-./test.sh -f general/stream/table_n.sim
-./test.sh -f general/stream/table_replica1_vnoden.sim
-
./test.sh -f unique/arbitrator/check_cluster_cfg_para.sim
-./test.sh -f unique/arbitrator/dn2_mn1_cache_file_sync.sim
+#./test.sh -f unique/arbitrator/dn2_mn1_cache_file_sync.sim
./test.sh -f unique/arbitrator/dn3_mn1_full_createTableFail.sim
-./test.sh -f unique/arbitrator/dn3_mn1_full_dropDnodeFail.sim
./test.sh -f unique/arbitrator/dn3_mn1_multiCreateDropTable.sim
-./test.sh -f unique/arbitrator/dn3_mn1_nw_disable_timeout_autoDropDnode.sim
-./test.sh -f unique/arbitrator/dn3_mn1_replica2_wal1_AddDelDnode.sim
+#./test.sh -f unique/arbitrator/dn3_mn1_nw_disable_timeout_autoDropDnode.sim
+#./test.sh -f unique/arbitrator/dn3_mn1_replica2_wal1_AddDelDnode.sim
./test.sh -f unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim
./test.sh -f unique/arbitrator/dn3_mn1_replica_change.sim
-./test.sh -f unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
+#./test.sh -f unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
+# lower the priority while file corruption
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_change.sim
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_online.sim
@@ -126,6 +101,7 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/replica_changeWithArbitrator.sim
./test.sh -f unique/arbitrator/sync_replica2_alterTable_add.sim
./test.sh -f unique/arbitrator/sync_replica2_alterTable_drop.sim
+
./test.sh -f unique/arbitrator/sync_replica2_dropDb.sim
./test.sh -f unique/arbitrator/sync_replica2_dropTable.sim
./test.sh -f unique/arbitrator/sync_replica3_alterTable_add.sim
diff --git a/tests/script/tmp/182.sim b/tests/script/tmp/182.sim
new file mode 100644
index 0000000000000000000000000000000000000000..a178282cf835b236a202c71f1ef5a595e784324c
--- /dev/null
+++ b/tests/script/tmp/182.sim
@@ -0,0 +1,41 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+system sh/cfg.sh -n dnode3 -c walLevel -v 2
+system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c activeCode -v eglxDLzRpslJWl7OxrPZ2K3sQ5631AP9SVpezsaz2dhJWl7OxrPZ2ElaXs7Gs9nYSVpezsaz2djGIj5StnQ3ZvLHcsE8cwcN
+
+system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
+system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
+system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
+system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 4
+
+system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 100000
+system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 100000
+system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 100000
+system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 100000
+
+system sh/cfg.sh -n dnode1 -c http -v 1
+system sh/cfg.sh -n dnode2 -c http -v 1
+system sh/cfg.sh -n dnode3 -c http -v 1
+system sh/cfg.sh -n dnode4 -c http -v 1
+
+system sh/cfg.sh -n dnode1 -c httpMaxThreads -v 4
+
+system sh/cfg.sh -n dnode1 -c firstEp -v 127.0.0.1:6030
+system sh/cfg.sh -n dnode1 -c secondEp -v 127.0.0.1:6030
+system sh/cfg.sh -n dnode1 -c serverPort -v 6030
+system sh/cfg.sh -n dnode1 -c fqdn -v 127.0.0.1
+system sh/exec.sh -n dnode1 -s start
diff --git a/tests/script/tmp/prepare.sim b/tests/script/tmp/prepare.sim
index ab2ad01c32f752ae1e1e0dd3afab3c7a221160df..8b8f206233f9125362cb29c45817819488b67b6e 100644
--- a/tests/script/tmp/prepare.sim
+++ b/tests/script/tmp/prepare.sim
@@ -32,6 +32,7 @@ system sh/cfg.sh -n dnode2 -c http -v 1
system sh/cfg.sh -n dnode3 -c http -v 1
system sh/cfg.sh -n dnode4 -c http -v 1
+return
# for crash_gen
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
system sh/cfg.sh -n dnode1 -c rpcMaxTime -v 101
diff --git a/tests/script/unique/account/usage.sim b/tests/script/unique/account/usage.sim
index 3f12c4613736fa3022a9608c4703fbd2baec9e06..7fde3652017f5b17e61a39891b1f55c5940e2061 100644
--- a/tests/script/unique/account/usage.sim
+++ b/tests/script/unique/account/usage.sim
@@ -112,21 +112,41 @@ print =============== step4
sql insert into d1.t1 values(now + 1s, 1)
sql insert into d1.t1 values(now + 2s, 2)
-# no write auth
-sleep 3000
+sleep 10000
+print no write auth
sql_error insert into d1.t1 values(now + 3s, 2)
sql_error insert into d1.t1 values(now + 4s, 2)
sql alter account root pass "taosdata" tseries 10 storage 36 streams 10 dbs 5 users 5
-sleep 3000
+sleep 10000
+print has write auth
sql insert into d1.t1 values(now + 5s, 1)
sql insert into d1.t1 values(now + 6s, 2)
# no write auth
-sleep 3000
+sleep 10000
+print no write auth
sql_error insert into d1.t1 values(now + 7s, 2)
sql_error insert into d1.t1 values(now + 8s, 2)
+print =============== step5
+sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all
+sleep 10000
+
+sql insert into d1.t1 values(now + 11s, 1)
+sql insert into d1.t1 values(now + 12s, 2)
+
+sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state no
+sleep 10000
+print no write auth
+sql_error insert into d1.t1 values(now + 13s, 2)
+sql_error insert into d1.t1 values(now + 14s, 2)
+
+sql alter account root pass "taosdata" tseries 10 storage 3600 streams 10 dbs 5 users 5 state all
+sleep 10000
+print has write auth
+sql insert into d1.t1 values(now + 15s, 1)
+sql insert into d1.t1 values(now + 16s, 2)
print =============== check grant
sql_error create database d6
diff --git a/tests/script/unique/mnode/mgmt21.sim b/tests/script/unique/mnode/mgmt21.sim
new file mode 100644
index 0000000000000000000000000000000000000000..53ad0eebe7cc1798647226452479db47d1476528
--- /dev/null
+++ b/tests/script/unique/mnode/mgmt21.sim
@@ -0,0 +1,44 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
+
+print ============== step1
+system sh/exec.sh -n dnode2 -s start
+sleep 10000
+
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+sql show mnodes
+print dnode1 ==> $data2_1
+print dnode2 ==> $data2_2
+if $data2_1 != master then
+ return -1
+endi
+
+print ============== step2
+sql create dnode $hostname2
+
+$x = 0
+show2:
+ $x = $x + 1
+ sleep 2000
+ if $x == 10 then
+ return -1
+ endi
+
+sql show mnodes
+print dnode1 ==> $data2_1
+print dnode2 ==> $data2_2
+if $data2_1 != master then
+ goto show2
+endi
+if $data2_2 != slave then
+ goto show2
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/unique/mnode/testSuite.sim b/tests/script/unique/mnode/testSuite.sim
index 33df24b860d90ffd9d168a6788998b0955b2f347..b9adbe06a282548d56d7e7feb8a36f99198d8c0d 100644
--- a/tests/script/unique/mnode/testSuite.sim
+++ b/tests/script/unique/mnode/testSuite.sim
@@ -1,3 +1,4 @@
+run unique/mnode/mgmt21.sim
run unique/mnode/mgmt22.sim
run unique/mnode/mgmt23.sim
run unique/mnode/mgmt24.sim
diff --git a/tests/script/wtest.bat b/tests/script/wtest.bat
index 1574b5013ef411972ce47b30d0558db5bf57b118..6cdd63b42d28437dded9dc7f99a3888b232c16fc 100644
--- a/tests/script/wtest.bat
+++ b/tests/script/wtest.bat
@@ -6,8 +6,8 @@ echo Start TDengine Testing Case ...
set "SCRIPT_DIR=%~dp0"
echo SCRIPT_DIR: %SCRIPT_DIR%
-set "BUILD_DIR=%~dp0..\..\debug\build\bin"
-set "TSIM=%~dp0..\..\debug\build\bin\tsim"
+set "BUILD_DIR=%~dp0..\..\debug\32\build\bin"
+set "TSIM=%~dp0..\..\debug\32\build\bin\tsim"
echo BUILD_DIR: %BUILD_DIR%
set "SIM_DIR=%~dp0..\..\sim"
@@ -47,7 +47,7 @@ echo qdebugFlag 143 >> %TAOS_CFG%
echo udebugFlag 143 >> %TAOS_CFG%
set "FILE_NAME=windows\testSuite.sim"
-set "FIRSTEP=localhost"
+set "FIRSTEP=192.168.1.182"
if "%1" == "-f" set "FILE_NAME=%2"
if "%1" == "-h" set "FIRSTEP=%2"
if "%3" == "-f" set "FILE_NAME=%4"
diff --git a/tests/test/c/CMakeLists.txt b/tests/test/c/CMakeLists.txt
index 9d118951046a19a1190db4a666dca2ee6e8a2277..e1fedaee3cecb8f55fa882c71b32ab49014bb5ea 100644
--- a/tests/test/c/CMakeLists.txt
+++ b/tests/test/c/CMakeLists.txt
@@ -25,6 +25,9 @@ IF (TD_LINUX)
#add_executable(createTablePerformance createTablePerformance.c)
#target_link_libraries(createTablePerformance taos_static tutil common pthread)
- add_executable(createNormalTable createNormalTable.c)
- target_link_libraries(createNormalTable taos_static tutil common pthread)
+ #add_executable(createNormalTable createNormalTable.c)
+ #target_link_libraries(createNormalTable taos_static tutil common pthread)
+
+ add_executable(queryPerformance queryPerformance.c)
+ target_link_libraries(queryPerformance taos_static tutil common pthread)
ENDIF()
diff --git a/tests/test/c/queryPerformance.c b/tests/test/c/queryPerformance.c
new file mode 100644
index 0000000000000000000000000000000000000000..eda082dd4f293f0879603f6b71cc59150d6cfb3d
--- /dev/null
+++ b/tests/test/c/queryPerformance.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#define _DEFAULT_SOURCE
+#include "os.h"
+#include "taos.h"
+#include "tulog.h"
+#include "ttimer.h"
+#include "tutil.h"
+#include "tglobal.h"
+
+#define MAX_RANDOM_POINTS 20000
+#define GREEN "\033[1;32m"
+#define NC "\033[0m"
+
+// Per-thread bookkeeping handed to each worker thread.
+// NOTE(review): startTimeMs/endTimeMs are never written or read in this
+// file — presumably reserved for per-thread timing; confirm before relying on them.
+typedef struct {
+  int64_t startTimeMs;
+  int64_t endTimeMs;
+  int threadIndex;   // 0-based index of this worker, set by queryData()
+  pthread_t thread;  // worker thread handle, joined by queryData()
+} SInfo;
+
+// Forward declarations for the benchmark entry points defined below.
+void *syncTest(void *param);
+void shellParseArgument(int argc, char *argv[]);
+void queryData();
+
+// Defaults for the benchmark; each is overridable from the command line
+// (see shellParseArgument: -t, -g, -r, -s respectively).
+int numOfThreads = 10;                      // -t: number of worker threads
+int useGlobalConn = 1;                      // -g: 1 = all threads share one connection
+int requestPerThread = 10000;               // -r: queries issued by each thread
+char requestSql[10240] = "show dnodes";     // -s: SQL statement to execute
+TAOS *globalConn;                           // shared connection when useGlobalConn is set
+
+// Entry point: parse CLI flags, initialize the client library, run the
+// benchmark. Falls off the end of main, which returns 0 under C99.
+int main(int argc, char *argv[]) {
+  shellParseArgument(argc, argv);
+  taos_init();
+  queryData();
+}
+
+// Spawn numOfThreads workers, each issuing requestPerThread queries, then
+// report total wall time, aggregate QPS, and per-request latency.
+void queryData() {
+  struct timeval systemTime;
+  int64_t st, et;  // start/end wall clock, in MICROseconds (despite the struct's *Ms field names)
+  char fqdn[TSDB_FQDN_LEN];
+  uint16_t port;
+
+  if (useGlobalConn) {
+    // Shared-connection mode (-g 1, the default): open one connection here
+    // and let every worker reuse it.
+    taosGetFqdnPortFromEp(tsFirst, fqdn, &port);
+
+    globalConn = taos_connect(fqdn, "root", "taosdata", NULL, port);
+    if (globalConn == NULL) {
+      // NOTE(review): taos_errstr() is called on a NULL connection here —
+      // confirm the client library tolerates that and returns a useful message.
+      pError("failed to connect to DB, reason:%s", taos_errstr(globalConn));
+      exit(1);
+    }
+  }
+
+  pPrint("%d threads are spawned to query", numOfThreads);
+
+  gettimeofday(&systemTime, NULL);
+  st = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
+
+  pthread_attr_t thattr;
+  pthread_attr_init(&thattr);
+  pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+  SInfo *pInfo = (SInfo *)malloc(sizeof(SInfo) * numOfThreads);
+
+  // Start threads to write
+  for (int i = 0; i < numOfThreads; ++i) {
+    pInfo[i].threadIndex = i;
+    pthread_create(&(pInfo[i].thread), &thattr, syncTest, (void *)(pInfo + i));
+  }
+
+  // Brief pause before joining; threads are joinable, so this is not
+  // required for correctness — presumably just lets startup logs settle.
+  taosMsleep(300);
+  for (int i = 0; i < numOfThreads; i++) {
+    pthread_join(pInfo[i].thread, NULL);
+  }
+
+  gettimeofday(&systemTime, NULL);
+  et = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
+  double totalTimeMs = (et - st) / 1000.0;  // microseconds -> milliseconds
+
+  int totalReq = requestPerThread * numOfThreads;
+  // Threads run concurrently but each issues its requests serially, so the
+  // average per-request latency is wall time / requests-per-thread, while
+  // QPS is computed over ALL requests.
+  float rspTime = totalTimeMs / requestPerThread;
+  float qps = totalReq / (totalTimeMs / 1000);
+
+  pPrint("%s threads:%d, totalTime %.1fms totalReq:%d qps:%.1f rspTime:%.3fms %s", GREEN, numOfThreads, totalTimeMs,
+         totalReq, qps, rspTime, NC);
+
+  pthread_attr_destroy(&thattr);
+  free(pInfo);
+  // NOTE(review): globalConn is never taos_close()d — acceptable for a
+  // short-lived benchmark process, but worth confirming this is intentional.
+}
+
+// Worker thread body: obtain a connection (shared or private depending on
+// useGlobalConn), then execute requestSql requestPerThread times, draining
+// and freeing each result set. Always returns NULL.
+void *syncTest(void *param) {
+  TAOS * con;
+  SInfo * pInfo = (SInfo *)param;
+  char fqdn[TSDB_FQDN_LEN];
+  uint16_t port;
+
+  if (useGlobalConn) {
+    pPrint("thread:%d, start to run use global connection", pInfo->threadIndex);
+    con = globalConn;  // shared connection opened by queryData()
+  } else {
+    pPrint("thread:%d, start to run, and create new conn", pInfo->threadIndex);
+    taosGetFqdnPortFromEp(tsFirst, fqdn, &port);
+
+    con = taos_connect(fqdn, "root", "taosdata", NULL, port);
+    if (con == NULL) {
+      // NOTE(review): taos_errstr() on a NULL connection — confirm the
+      // client library handles this.
+      pError("index:%d, failed to connect to DB, reason:%s", pInfo->threadIndex, taos_errstr(con));
+      exit(1);
+    }
+  }
+
+  for (int i = 0; i < requestPerThread; ++i) {
+    void *tres = taos_query(con, requestSql);
+
+    // First fetch doubles as an "any rows?" probe.
+    TAOS_ROW row = taos_fetch_row(tres);
+    if (row == NULL) {
+      taos_free_result(tres);
+      // NOTE(review): exit(0) here terminates the WHOLE process from a
+      // worker thread as soon as one query returns no rows — confirm this
+      // early-exit is intended rather than a per-thread return.
+      exit(0);
+    }
+
+    // Drain the remaining rows so the result set can be freed cleanly.
+    do {
+      row = taos_fetch_row(tres);
+    } while (row != NULL);
+
+    taos_free_result(tres);
+  }
+  // NOTE(review): in the !useGlobalConn path, con is never taos_close()d.
+  return NULL;
+}
+
+// Print CLI usage (each flag with its current default) and exit successfully.
+void printHelp() {
+  char indent[10] = "  ";  // indent unit; doubled for option descriptions
+  printf("Used to test the query performance of TDengine\n");
+
+  printf("%s%s\n", indent, "-c");
+  printf("%s%s%s%s\n", indent, indent, "Configuration directory, default is ", configDir);
+  printf("%s%s\n", indent, "-s");
+  printf("%s%s%s%s\n", indent, indent, "The sql to be executed, default is ", requestSql);
+  printf("%s%s\n", indent, "-r");
+  printf("%s%s%s%d\n", indent, indent, "Request per thread, default is ", requestPerThread);
+  printf("%s%s\n", indent, "-t");
+  printf("%s%s%s%d\n", indent, indent, "Number of threads to be used, default is ", numOfThreads);
+  printf("%s%s\n", indent, "-g");
+  printf("%s%s%s%d\n", indent, indent, "Whether to share connections between threads, default is ", useGlobalConn);
+
+  exit(EXIT_SUCCESS);
+}
+
+// Parse command-line flags into the module-level globals, then echo the
+// effective configuration. Unrecognized arguments are silently ignored.
+// NOTE(review): every value-taking flag reads argv[++i] without checking
+// i+1 < argc, so a trailing bare flag reads past the argument list; -c and
+// -s also strcpy into fixed-size buffers with no length check — confirm
+// inputs are trusted (this is an internal test tool).
+void shellParseArgument(int argc, char *argv[]) {
+  for (int i = 1; i < argc; i++) {
+    if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) {
+      printHelp();
+      exit(0);
+    } else if (strcmp(argv[i], "-c") == 0) {
+      strcpy(configDir, argv[++i]);
+    } else if (strcmp(argv[i], "-s") == 0) {
+      strcpy(requestSql, argv[++i]);
+    } else if (strcmp(argv[i], "-r") == 0) {
+      requestPerThread = atoi(argv[++i]);
+    } else if (strcmp(argv[i], "-t") == 0) {
+      numOfThreads = atoi(argv[++i]);
+    } else if (strcmp(argv[i], "-g") == 0) {
+      useGlobalConn = atoi(argv[++i]);
+    } else {
+      // unknown argument: ignored by design
+    }
+  }
+
+  pPrint("%s sql:%s %s", GREEN, requestSql, NC);
+  pPrint("%s requestPerThread:%d %s", GREEN, requestPerThread, NC);
+  pPrint("%s numOfThreads:%d %s", GREEN, numOfThreads, NC);
+  pPrint("%s useGlobalConn:%d %s", GREEN, useGlobalConn, NC);
+  pPrint("%s start to run %s", GREEN, NC);
+}