Commit 64b12891 authored by sangshuduo

Merge branch 'develop' into feature/sangshuduo/snap

......@@ -19,6 +19,7 @@ SET(TD_MEM_CHECK FALSE)
SET(TD_PAGMODE_LITE FALSE)
SET(TD_SOMODE_STATIC FALSE)
SET(TD_POWER FALSE)
SET(TD_GODLL FALSE)
SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
......
......@@ -29,24 +29,69 @@ For user manual, system design and architecture, engineering blogs, refer to [TD
# Building
At the moment, TDengine only supports building and running on Linux systems. You can choose to [install from packages](https://www.taosdata.com/en/getting-started/#Install-from-Package) or from the source code. This quick guide is for installation from the source only.
To build TDengine, use [CMake](https://cmake.org/) 3.5 or higher versions in the project directory.
## Install tools
### Ubuntu & Debian:
```bash
sudo apt-get install -y gcc cmake build-essential git
```
To compile and package the JDBC driver source code, you should have Java JDK 8 or higher and Apache Maven 2.7 or higher installed.
To install openjdk-8:
```bash
sudo apt-get install -y openjdk-8-jdk
```
To install Apache Maven:
```bash
sudo apt-get install -y maven
```
### CentOS 7:
```bash
sudo yum install -y gcc gcc-c++ make cmake3 epel-release git
sudo yum remove -y cmake
sudo ln -s /usr/bin/cmake3 /usr/bin/cmake
```
To install openjdk-8:
```bash
sudo yum install -y java-1.8.0-openjdk
```
To install Apache Maven:
```bash
sudo yum install -y maven
```
### CentOS 8 & Fedora:
```bash
sudo dnf install -y gcc gcc-c++ make cmake epel-release git
```
To install openjdk-8:
```bash
sudo dnf install -y java-1.8.0-openjdk
```
To install Apache Maven:
```bash
sudo dnf install -y maven
```
## Get the source code
- GitHub:
```bash
git clone https://github.com/taosdata/TDengine.git
cd TDengine
```
## Build TDengine
```bash
mkdir debug && cd debug
cmake .. && cmake --build .
```
......@@ -54,12 +99,12 @@ cmake .. && cmake --build .
To compile on an ARM processor (aarch64 or aarch32), please add option CPUTYPE as below:
aarch64:
```bash
cmake .. -DCPUTYPE=aarch64 && cmake --build .
```
aarch32:
```bash
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
......@@ -124,6 +169,7 @@ The TDengine community has also kindly built some of their own connectors! Follo
- [Rust Connector](https://github.com/taosdata/TDengine/tree/master/tests/examples/rust)
- [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos)
- [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/tests/examples/lua)
# How to run the test cases and how to add a new test case?
TDengine's test framework and all test cases are fully open source.
......
......@@ -17,6 +17,10 @@ IF (TD_GODLL)
ADD_DEFINITIONS(-D_TD_GO_DLL_)
ENDIF ()
IF (TD_POWER)
ADD_DEFINITIONS(-D_TD_POWER_)
ENDIF ()
IF (TD_MEM_CHECK)
ADD_DEFINITIONS(-DTAOS_MEM_CHECK)
ENDIF ()
......
......@@ -27,6 +27,11 @@ IF (${SOMODE} MATCHES "static")
MESSAGE(STATUS "Link so using static mode")
ENDIF ()
IF (${DBNAME} MATCHES "power")
SET(TD_POWER TRUE)
MESSAGE(STATUS "power is true")
ENDIF ()
IF (${DLLTYPE} MATCHES "go")
SET(TD_GODLL TRUE)
MESSAGE(STATUS "input dll type: " ${DLLTYPE})
......
......@@ -4,7 +4,12 @@ IF (TD_LINUX)
INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})")
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR})")
ELSEIF (TD_WINDOWS)
IF (TD_POWER)
SET(CMAKE_INSTALL_PREFIX C:/PowerDB)
ELSE ()
SET(CMAKE_INSTALL_PREFIX C:/TDengine)
ENDIF ()
IF (NOT TD_GODLL)
#INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
#INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector)
......@@ -14,8 +19,14 @@ ELSEIF (TD_WINDOWS)
INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver)
IF (TD_POWER)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/power.exe DESTINATION .)
ELSE ()
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
ENDIF ()
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
......@@ -30,4 +41,4 @@ ELSEIF (TD_DARWIN)
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})")
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Darwin)")
ENDIF ()
......@@ -52,7 +52,7 @@ TDengine is an efficient platform for storing, querying and analyzing time-series big data, specially designed
## [Connectors](https://www.taosdata.com/cn/documentation20/connector)
- [C/C++ Connector](https://www.taosdata.com/cn/documentation20/connector/#C/C++-Connector): the primary way to connect to a TDengine server, through the libtaos client library
- [Java Connector (JDBC)](https://www.taosdata.com/cn/documentation20/connector-java): provides Java applications with a connection to TDengine through the standard JDBC API
- [Python Connector](https://www.taosdata.com/cn/documentation20/connector/#Python-Connector): a driver that lets Python applications connect to a TDengine server
- [RESTful Connector](https://www.taosdata.com/cn/documentation20/connector/#RESTful-Connector): the simplest way to connect to a TDengine server
- [Go Connector](https://www.taosdata.com/cn/documentation20/connector/#Go-Connector): a driver that lets Go applications connect to a TDengine server
......
......@@ -42,7 +42,7 @@ TDengine's default timestamp precision is millisecond, but it can be changed through the configuration parameter enableMic
| | Type | Bytes | Description |
| ---- | :-------: | ------ | ------------------------------------------------------------ |
| 1 | TIMESTAMP | 8 | Timestamp. Default precision is millisecond; microsecond is also supported. Time starts from 1970-01-01 00:00:00.000 (UTC/GMT); timestamps earlier than that are not allowed. |
| 2 | INT | 4 | Integer, range [-2^31+1, 2^31-1]; -2^31 is used for NULL |
| 3 | BIGINT | 8 | Long integer, range [-2^63+1, 2^63-1]; -2^63 is used for NULL |
| 4 | FLOAT | 4 | Float, 6-7 significant digits, range [-3.4E38, 3.4E38] |
......
......@@ -280,365 +280,10 @@ TDengine provides a time-driven, real-time stream computing API that runs at a specified time
Cancels a subscription. If the parameter `keepProgress` is non-zero, the API keeps the subscription's progress information, and a later call to `taos_subscribe` can continue from that progress; otherwise the progress information is deleted and subsequent reads can only start over from the beginning.
## Python Connector
### Installation prerequisites
* TDengine is installed. If the client runs on Windows, the Windows version of the TDengine client must be installed [(Windows TDengine client installation)][4]
* Python 2.7 or >= 3.4 is installed
* pip is installed
......@@ -1137,18 +782,5 @@ promise2.then(function(result) {
[Here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js) is likewise a code sample that uses the Node.js connector to create a table, insert weather data, and query the inserted data; unlike the example above, it uses only `cursor`.
[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[3]: https://github.com/taosdata/TDengine
[5]: https://github.com/brettwooldridge/HikariCP
[6]: https://github.com/alibaba/druid
[7]: https://github.com/taosdata/TDengine/issues
[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[10]: https://maven.aliyun.com/mvn/search
[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
[14]: https://www.taosdata.com/cn/documentation20/connector/#Windows
[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B
[4]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
# Java Connector
TDengine provides `taos-jdbcdriver`, an implementation that follows the JDBC standard (3.0) API specification, to make TDengine easy to use from Java applications. It can currently be searched for and downloaded from the [Sonatype Repository][1].
Because TDengine is developed in C, the taos-jdbcdriver driver package depends on the corresponding native library of the operating system.
* libtaos.so
After TDengine is successfully installed on a Linux system, the required native library libtaos.so is automatically copied to /usr/lib/libtaos.so, which is on Linux's default library search path, so it does not need to be specified separately.
* taos.dll
After the client is installed on a Windows system, the taos.dll file that the driver depends on is automatically copied to the default system search path C:/Windows/System32, so it likewise does not need to be specified separately.
> Note: when developing on Windows you need to install the matching TDengine [Windows client][14]. Once TDengine is installed on a Linux server the client is installed by default; you can also install the [Linux client][15] separately to connect to a remote TDengine server.
TDengine's JDBC driver implementation stays as close as possible to relational database drivers, but the differences in served objects and technical characteristics between a time-series database and a relational/object database mean that taos-jdbcdriver does not implement the full JDBC specification. Keep the following points in mind:
* TDengine does not support deleting or updating individual records, so the driver provides no such methods.
* Because delete and update are not supported, transactions are not supported either.
* Union operations across tables are not currently supported.
* Nested queries are not currently supported. Each Connection instance can have at most one open ResultSet at a time; if a new query is executed while a ResultSet is still open, TSDBJDBCDriver automatically closes the previous ResultSet.
## TAOS-JDBCDriver versions and supported TDengine and JDK versions
| taos-jdbcdriver version | TDengine version | JDK version |
| --- | --- | --- |
| 2.0.4 | 2.0.0.x and above | 1.8.x |
| 1.0.3 | 1.6.1.x and above | 1.8.x |
| 1.0.2 | 1.6.1.x and above | 1.8.x |
| 1.0.1 | 1.6.1.x and above | 1.8.x |
## TDengine DataType and Java DataType
TDengine currently supports timestamp, numeric, character, and boolean types, which map to Java types as follows (a getter sketch follows the table):
| TDengine DataType | Java DataType |
| --- | --- |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT |java.lang.Short |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
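For reference, the sketch below (hypothetical column names, not from the original document) shows the standard JDBC getter that corresponds to each mapping in the table:
```java
// Assumes a ResultSet rs positioned on a row of a hypothetical table covering these column types.
java.sql.Timestamp ts = rs.getTimestamp("ts"); // TIMESTAMP
int intVal       = rs.getInt("c_int");         // INT
long bigintVal   = rs.getLong("c_bigint");     // BIGINT
float floatVal   = rs.getFloat("c_float");     // FLOAT
double doubleVal = rs.getDouble("c_double");   // DOUBLE
short shortVal   = rs.getShort("c_smallint");  // SMALLINT, TINYINT
boolean boolVal  = rs.getBoolean("c_bool");    // BOOL
String strVal    = rs.getString("c_binary");   // BINARY, NCHAR
```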
## How to obtain TAOS-JDBCDriver
### Maven repository
taos-jdbcdriver has been published to the [Sonatype Repository][1], and the major mirror repositories are synchronized with it.
* [sonatype][8]
* [mvnrepository][9]
* [maven.aliyun][10]
In a Maven project, add the following to pom.xml:
```xml
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.4</version>
</dependency>
```
### Building from source
After downloading the [TDengine][3] source code, change to the taos-jdbcdriver source directory `src/connector/jdbc` and run `mvn clean package` to generate the jar.
## Usage
### Obtaining a connection
A TDengine Connection can be obtained with the following configuration:
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
> Port 6030 is the default connection port, and `log` in the JDBC URL is the system's own monitoring database.
The canonical format of a TDengine JDBC URL is:
`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
Items in `{}` are required and items in `[]` are optional. The parameters are described below, followed by an example URL after the list:
* user: TDengine login user name; default value root.
* password: login password; default value taosdata.
* charset: character set used by the client; defaults to the system character set.
* cfgdir: directory of the client configuration file; default /etc/taos on Linux and C:/TDengine/cfg on Windows.
* locale: client locale; defaults to the current system locale.
* timezone: time zone used by the client; defaults to the current system time zone.
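As an example (a hypothetical URL assembled from the parameters above; the database name `test` and the parameter values are illustrative only, not from the original document):
```java
// Illustrative only: sets charset, locale, and timezone directly in the URL.
String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata"
        + "&charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8";
Connection conn = DriverManager.getConnection(jdbcUrl);
```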
These parameters can be configured in three places, `in descending order of priority`:
1. JDBC URL parameters
As described above, they can be specified as parameters of the JDBC URL.
2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
return conn;
}
```
3. The client configuration file taos.cfg
On Linux the default configuration file is /var/lib/taos/taos.cfg; on Windows the default path is C:\TDengine\cfg\taos.cfg.
```properties
# client default username
# defaultUser root
# client default password
# defaultPass taosdata
# default system charset
# charset UTF-8
# system locale
# locale en_US.UTF-8
```
> For more configuration details, see [Client Configuration][13].
### Creating a database and table
```java
Statement stmt = conn.createStatement();
// create database
stmt.executeUpdate("create database if not exists db");
// use database
stmt.executeUpdate("use db");
// create table
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
```
> Note: if you do not select a database with `use db`, subsequent table operations must prefix the table name with the database name, e.g. db.tb.
### Inserting data
```java
// insert data
int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
System.out.println("insert " + affectedRows + " rows.");
```
> now is a built-in function that defaults to the server's current time.
> `now + 1s` means the server's current time plus 1 second; the letter after the number is the time unit: a (millisecond), s (second), m (minute), h (hour), d (day), w (week), n (month), y (year).
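The sketch below (not part of the original example; it reuses the `stmt` and the `tb` table created above) shows a few of these unit suffixes in insert statements:
```java
// Each timestamp is an offset from the server's current time, using a different unit suffix.
stmt.executeUpdate("insert into tb values(now + 10a, 21, 8.9)"); // 10 milliseconds later
stmt.executeUpdate("insert into tb values(now + 1m, 22, 9.1)");  // 1 minute later
stmt.executeUpdate("insert into tb values(now + 2h, 24, 9.8)");  // 2 hours later
```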
### Querying data
```java
// query data
ResultSet resultSet = stmt.executeQuery("select * from tb");
Timestamp ts = null;
int temperature = 0;
float humidity = 0;
while(resultSet.next()){
ts = resultSet.getTimestamp(1);
temperature = resultSet.getInt(2);
humidity = resultSet.getFloat("humidity");
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```
> Querying works the same way as with a relational database: when retrieving returned fields by index, indexes start at 1; retrieving by column name is recommended.
### Subscriptions
#### Creating a subscription
```java
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
```
The three parameters of `subscribe` are:
* topic: the subject (i.e. name) of the subscription; this parameter is the subscription's unique identifier
* sql: the subscription's query statement; it can only be a `select` statement, should query only raw data, and can only query data in ascending time order
* restart: whether to start over or to continue the previous subscription if one with the same name already exists
The example above creates a subscription named `topic` with the SQL statement `select * from meters`; if this subscription already exists, it continues from the previous query progress rather than consuming all data from the beginning.
#### Consuming data
```java
int total = 0;
while(true) {
TSDBResultSet rs = sub.consume();
int count = 0;
while(rs.next()) {
count++;
}
total += count;
System.out.printf("%d rows consumed, total %d\n", count, total);
Thread.sleep(1000);
}
```
`consume` returns a result set containing all new data since the previous call to `consume`. Be sure to call `consume` at a frequency that matches your needs (such as the `Thread.sleep(1000)` in the example); otherwise it puts unnecessary pressure on the server.
#### Closing a subscription
```java
sub.close(true);
```
`close` closes a subscription. If its argument is `true`, the subscription's progress information is kept and a subscription with the same name can be created later to continue consuming data; if `false`, the progress information is not kept.
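As a sketch of how the kept progress can be used (not from the original document): after `sub.close(true)`, a later run can re-create the subscription with the same topic and `restart = false` to continue from the saved position:
```java
// restart = false: continue from the progress saved by the earlier close(true).
TSDBSubscribe sub2 = ((TSDBConnection) conn).subscribe("topic", "select * from meters", false);
TSDBResultSet rs = sub2.consume(); // returns only data not yet consumed
```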
### Releasing resources
```java
resultSet.close();
stmt.close();
conn.close();
```
> `Be sure to close the connection`; otherwise connections will leak.
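As a minimal sketch (not from the original document), the same cleanup can be written with try-with-resources so that the connection, statement, and result set are closed even when an exception is thrown; it assumes the `jdbcUrl` defined earlier:
```java
// Resources declared in the try header are closed automatically, in reverse order.
try (Connection conn = DriverManager.getConnection(jdbcUrl);
     Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery("select * from tb")) {
    while (rs.next()) {
        // process each row ...
    }
}
```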
## Using with connection pools
**HikariCP**
* Add the HikariCP Maven dependency:
```xml
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
<version>3.4.1</version>
</dependency>
```
* Usage example:
```java
public static void main(String[] args) throws SQLException {
HikariConfig config = new HikariConfig();
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
config.setUsername("root");
config.setPassword("taosdata");
config.setMinimumIdle(3); //minimum number of idle connection
config.setMaximumPoolSize(10); //maximum number of connection in the pool
config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
config.setIdleTimeout(60000); // max idle time for recycle idle connection
config.setConnectionTestQuery("describe log.dn"); //validation query
config.setValidationTimeout(3000); //validation query timeout
HikariDataSource ds = new HikariDataSource(config); //create datasource
Connection connection = ds.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
connection.close(); // put back to conneciton pool
}
```
> After obtaining a connection with HikariDataSource.getConnection(), call close() when you are done; this does not actually close the connection, it only returns it to the pool.
> For more on using HikariCP, see the [official documentation][5].
**Druid**
* Add the Druid Maven dependency:
```xml
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>1.1.20</version>
</dependency>
```
* Usage example:
```java
public static void main(String[] args) throws Exception {
Properties properties = new Properties();
properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
properties.put("username","root");
properties.put("password","taosdata");
properties.put("maxActive","10"); //maximum number of connection in the pool
properties.put("initialSize","3");//initial number of connection
properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
properties.put("minIdle","3");//minimum number of connection in the pool
properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
properties.put("validationQuery","describe log.dn"); //validation query
properties.put("testWhileIdle","true"); // test connection while idle
properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
//create druid datasource
DataSource ds = DruidDataSourceFactory.createDataSource(properties);
Connection connection = ds.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
connection.close(); // put back to conneciton pool
}
```
> For more on using Druid, see the [official documentation][6].
**Notes**
* Starting with TDengine `v1.6.4.1`, a dedicated heartbeat function `select server_status()` is provided, so `select server_status()` is the recommended validation query when using connection pools.
As shown below, `select server_status()` returns `1` when it succeeds; a pool configuration sketch follows the sample output.
```shell
taos> select server_status();
server_status()|
================
1 |
Query OK, 1 row(s) in set (0.000141s)
```
## Using with frameworks
* For using taos-jdbcdriver with Spring JdbcTemplate, see [SpringJdbcTemplate][11]
* For using it with Spring Boot + MyBatis, see [springbootdemo][12]
## FAQ
* java.lang.UnsatisfiedLinkError: no taos in java.library.path
**Cause**: the program cannot find the native library taos that it depends on.
**Solution**: on Windows, copy C:\TDengine\driver\taos.dll to the C:\Windows\System32\ directory; on Linux, create the symlink `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
**Cause**: TDengine currently supports only 64-bit JDKs.
**Solution**: reinstall a 64-bit JDK.
* For other problems, please see [Issues][7]
[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[3]: https://github.com/taosdata/TDengine
[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/
[5]: https://github.com/brettwooldridge/HikariCP
[6]: https://github.com/alibaba/druid
[7]: https://github.com/taosdata/TDengine/issues
[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[10]: https://maven.aliyun.com/mvn/search
[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B
......@@ -23,17 +23,87 @@
If the client encounters a connection failure, check the following:
1. Check the network environment
* Cloud server: check whether the cloud server's security group allows access to TCP/UDP ports 6030-6042
* Local virtual machine: check that the host can be pinged, and avoid using `localhost` as the hostname
* Corporate server: in a NAT network environment, be sure to check whether the server can return messages to the client
2. Make sure the client and server versions are exactly the same; the open-source community edition and the enterprise edition must not be mixed either
3. On the server, run `systemctl status taosd` to check whether *taosd* is running; if it is not, start *taosd*
4. Make sure the client connects with the correct server FQDN (Fully Qualified Domain Name, obtainable on the server with the Linux command hostname -f)
5. Ping the server FQDN; if there is no response, check your network, DNS settings, or the hosts file of the machine the client runs on
6. Check the firewall settings and confirm that TCP/UDP ports 6030-6042 are open
7. For JDBC (and similarly ODBC, Python, Go) connections on Linux, make sure *libtaos.so* is in */usr/local/lib/taos* and that */usr/local/lib/taos* is in the system library search path *LD_LIBRARY_PATH*
8. For JDBC, ODBC, Python, Go, etc. connections on Windows, make sure *driver/c/taos.dll* is in your system search path (placing *taos.dll* in *C:\Windows\System32* is recommended)
9. If the connection failure still cannot be isolated, use the command-line tool nc to check whether TCP and UDP connections on the given port work
Check whether the UDP port works: `nc -vuz {hostIP} {port}`
Check whether the server-side TCP port works: `nc -l {port}`
Check whether the client-side TCP port works: `nc {hostIP} {port}`
10. You can also use the network connectivity test built into the taos program to verify that the specified ports between server and client are reachable (both TCP and UDP).
taos uses the -n parameter to choose between the server-side and the client-side test: -n server runs the server-side test, -n client runs the client-side test.
1) First stop the taosd service on the server;
2) On the server, run the server side of taos's built-in connectivity test: taos -n server -P 6030 -e 6042 -l 1000;
3) On the client, run the client side of the built-in connectivity test: taos -n client -h host -P 6030 -e 6042 -l 1000;
-n: whether to run the server-side or the client-side connectivity test; empty by default, meaning the connectivity test is not started;
-h: the server name, either an IP address or an FQDN, e.g. 192.168.1.160, 192.168.1.160:6030, hostname1, or hostname1:6030; default 127.0.0.1.
-P: the starting port to test; default 6030;
-e: the ending port to test; must be greater than or equal to the starting port; default 6042;
-l: the packet length used for the port connectivity test, at most 64000 bytes; default 1000 bytes; the server and the client must use the same value during a test;
The port range set on the server must contain the port range set on the client;
The starting port can be set in three ways: the default value, via -h, or via -P; the priority is: -P > -h > default.
Sample output on the client side:
`sum@sum-virtualBox /home/sum $ taos -n client -h ubuntu-vbox6`
`host: ubuntu-vbox6 start port: 6030 end port: 6042 packet len: 1000`
`tcp port:6030 test ok. udp port:6030 test ok.`
`tcp port:6031 test ok. udp port:6031 test ok.`
`tcp port:6032 test ok. udp port:6032 test ok.`
`tcp port:6033 test ok. udp port:6033 test ok.`
`tcp port:6034 test ok. udp port:6034 test ok.`
`tcp port:6035 test ok. udp port:6035 test ok.`
`tcp port:6036 test ok. udp port:6036 test ok.`
`tcp port:6037 test ok. udp port:6037 test ok.`
`tcp port:6038 test ok. udp port:6038 test ok.`
`tcp port:6039 test ok. udp port:6039 test ok.`
`tcp port:6040 test ok. udp port:6040 test ok.`
`tcp port:6041 test ok. udp port:6041 test ok.`
`tcp port:6042 test ok. udp port:6042 test ok.`
If a port is not reachable, a `port:xxxx test fail` message is printed.
## 6. What should I do when I encounter the error "Unexpected generic error in RPC"?
......
#!/bin/bash
#
# Modified from original source: Elastic Search
# https://github.com/elasticsearch/elasticsearch
# Thank you to the Elastic Search authors
#
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
# Provides: PowerDB
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts PowerDB powerd
# Description: Starts PowerDB powerd, a time-series database engine
### END INIT INFO
set -e
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
NAME="PowerDB"
USER="root"
GROUP="root"
DAEMON="/usr/local/power/bin/powerd"
DAEMON_OPTS=""
PID_FILE="/var/run/$NAME.pid"
APPARGS=""
# Maximum number of open files
MAX_OPEN_FILES=65535
. /lib/lsb/init-functions
case "$1" in
start)
log_action_begin_msg "Starting PowerDB..."
if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then
touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"
if [ -n "$MAX_OPEN_FILES" ]; then
ulimit -n $MAX_OPEN_FILES
fi
start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS
log_end_msg $?
fi
;;
stop)
log_action_begin_msg "Stopping PowerDB..."
set +e
if [ -f "$PID_FILE" ]; then
start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null
# Capture the exit status once so both branches test start-stop-daemon's result.
rc=$?
if [ $rc -eq 1 ]; then
log_action_cont_msg "$NAME is not running but pid file exists, cleaning up"
elif [ $rc -eq 3 ]; then
PID="`cat $PID_FILE`"
log_failure_msg "Failed to stop PowerDB (pid $PID)"
exit 1
fi
rm -f "$PID_FILE"
else
log_action_cont_msg "PowerDB was not running"
fi
log_action_end_msg 0
set -e
;;
restart|force-reload)
if [ -f "$PID_FILE" ]; then
$0 stop
sleep 1
fi
$0 start
;;
status)
status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
;;
*)
exit 1
;;
esac
exit 0
......@@ -11,6 +11,7 @@ set -e
# -V [stable | beta]
# -l [full | lite]
# -s [static | dynamic]
# -d [taos | power]
# -n [2.0.0.3]
# -m [2.0.0.0]
......@@ -21,10 +22,11 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
pagMode=full # [full | lite]
soMode=dynamic # [static | dynamic]
dbName=taos # [taos | power]
verNumber=""
verNumberComp="2.0.0.0"
while getopts "hv:V:c:o:l:s:n:m:" arg
while getopts "hv:V:c:o:l:s:d:n:m:" arg
do
case $arg in
v)
......@@ -47,6 +49,10 @@ do
#echo "soMode=$OPTARG"
soMode=$(echo $OPTARG)
;;
d)
#echo "dbName=$OPTARG"
dbName=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
......@@ -66,6 +72,7 @@ do
echo " -V [stable | beta] "
echo " -l [full | lite] "
echo " -s [static | dynamic] "
echo " -d [taos | power] "
echo " -n [version number] "
echo " -m [compatible version number] "
exit 0
......@@ -77,7 +84,7 @@ do
esac
done
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} verNumber=${verNumber} verNumberComp=${verNumberComp}"
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} verNumber=${verNumber} verNumberComp=${verNumberComp}"
curr_dir=$(pwd)
......@@ -170,9 +177,9 @@ cd ${compile_dir}
# check support cpu type
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then
if [ "$verMode" != "cluster" ]; then
cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode}
else
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp}
fi
else
echo "input cpuType=${cpuType} error!!!"
......@@ -185,7 +192,7 @@ cd ${curr_dir}
# 3. Call the corresponding script for packaging
if [ "$osType" != "Darwin" ]; then
if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]]; then
if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then
......@@ -208,11 +215,17 @@ if [ "$osType" != "Darwin" ]; then
echo "====do tar.gz package for all systems===="
cd ${script_dir}/tools
if [[ "$dbName" == "taos" ]]; then
${csudo} ./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
${csudo} ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
${csudo} ./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
else
${csudo} ./makepkg_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
${csudo} ./makeclient_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
${csudo} ./makearbi_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
fi
else
cd ${script_dir}/tools
./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${dbName}
fi
#!/bin/bash
#
# power This shell script takes care of starting and stopping PowerDB.
#
# chkconfig: 2345 99 01
# description: PowerDB is a distributed, scalable, high-performance Time Series Database
# (TSDB). More than just a pure database, PowerDB also provides the ability
# to do stream computing, aggregation etc.
#
#
### BEGIN INIT INFO
# Provides: powerd
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Short-Description: start and stop powerd
# Description: PowerDB is a distributed, scalable, high-performance Time Series Database
# (TSDB). More than just a pure database, PowerDB also provides the ability
# to do stream computing, aggregation etc.
### END INIT INFO
# Source init functions
. /etc/init.d/functions
# Maximum number of open files
MAX_OPEN_FILES=65535
# Default program options
NAME=powerd
PROG=/usr/local/power/bin/powerd
USER=root
GROUP=root
# Default directories
LOCK_DIR=/var/lock/subsys
PID_DIR=/var/run/$NAME
# Set file names
LOCK_FILE=$LOCK_DIR/$NAME
PID_FILE=$PID_DIR/$NAME.pid
[ -e $PID_DIR ] || mkdir -p $PID_DIR
PROG_OPTS=""
start() {
echo -n "Starting ${NAME}: "
# check identity
curid="`id -u -n`"
if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
echo "Must be run as root or $USER, but was run as $curid"
return 1
fi
# Sets the maximum number of open file descriptors allowed.
ulimit -n $MAX_OPEN_FILES
curulimit="`ulimit -n`"
if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
return 1
fi
if [ "`id -u -n`" == root ] ; then
# Changes the owner of the lock, and the pid files to allow
# non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE
touch $PID_FILE && chown $USER:$GROUP $PID_FILE
daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
else
# Don't have to change user.
daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
fi
retval=$?
sleep 2
echo
[ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
return $retval
}
stop() {
echo -n "Stopping ${NAME}: "
killproc -p $PID_FILE $NAME
retval=$?
echo
# Non-root users don't have enough permission to remove pid and lock files.
# So, the opentsdb_restart.py cannot get rid of the files, and the command
# "service opentsdb status" will complain about the existing pid file.
# Makes the pid file empty.
echo > $PID_FILE
[ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
return $retval
}
restart() {
stop
start
}
reload() {
restart
}
force_reload() {
restart
}
rh_status() {
# run checks to determine if the service is running or use generic status
status -p $PID_FILE -l $LOCK_FILE $NAME
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
rh_status_q || exit 7
$1
;;
force-reload)
force_reload
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
esac
exit $?
#!/bin/bash
#
# This file is used to install the arbitrator on linux systems. The operating system
# is required to use systemd to manage services at boot
set -e
#set -x
# -----------------------Variables definition---------------------
script_dir=$(dirname $(readlink -f "$0"))
bin_link_dir="/usr/bin"
#inc_link_dir="/usr/include"
#install main path
install_main_dir="/usr/local/tarbitrator"
# old bin dir
bin_dir="/usr/local/tarbitrator/bin"
service_config_dir="/etc/systemd/system"
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
fi
update_flag=0
initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
elif $(which service &> /dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
initd_mod=3
else
service_mod=2
fi
else
service_mod=2
fi
# get the operating system type for using the corresponding init file
# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
if [[ -e /etc/os-release ]]; then
osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
else
osinfo=""
fi
#echo "osinfo: ${osinfo}"
os_type=0
if echo $osinfo | grep -qwi "ubuntu" ; then
# echo "This is ubuntu system"
os_type=1
elif echo $osinfo | grep -qwi "debian" ; then
# echo "This is debian system"
os_type=1
elif echo $osinfo | grep -qwi "Kylin" ; then
# echo "This is Kylin system"
os_type=1
elif echo $osinfo | grep -qwi "centos" ; then
# echo "This is centos system"
os_type=2
elif echo $osinfo | grep -qwi "fedora" ; then
# echo "This is fedora system"
os_type=2
else
echo " osinfo: ${osinfo}"
echo " This is an officially unverified linux system,"
echo " if there are any problems with the installation and operation, "
echo " please feel free to contact taosdata.com for support."
os_type=1
fi
function kill_tarbitrator() {
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
}
function install_main_path() {
#create install main dir and all sub dir
${csudo} rm -rf ${install_main_dir} || :
${csudo} mkdir -p ${install_main_dir}
${csudo} mkdir -p ${install_main_dir}/bin
#${csudo} mkdir -p ${install_main_dir}/include
${csudo} mkdir -p ${install_main_dir}/init.d
}
function install_bin() {
# Remove links
${csudo} rm -f ${bin_link_dir}/rmtarbitrator || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
#Make link
[ -x ${install_main_dir}/bin/remove_arbi_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi_power.sh ${bin_link_dir}/rmtarbitrator || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
}
function install_header() {
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
function clean_service_on_sysvinit() {
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
if pidof tarbitrator &> /dev/null; then
${csudo} service tarbitratord stop || :
fi
if ((${initd_mod}==1)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} chkconfig --del tarbitratord || :
fi
elif ((${initd_mod}==2)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} insserv -r tarbitratord || :
fi
elif ((${initd_mod}==3)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} update-rc.d -f tarbitratord remove || :
fi
fi
${csudo} rm -f ${service_config_dir}/tarbitratord || :
if $(which init &> /dev/null); then
${csudo} init q || :
fi
}
function install_service_on_sysvinit() {
clean_service_on_sysvinit
sleep 1
# Install tarbitratord service
if ((${os_type}==1)); then
${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
elif ((${os_type}==2)); then
${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
fi
#restart_config_str="power:2345:respawn:${service_config_dir}/powerd start"
#${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
if ((${initd_mod}==1)); then
${csudo} chkconfig --add tarbitratord || :
${csudo} chkconfig --level 2345 tarbitratord on || :
elif ((${initd_mod}==2)); then
${csudo} insserv tarbitratord || :
${csudo} insserv -d tarbitratord || :
elif ((${initd_mod}==3)); then
${csudo} update-rc.d tarbitratord defaults || :
fi
}
function clean_service_on_systemd() {
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
if systemctl is-active --quiet tarbitratord; then
echo "tarbitrator is running, stopping it..."
${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
${csudo} rm -f ${tarbitratord_service_config}
}
# power:2345:respawn:/etc/init.d/tarbitratord start
function install_service_on_systemd() {
clean_service_on_systemd
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Description=PowerDB arbitrator service' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
${csudo} systemctl enable tarbitratord
}
function install_service() {
if ((${service_mod}==0)); then
install_service_on_systemd
elif ((${service_mod}==1)); then
install_service_on_sysvinit
else
# must manual stop taosd
kill_tarbitrator
fi
}
function update_PowerDB() {
# Start to update
echo -e "${GREEN}Start to update PowerDB's arbitrator ...${NC}"
# Stop the service if running
if pidof tarbitrator &> /dev/null; then
if ((${service_mod}==0)); then
${csudo} systemctl stop tarbitratord || :
elif ((${service_mod}==1)); then
${csudo} service tarbitratord stop || :
else
kill_tarbitrator
fi
sleep 1
fi
install_main_path
#install_header
install_bin
install_service
echo
#echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/taos/taos.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
elif ((${service_mod}==1)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
else
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
fi
echo
echo -e "\033[44;32;1mPowerDB's arbitrator is updated successfully!${NC}"
}
function install_PowerDB() {
# Start to install
echo -e "${GREEN}Start to install PowerDB's arbitrator ...${NC}"
install_main_path
#install_header
install_bin
install_service
echo
#echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/taos/taos.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
elif ((${service_mod}==1)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
else
echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
fi
echo -e "\033[44;32;1mPowerDB's arbitrator is installed successfully!${NC}"
echo
}
## ==============================Main program starts from here============================
# Install or update the arbitrator
if [ -x ${bin_dir}/tarbitrator ]; then
update_flag=1
update_PowerDB
else
install_PowerDB
fi
......@@ -85,7 +85,7 @@ function install_bin() {
# Remove links
${csudo} rm -f ${bin_link_dir}/taos || :
if [ "$osType" == "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/taosdemo || :
fi
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/set_core || :
......@@ -95,7 +95,7 @@ function install_bin() {
#Make link
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
if [ "$osType" == "Darwin" ]; then
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
fi
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
......
#!/bin/bash
#
# This file is used to install PowerDB client on linux systems. The operating system
# is required to use systemd to manage services at boot
set -e
#set -x
# -----------------------Variables definition---------------------
osType=Linux
pagMode=full
if [ "$osType" != "Darwin" ]; then
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
data_dir="/var/lib/power"
log_dir="/var/log/power"
else
script_dir=`dirname $0`
cd ${script_dir}
script_dir="$(pwd)"
data_dir="/var/lib/power"
log_dir="~/PowerDBLog"
fi
log_link_dir="/usr/local/power/log"
cfg_install_dir="/etc/power"
if [ "$osType" != "Darwin" ]; then
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
else
bin_link_dir="/usr/local/bin"
lib_link_dir="/usr/local/lib"
inc_link_dir="/usr/local/include"
fi
#install main path
install_main_dir="/usr/local/power"
# old bin dir
bin_dir="/usr/local/power/bin"
# v1.5 jar dir
#v15_java_app_dir="/usr/local/lib/power"
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
fi
update_flag=0
function kill_client() {
pid=$(ps -ef | grep "power" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
}
function install_main_path() {
#create install main dir and all sub dir
${csudo} rm -rf ${install_main_dir} || :
${csudo} mkdir -p ${install_main_dir}
${csudo} mkdir -p ${install_main_dir}/cfg
${csudo} mkdir -p ${install_main_dir}/bin
${csudo} mkdir -p ${install_main_dir}/connector
${csudo} mkdir -p ${install_main_dir}/driver
${csudo} mkdir -p ${install_main_dir}/examples
${csudo} mkdir -p ${install_main_dir}/include
}
function install_bin() {
# Remove links
${csudo} rm -f ${bin_link_dir}/power || :
if [ "$osType" == "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/powerdemo || :
fi
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
#Make link
[ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || :
if [ "$osType" == "Darwin" ]; then
[ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || :
fi
[ -x ${install_main_dir}/bin/remove_client_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_power.sh ${bin_link_dir}/rmpower || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
}
function clean_lib() {
sudo rm -f /usr/lib/libtaos.* || :
sudo rm -rf ${lib_dir} || :
}
function install_lib() {
# Remove links
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
#${csudo} rm -rf ${v15_java_app_dir} || :
${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
if [ "$osType" != "Darwin" ]; then
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
if [ -d "${lib64_link_dir}" ]; then
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
else
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
${csudo} ldconfig
}
function install_header() {
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
function install_config() {
#${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
${csudo} mkdir -p ${cfg_install_dir}
[ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/*
fi
${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
}
function install_log() {
${csudo} rm -rf ${log_dir} || :
if [ "$osType" != "Darwin" ]; then
${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
else
mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
fi
${csudo} ln -s ${log_dir} ${install_main_dir}/log
}
function install_connector() {
${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
}
function install_examples() {
if [ -d ${script_dir}/examples ]; then
${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
fi
}
function update_PowerDB() {
# Start to update
if [ ! -e power.tar.gz ]; then
echo "File power.tar.gz does not exist"
exit 1
fi
tar -zxf power.tar.gz
echo -e "${GREEN}Start to update PowerDB client...${NC}"
# Stop the client shell if running
if pidof power &> /dev/null; then
kill_client
sleep 1
fi
install_main_path
install_log
install_header
install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples
install_bin
install_config
echo
echo -e "\033[44;32;1mPowerDB client is updated successfully!${NC}"
rm -rf $(tar -tf power.tar.gz)
}
function install_PowerDB() {
# Start to install
if [ ! -e power.tar.gz ]; then
echo "File power.tar.gz does not exist"
exit 1
fi
tar -zxf power.tar.gz
echo -e "${GREEN}Start to install PowerDB client...${NC}"
install_main_path
install_log
install_header
install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples
install_bin
install_config
echo
echo -e "\033[44;32;1mPowerDB client is installed successfully!${NC}"
rm -rf $(tar -tf power.tar.gz)
}
## ==============================Main program starts from here============================
# Install or update the client
# if the server is already installed, there is no need to install the client
if [ -e ${bin_dir}/powerd ]; then
echo -e "\033[44;32;1mThere are already installed PowerDB server, so don't need install client!${NC}"
exit 0
fi
if [ -x ${bin_dir}/power ]; then
update_flag=1
update_PowerDB
else
install_PowerDB
fi
......@@ -179,19 +179,18 @@ function install_lib() {
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
versioninfo=$(${script_dir}/get_version.sh ${source_dir}/src/util/src/version.c)
if [ "$osType" != "Darwin" ]; then
${csudo} cp ${binary_dir}/build/lib/libtaos.so.${versioninfo} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib_link_dir}/libtaos.so.1
${csudo} cp ${binary_dir}/build/lib/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
if [ -d "${lib64_link_dir}" ]; then
${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib64_link_dir}/libtaos.so.1
${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1
${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so
fi
else
${csudo} cp ${binary_dir}/build/lib/libtaos.${versioninfo}.dylib ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -sf ${install_main_dir}/driver/libtaos.${versioninfo}.dylib ${lib_link_dir}/libtaos.1.dylib
${csudo} cp ${binary_dir}/build/lib/libtaos.* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
......
#!/bin/bash
#
# Generate arbitrator's tar.gz setup package for all os system
set -e
#set -x
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/PowerDB-enterprise-arbitrator"
else
install_dir="${release_dir}/PowerDB-arbitrator"
fi
# Directories and files.
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_power.sh"
install_files="${script_dir}/install_arbi_power.sh"
#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
# make directories.
mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_power.sh || :
#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${pkg_name}.tar.gz error !!!"
exit $exitcode
fi
cd ${curr_dir}
......@@ -41,7 +41,7 @@ fi
if [ "$osType" != "Darwin" ]; then
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taosd
#strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
else
......
#!/bin/bash
#
# Generate tar.gz package for linux client in all os system
set -e
#set -x
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
if [ "$osType" != "Darwin" ]; then
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
else
script_dir=`dirname $0`
cd ${script_dir}
script_dir="$(pwd)"
top_dir=${script_dir}/../..
fi
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/PowerDB-enterprise-client"
else
install_dir="${release_dir}/PowerDB-client"
fi
# Directories and files.
if [ "$osType" != "Darwin" ]; then
# if [ "$pagMode" == "lite" ]; then
# strip ${build_dir}/bin/powerd
# strip ${build_dir}/bin/power
# bin_files="${build_dir}/bin/power ${script_dir}/remove_client_power.sh"
# else
# bin_files="${build_dir}/bin/power ${build_dir}/bin/powerdemo ${script_dir}/remove_client_power.sh ${script_dir}/set_core.sh"
# fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
else
bin_files="${build_dir}/bin/power ${script_dir}/remove_client_power.sh"
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
fi
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
cfg_dir="${top_dir}/packaging/cfg"
install_files="${script_dir}/install_client_power.sh"
# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
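# rebrand the bundled config: point dataDir/logDir at power paths and replace TDengine with PowerDB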
sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/taos.cfg
mkdir -p ${install_dir}/bin
if [ "$osType" != "Darwin" ]; then
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taos
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${script_dir}/remove_power.sh ${install_dir}/bin
else
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${script_dir}/set_core.sh ${install_dir}/bin
fi
else
cp ${bin_files} ${install_dir}/bin
fi
chmod a+x ${install_dir}/bin/* || :
cd ${install_dir}
if [ "$osType" != "Darwin" ]; then
tar -zcv -f power.tar.gz * --remove-files || :
else
tar -zcv -f power.tar.gz * || :
mv power.tar.gz ..
rm -rf ./*
mv ../power.tar.gz .
fi
cd ${curr_dir}
cp ${install_files} ${install_dir}
if [ "$osType" == "Darwin" ]; then
sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_power.sh >> install_client_power_temp.sh
mv install_client_power_temp.sh ${install_dir}/install_client_power.sh
fi
if [ "$pagMode" == "lite" ]; then
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_power.sh >> install_client_power_temp.sh
mv install_client_power_temp.sh ${install_dir}/install_client_power.sh
fi
chmod a+x ${install_dir}/install_client_power.sh
# Copy example code
mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/tests/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
sed -i '/passwd/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c
sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/matlab/TDengineDemo.m
cp -r ${examples_dir}/python ${install_dir}/examples
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/python/read_example.py
cp -r ${examples_dir}/R ${install_dir}/examples
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt
cp -r ${examples_dir}/go ${install_dir}/examples
sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/src/taosapp/taosapp.go
fi
# Copy driver
mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver
# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
# exit 1
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$pagMode" == "lite" ]; then
pkg_name=${pkg_name}-Lite
fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stable or beta"
exit 1
fi
if [ "$osType" != "Darwin" ]; then
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
else
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
mv "$(basename ${pkg_name}).tar.gz" ..
rm -rf ./*
mv ../"$(basename ${pkg_name}).tar.gz" .
fi
cd ${curr_dir}
#!/bin/bash
#
# Generate tar.gz package for all os system
set -e
#set -x
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"
# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/PowerDB-enterprise-server"
else
install_dir="${release_dir}/PowerDB-server"
fi
# Directories and files.
#if [ "$pagMode" == "lite" ]; then
# strip ${build_dir}/bin/taosd
# strip ${build_dir}/bin/taos
# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${script_dir}/remove_power.sh"
#else
# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh ${script_dir}/set_core.sh"
#fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
cfg_dir="${top_dir}/packaging/cfg"
install_files="${script_dir}/install_power.sh"
nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
# Init file
#init_dir=${script_dir}/deb
#if [ $package_type = "centos" ]; then
# init_dir=${script_dir}/rpm
#fi
#init_files=${init_dir}/powerd
# temporarily use rpm's powerd. TODO: modify later according to OS type
init_file_deb=${script_dir}/../deb/powerd
init_file_rpm=${script_dir}/../rpm/powerd
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
#mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/bin
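# lite packages ship only the stripped power/powerd binaries; the full package also bundles powerdemo, tarbitrator and set_core.sh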
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos
# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${script_dir}/remove_power.sh"
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
cp ${script_dir}/remove_power.sh ${install_dir}/bin
else
# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh ${script_dir}/set_core.sh"
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin
fi
chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/powerd.deb
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/powerd.rpm
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_power.sh >> remove_power_temp.sh
mv remove_power_temp.sh ${install_dir}/bin/remove_power.sh
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
rm -rf ${install_dir}/nginxd/png
sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/*.html
sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/js/*.js
sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/taos.cfg
if [ "$cpuType" == "aarch64" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
elif [ "$cpuType" == "aarch32" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
fi
rm -rf ${install_dir}/nginxd/sbin/arm
fi
cd ${install_dir}
tar -zcv -f power.tar.gz * --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar power.tar.gz error !!!"
exit $exitcode
fi
cd ${curr_dir}
cp ${install_files} ${install_dir}
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_power.sh >> install_power_temp.sh
mv install_power_temp.sh ${install_dir}/install_power.sh
fi
if [ "$pagMode" == "lite" ]; then
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_power.sh >> install_power_temp.sh
mv install_power_temp.sh ${install_dir}/install_power.sh
fi
chmod a+x ${install_dir}/install_power.sh
# Copy example code
mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/tests/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
sed -i '/passwd/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c
sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/matlab/TDengineDemo.m
cp -r ${examples_dir}/python ${install_dir}/examples
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/python/read_example.py
cp -r ${examples_dir}/R ${install_dir}/examples
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt
cp -r ${examples_dir}/go ${install_dir}/examples
sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/src/taosapp/taosapp.go
fi
# Copy driver
mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver
# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
# exit 1
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$pagMode" == "lite" ]; then
pkg_name=${pkg_name}-Lite
fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${pkg_name}.tar.gz error !!!"
exit $exitcode
fi
cd ${curr_dir}
......@@ -102,36 +102,31 @@ function clean_log() {
function clean_service_on_systemd() {
taosd_service_config="${service_config_dir}/${taos_service_name}.service"
if systemctl is-active --quiet ${taos_service_name}; then
echo "TDengine taosd is running, stopping it..."
${csudo} systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${taosd_service_config}
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
if systemctl is-active --quiet ${tarbitrator_service_name}; then
echo "TDengine tarbitrator is running, stopping it..."
${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${tarbitratord_service_config}
if [ "$verMode" == "cluster" ]; then
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
if systemctl is-active --quiet ${tarbitrator_service_name}; then
echo "TDengine tarbitrator is running, stopping it..."
${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${tarbitratord_service_config}
nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
if [ -d ${bin_dir}/web ]; then
if systemctl is-active --quiet ${nginx_service_name}; then
echo "Nginx for TDengine is running, stopping it..."
${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${nginx_service_config}
nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
if [ -d ${bin_dir}/web ]; then
if systemctl is-active --quiet ${nginx_service_name}; then
echo "Nginx for TDengine is running, stopping it..."
${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${nginx_service_config}
fi
fi
}
......@@ -227,3 +222,4 @@ elif echo $osinfo | grep -qwi "centos" ; then
fi
echo -e "${GREEN}TDengine is removed successfully!${NC}"
echo
\ No newline at end of file
#!/bin/bash
#
# Script to stop the service and uninstall PowerDB's arbitrator
set -e
#set -x
verMode=edge
RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'
#install main path
install_main_dir="/usr/local/tarbitrator"
bin_link_dir="/usr/bin"
#inc_link_dir="/usr/include"
service_config_dir="/etc/systemd/system"
tarbitrator_service_name="tarbitratord"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
fi
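# service_mod: 0 = systemd, 1 = SysV init via service(8), 2 = no service manager (stop by kill)
# initd_mod:   1 = chkconfig, 2 = insserv, 3 = update-rc.d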
initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
elif $(which service &> /dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
initd_mod=3
else
service_mod=2
fi
else
service_mod=2
fi
function kill_tarbitrator() {
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
}
function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
}
function clean_header() {
# Remove link
${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
function clean_log() {
# Remove link
${csudo} rm -rf /arbitrator.log || :
}
function clean_service_on_systemd() {
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
if systemctl is-active --quiet ${tarbitrator_service_name}; then
echo "PowerDB tarbitrator is running, stopping it..."
${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${tarbitratord_service_config}
}
function clean_service_on_sysvinit() {
if pidof tarbitrator &> /dev/null; then
echo "PowerDB's tarbitrator is running, stopping it..."
${csudo} service tarbitratord stop || :
fi
if ((${initd_mod}==1)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} chkconfig --del tarbitratord || :
fi
elif ((${initd_mod}==2)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} insserv -r tarbitratord || :
fi
elif ((${initd_mod}==3)); then
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} update-rc.d -f tarbitratord remove || :
fi
fi
${csudo} rm -f ${service_config_dir}/tarbitratord || :
if $(which init &> /dev/null); then
${csudo} init q || :
fi
}
function clean_service() {
if ((${service_mod}==0)); then
clean_service_on_systemd
elif ((${service_mod}==1)); then
clean_service_on_sysvinit
else
# must stop manually
kill_tarbitrator
fi
}
# Stop service and disable booting start.
clean_service
# Remove binary file and links
clean_bin
# Remove header file.
##clean_header
# Remove log file
clean_log
${csudo} rm -rf ${install_main_dir}
echo -e "${GREEN}PowerDB's arbitrator is removed successfully!${NC}"
echo
\ No newline at end of file
......@@ -39,6 +39,7 @@ function clean_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/set_core || :
}
function clean_lib() {
......@@ -80,3 +81,4 @@ clean_config
${csudo} rm -rf ${install_main_dir}
echo -e "${GREEN}TDengine client is removed successfully!${NC}"
echo
#!/bin/bash
#
# Script to stop the client and uninstall the PowerDB client, but retain the config and log files.
set -e
# set -x
RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'
#install main path
install_main_dir="/usr/local/power"
log_link_dir="/usr/local/power/log"
cfg_link_dir="/usr/local/power/cfg"
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
# v1.5 jar dir
#v15_java_app_dir="/usr/local/lib/power"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
fi
function kill_client() {
#pid=$(ps -ef | grep "power" | grep -v "grep" | awk '{print $2}')
if [ -n "$(pidof power)" ]; then
${csudo} kill -9 $pid || :
fi
}
function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/power || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/set_core || :
}
function clean_lib() {
# Remove link
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
#${csudo} rm -rf ${v15_java_app_dir} || :
}
function clean_header() {
# Remove link
${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
function clean_config() {
# Remove link
${csudo} rm -f ${cfg_link_dir}/* || :
}
function clean_log() {
# Remove link
${csudo} rm -rf ${log_link_dir} || :
}
# Stop client.
kill_client
# Remove binary file and links
clean_bin
# Remove header file.
clean_header
# Remove lib file
clean_lib
# Remove link log directory
clean_log
# Remove link configuration file
clean_config
${csudo} rm -rf ${install_main_dir}
echo -e "${GREEN}PowerDB client is removed successfully!${NC}"
echo
#!/bin/bash
#
# Script to stop the service and uninstall PowerDB, but retain the config, data and log files.
set -e
#set -x
verMode=edge
RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'
#install main path
install_main_dir="/usr/local/power"
data_link_dir="/usr/local/power/data"
log_link_dir="/usr/local/power/log"
cfg_link_dir="/usr/local/power/cfg"
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
install_nginxd_dir="/usr/local/nginxd"
# v1.5 jar dir
#v15_java_app_dir="/usr/local/lib/power"
service_config_dir="/etc/systemd/system"
power_service_name="powerd"
tarbitrator_service_name="tarbitratord"
nginx_service_name="nginxd"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
fi
initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
elif $(which service &> /dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
initd_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
elif $(which update-rc.d &> /dev/null); then
initd_mod=3
else
service_mod=2
fi
else
service_mod=2
fi
function kill_powerd() {
pid=$(ps -ef | grep "powerd" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
}
function kill_tarbitrator() {
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
}
function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/power || :
${csudo} rm -f ${bin_link_dir}/powerd || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || :
}
function clean_lib() {
# Remove link
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
#${csudo} rm -rf ${v15_java_app_dir} || :
}
function clean_header() {
# Remove link
${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
function clean_config() {
# Remove link
${csudo} rm -f ${cfg_link_dir}/* || :
}
function clean_log() {
# Remove link
${csudo} rm -rf ${log_link_dir} || :
}
function clean_service_on_systemd() {
power_service_config="${service_config_dir}/${power_service_name}.service"
if systemctl is-active --quiet ${power_service_name}; then
echo "PowerDB powerd is running, stopping it..."
${csudo} systemctl stop ${power_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${power_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${power_service_config}
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
if systemctl is-active --quiet ${tarbitrator_service_name}; then
echo "TDengine tarbitrator is running, stopping it..."
${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${tarbitratord_service_config}
if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
if [ -d ${bin_dir}/web ]; then
if systemctl is-active --quiet ${nginx_service_name}; then
echo "Nginx for TDengine is running, stopping it..."
${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${nginx_service_config}
fi
fi
}
function clean_service_on_sysvinit() {
#restart_config_str="power:2345:respawn:${service_config_dir}/powerd start"
#${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
if pidof powerd &> /dev/null; then
echo "PowerDB powerd is running, stopping it..."
${csudo} service powerd stop || :
fi
if pidof tarbitrator &> /dev/null; then
echo "PowerDB tarbitrator is running, stopping it..."
${csudo} service tarbitratord stop || :
fi
if ((${initd_mod}==1)); then
if [ -e ${service_config_dir}/powerd ]; then
${csudo} chkconfig --del powerd || :
fi
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} chkconfig --del tarbitratord || :
fi
elif ((${initd_mod}==2)); then
if [ -e ${service_config_dir}/powerd ]; then
${csudo} insserv -r powerd || :
fi
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} insserv -r tarbitratord || :
fi
elif ((${initd_mod}==3)); then
if [ -e ${service_config_dir}/powerd ]; then
${csudo} update-rc.d -f powerd remove || :
fi
if [ -e ${service_config_dir}/tarbitratord ]; then
${csudo} update-rc.d -f tarbitratord remove || :
fi
fi
${csudo} rm -f ${service_config_dir}/powerd || :
${csudo} rm -f ${service_config_dir}/tarbitratord || :
if $(which init &> /dev/null); then
${csudo} init q || :
fi
}
function clean_service() {
if ((${service_mod}==0)); then
clean_service_on_systemd
elif ((${service_mod}==1)); then
clean_service_on_sysvinit
else
# must stop manually
kill_powerd
kill_tarbitrator
fi
}
# Stop service and disable booting start.
clean_service
# Remove binary file and links
clean_bin
# Remove header file.
clean_header
# Remove lib file
clean_lib
# Remove link log directory
clean_log
# Remove link configuration file
clean_config
# Remove data link directory
${csudo} rm -rf ${data_link_dir} || :
${csudo} rm -rf ${install_main_dir}
${csudo} rm -rf ${install_nginxd_dir}
if [[ -e /etc/os-release ]]; then
osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
else
osinfo=""
fi
#if echo $osinfo | grep -qwi "ubuntu" ; then
## echo "this is ubuntu system"
# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
#elif echo $osinfo | grep -qwi "debian" ; then
## echo "this is debian system"
# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
#elif echo $osinfo | grep -qwi "centos" ; then
## echo "this is centos system"
# ${csudo} rpm -e --noscripts tdengine || :
#fi
echo -e "${GREEN}PowerDB is removed successfully!${NC}"
echo
\ No newline at end of file
......@@ -23,12 +23,8 @@ IF (TD_LINUX)
#set version of .so
#VERSION so version
#SOVERSION api version
execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh)
execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c
OUTPUT_VARIABLE
VERSION_INFO)
MESSAGE(STATUS "build version ${VERSION_INFO}")
SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${VERSION_INFO} SOVERSION 1)
#MESSAGE(STATUS "build version ${TD_VER_NUMBER}")
SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1)
ADD_SUBDIRECTORY(tests)
......@@ -65,11 +61,7 @@ ELSEIF (TD_DARWIN)
#set version of .so
#VERSION so version
#SOVERSION api version
execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh)
execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c
OUTPUT_VARIABLE
VERSION_INFO)
MESSAGE(STATUS "build version ${VERSION_INFO}")
SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${VERSION_INFO} SOVERSION 1)
#MESSAGE(STATUS "build version ${TD_VER_NUMBER}")
SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1)
ENDIF ()
......@@ -39,7 +39,6 @@ extern "C" {
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
#define TSDB_COL_IS_TAG(f) (((f)&TSDB_COL_TAG) != 0)
typedef struct SParsedColElem {
int16_t colIndex;
......@@ -70,6 +69,8 @@ typedef struct SJoinSupporter {
SSubqueryState* pState;
SSqlObj* pObj; // parent SqlObj
int32_t subqueryIndex; // index of sub query
char intervalTimeUnit;
char slidingTimeUnit;
int64_t intervalTime; // interval time
int64_t slidingTime; // sliding time
SLimitVal limit; // limit info
......
......@@ -229,8 +229,9 @@ typedef struct STableDataBlocks {
typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it separately.
uint32_t type; // query/insert type
char intervalTimeUnit;
char slidingTimeUnit;
uint32_t type; // query/insert type
STimeWindow window; // query time window
int64_t intervalTime; // aggregation time interval
int64_t slidingTime; // sliding window in mseconds
......@@ -366,6 +367,8 @@ typedef struct SSqlStream {
uint32_t streamId;
char listed;
bool isProject;
char intervalTimeUnit;
char slidingTimeUnit;
int16_t precision;
int64_t num; // number of computing count
......@@ -379,7 +382,7 @@ typedef struct SSqlStream {
int64_t ctime; // stream created time
int64_t stime; // stream next executed time
int64_t etime; // stream end query time, when time is larger then etime, the stream will be closed
int64_t interval;
int64_t intervalTime;
int64_t slidingTime;
void * pTimer;
......@@ -468,7 +471,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
// user defined constant value output columns
if (pInfo->pSqlExpr->colInfo.flag == TSDB_COL_UDC) {
if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
pData = pInfo->pSqlExpr->param[1].pz;
pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen;
......
......@@ -1648,9 +1648,10 @@ static void last_function(SQLFunctionCtx *pCtx) {
for (int32_t i = pCtx->size - 1; i >= 0; --i) {
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
continue;
if (!pCtx->requireNull) {
continue;
}
}
memcpy(pCtx->aOutputBuf, data, pCtx->inputBytes);
TSKEY ts = pCtx->ptsList[i];
......@@ -1721,7 +1722,9 @@ static void last_dist_function(SQLFunctionCtx *pCtx) {
for (int32_t i = pCtx->size - 1; i >= 0; --i) {
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
continue;
if (!pCtx->requireNull) {
continue;
}
}
last_data_assign_impl(pCtx, data, i);
......
......@@ -259,11 +259,11 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
pSdesc->num = htobe64(pStream->num);
pSdesc->useconds = htobe64(pStream->useconds);
pSdesc->stime = htobe64(pStream->stime - pStream->interval);
pSdesc->stime = htobe64(pStream->stime - pStream->intervalTime);
pSdesc->ctime = htobe64(pStream->ctime);
pSdesc->slidingTime = htobe64(pStream->slidingTime);
pSdesc->interval = htobe64(pStream->interval);
pSdesc->interval = htobe64(pStream->intervalTime);
pHeartbeat->numOfStreams++;
pSdesc++;
......
......@@ -33,6 +33,8 @@
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
#define TSWINDOW_IS_EQUAL(t1, t2) (((t1).skey == (t2).skey) && ((t1).ekey == (t2).ekey))
// -1 is the tbname column index, so use -3 as the initial value here
#define COLUMN_INDEX_INITIAL_VAL (-3)
#define COLUMN_INDEX_INITIALIZER \
......@@ -45,6 +47,10 @@ typedef struct SColumnList { // todo refactor
SColumnIndex ids[TSDB_MAX_COLUMNS];
} SColumnList;
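// maps the function id written in the SQL statement (originFuncId) to the one actually executed (execFuncId)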
typedef struct SConvertFunc {
int32_t originFuncId;
int32_t execFuncId;
} SConvertFunc;
static SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t colIndex, int32_t tableIndex);
static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo);
......@@ -587,21 +593,20 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
// interval is not null
SStrToken* t = &pQuerySql->interval;
if (getTimestampInUsFromStr(t->z, t->n, &pQueryInfo->intervalTime) != TSDB_CODE_SUCCESS) {
if (parseDuration(t->z, t->n, &pQueryInfo->intervalTime, &pQueryInfo->intervalTimeUnit) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
// if the unit of time window value is millisecond, change the value from microsecond
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
pQueryInfo->intervalTime = pQueryInfo->intervalTime / 1000;
}
/* parser has filter the illegal type, no need to check here */
pQueryInfo->slidingTimeUnit = pQuerySql->interval.z[pQuerySql->interval.n - 1];
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y') {
// if the unit of time window value is millisecond, change the value from microsecond
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
pQueryInfo->intervalTime = pQueryInfo->intervalTime / 1000;
}
// interval cannot be less than 10 milliseconds
if (pQueryInfo->intervalTime < tsMinIntervalTime) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
// interval cannot be less than 10 milliseconds
if (pQueryInfo->intervalTime < tsMinIntervalTime) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
// for top/bottom + interval query, we do not add additional timestamp column in the front
......@@ -666,6 +671,7 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
const char* msg0 = "sliding value too small";
const char* msg1 = "sliding value no larger than the interval value";
const char* msg2 = "sliding value can not less than 1% of interval value";
const char* msg3 = "does not support sliding when interval is natual month/year";
const static int32_t INTERVAL_SLIDING_FACTOR = 100;
......@@ -673,21 +679,27 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
SStrToken* pSliding = &pQuerySql->sliding;
if (pSliding->n != 0) {
getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->slidingTime);
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
pQueryInfo->slidingTime /= 1000;
}
if (pSliding->n == 0) {
pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
return TSDB_CODE_SUCCESS;
}
if (pQueryInfo->slidingTime < tsMinSlidingTime) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
if (pQueryInfo->intervalTimeUnit == 'n' || pQueryInfo->intervalTimeUnit == 'y') {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
} else {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->slidingTime);
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
pQueryInfo->slidingTime /= 1000;
}
if (pQueryInfo->slidingTime < tsMinSlidingTime) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if ((pQueryInfo->intervalTime != 0) && (pQueryInfo->intervalTime/pQueryInfo->slidingTime > INTERVAL_SLIDING_FACTOR)) {
......@@ -1501,13 +1513,13 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
return TSDB_CODE_SUCCESS;
}
static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSchema* pSchema, int32_t functionID, char* aliasName,
static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSchema* pSchema, SConvertFunc cvtFunc, char* aliasName,
int32_t resColIdx, SColumnIndex* pColIndex) {
int16_t type = 0;
int16_t bytes = 0;
char columnName[TSDB_COL_NAME_LEN] = {0};
const char* msg1 = "not support column types";
int32_t functionID = cvtFunc.execFuncId;
if (functionID == TSDB_FUNC_SPREAD) {
if (pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BINARY ||
......@@ -1523,16 +1535,21 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
type = pSchema[pColIndex->columnIndex].type;
bytes = pSchema[pColIndex->columnIndex].bytes;
}
if (aliasName != NULL) {
tstrncpy(columnName, aliasName, sizeof(columnName));
} else {
getRevisedName(columnName, functionID, sizeof(columnName) - 1, pSchema[pColIndex->columnIndex].name);
getRevisedName(columnName, cvtFunc.originFuncId, sizeof(columnName) - 1, pSchema[pColIndex->columnIndex].name);
}
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, bytes, false);
tstrncpy(pExpr->aliasName, columnName, sizeof(pExpr->aliasName));
if (cvtFunc.originFuncId == TSDB_FUNC_LAST_ROW && cvtFunc.originFuncId != functionID) {
pExpr->colInfo.flag |= TSDB_COL_NULL;
}
// set reverse order scan data blocks for last query
if (functionID == TSDB_FUNC_LAST) {
pExpr->numOfParams = 1;
......@@ -1766,7 +1783,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
SConvertFunc cvtFunc = {.originFuncId = functionID, .execFuncId = functionID};
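// when last_row is requested and no explicit query window has been set, run it as last() internally; the original id is kept for column naming and NULL handling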
if (functionID == TSDB_FUNC_LAST_ROW && TSWINDOW_IS_EQUAL(pQueryInfo->window,TSWINDOW_INITIALIZER)) {
cvtFunc.execFuncId = TSDB_FUNC_LAST;
}
if (!requireAllFields) {
if (pItem->pNode->pParam->nExpr < 1) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
......@@ -1798,7 +1818,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
for (int32_t j = 0; j < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++j) {
index.columnIndex = j;
if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex++, &index) != 0) {
if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, pItem->aliasName, colIndex++, &index) != 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
......@@ -1815,8 +1835,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
if ((index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) || (index.columnIndex < 0)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex + i, &index) != 0) {
if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, pItem->aliasName, colIndex + i, &index) != 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -1853,7 +1872,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
for (int32_t i = 0; i < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++i) {
SColumnIndex index = {.tableIndex = j, .columnIndex = i};
if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex, &index) != 0) {
if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, pItem->aliasName, colIndex, &index) != 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -4675,7 +4694,9 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg0 = "sample interval can not be less than 10ms.";
const char* msg1 = "functions not allowed in select clause";
if (pQueryInfo->intervalTime != 0 && pQueryInfo->intervalTime < 10) {
if (pQueryInfo->intervalTime != 0 && pQueryInfo->intervalTime < 10 &&
pQueryInfo->intervalTimeUnit != 'n' &&
pQueryInfo->intervalTimeUnit != 'y') {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
......@@ -5238,7 +5259,7 @@ static bool tagColumnInGroupby(SSqlGroupbyExpr* pGroupbyExpr, int16_t columnId)
for (int32_t j = 0; j < pGroupbyExpr->numOfGroupCols; ++j) {
SColIndex* pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, j);
if (columnId == pColIndex->colId && pColIndex->flag == TSDB_COL_TAG) {
if (columnId == pColIndex->colId && TSDB_COL_IS_TAG(pColIndex->flag )) {
return true;
}
}
......@@ -5537,7 +5558,6 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
return checkUpdateTagPrjFunctions(pQueryInfo, pCmd);
}
}
int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
const char* msg1 = "only one expression allowed";
const char* msg2 = "invalid expression in select clause";
......@@ -6078,6 +6098,10 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
}
int32_t joinQuery = (pQuerySql->from != NULL && pQuerySql->from->nExpr > 2);
if (pQuerySql->pWhere) {
pQueryInfo->window = TSWINDOW_INITIALIZER;
}
if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -6161,7 +6185,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
if (pQueryInfo->intervalTime > 0) {
if (pQueryInfo->intervalTime > 0 && pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y') {
int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey);
// number of result is not greater than 10,000,000
if ((timeRange == 0) || (timeRange / pQueryInfo->intervalTime) > MAX_INTERVAL_TIME_WINDOW) {
......
......@@ -673,6 +673,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->numOfCols = htons((int16_t)taosArrayGetSize(pQueryInfo->colList));
pQueryMsg->intervalTime = htobe64(pQueryInfo->intervalTime);
pQueryMsg->slidingTime = htobe64(pQueryInfo->slidingTime);
pQueryMsg->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pQueryMsg->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols);
pQueryMsg->numOfTags = htonl(numOfTags);
......
......@@ -46,22 +46,23 @@ static bool isProjectStream(SQueryInfo* pQueryInfo) {
return true;
}
static int64_t tscGetRetryDelayTime(int64_t slidingTime, int16_t prec) {
static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, int16_t prec) {
float retryRangeFactor = 0.3f;
// change to ms
if (prec == TSDB_TIME_PRECISION_MICRO) {
slidingTime = slidingTime / 1000;
}
int64_t retryDelta = (int64_t)(tsStreamCompRetryDelay * retryRangeFactor);
retryDelta = ((rand() % retryDelta) + tsStreamCompRetryDelay) * 1000L;
if (slidingTime < retryDelta) {
return slidingTime;
} else {
return retryDelta;
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
// change to ms
if (prec == TSDB_TIME_PRECISION_MICRO) {
slidingTime = slidingTime / 1000;
}
if (slidingTime < retryDelta) {
return slidingTime;
}
}
return retryDelta;
}
static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
......@@ -86,7 +87,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
// failed to get meter/metric meta, retry in 10sec.
if (code != TSDB_CODE_SUCCESS) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
tscDebug("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
tscSetRetryTimer(pStream, pSql, retryDelayTime);
......@@ -131,13 +132,17 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
}
if (etime > pStream->etime) {
etime = pStream->etime;
} else if (pStream->intervalTimeUnit != 'y' && pStream->intervalTimeUnit != 'n') {
etime = pStream->stime + (etime - pStream->stime) / pStream->intervalTime * pStream->intervalTime;
} else {
etime = pStream->stime + (etime - pStream->stime) / pStream->interval * pStream->interval;
etime = taosGetIntervalStartTimestamp(etime, pStream->slidingTime, pStream->intervalTime, pStream->slidingTimeUnit, pStream->precision);
}
pQueryInfo->window.ekey = etime;
if (pQueryInfo->window.skey >= pQueryInfo->window.ekey) {
int64_t timer = pStream->slidingTime;
if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
timer = 86400 * 1000l;
} else if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
timer /= 1000l;
}
tscSetRetryTimer(pStream, pSql, timer);
......@@ -157,7 +162,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows) {
SSqlStream *pStream = (SSqlStream *)param;
if (tres == NULL || numOfRows < 0) {
int64_t retryDelay = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
retryDelay);
......@@ -218,7 +223,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
SSqlObj * pSql = (SSqlObj *)res;
if (pSql == NULL || numOfRows < 0) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
......@@ -241,7 +246,11 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
}
if (!pStream->isProject) {
pStream->stime += pStream->slidingTime;
if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
pStream->stime = taosAddNatualInterval(pStream->stime, pStream->slidingTime, pStream->slidingTimeUnit, pStream->precision);
} else {
pStream->stime += pStream->slidingTime;
}
}
// actually only one row is returned. this following is not necessary
taos_fetch_rows_a(res, tscProcessStreamRetrieveResult, pStream);
......@@ -301,7 +310,7 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
now + timer, timer, delay, pStream->stime, etime);
} else {
tscDebug("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream,
pStream->stime, timer, delay, pStream->stime - pStream->interval, pStream->stime - 1);
pStream->stime, timer, delay, pStream->stime - pStream->intervalTime, pStream->stime - 1);
}
pSql->cmd.command = TSDB_SQL_SELECT;
......@@ -311,23 +320,26 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
}
static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
int64_t delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio);
int64_t maxDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
if (delayDelta > maxDelay) {
delayDelta = maxDelay;
}
int64_t remainTimeWindow = pStream->slidingTime - delayDelta;
if (maxDelay > remainTimeWindow) {
maxDelay = (int64_t)(remainTimeWindow / 1.5f);
int64_t delayDelta = maxDelay;
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio);
if (delayDelta > maxDelay) {
delayDelta = maxDelay;
}
int64_t remainTimeWindow = pStream->slidingTime - delayDelta;
if (maxDelay > remainTimeWindow) {
maxDelay = (int64_t)(remainTimeWindow / 1.5f);
}
}
int64_t currentDelay = (rand() % maxDelay); // a random number
currentDelay += delayDelta;
assert(currentDelay < pStream->slidingTime);
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
assert(currentDelay < pStream->slidingTime);
}
return currentDelay;
}
......@@ -354,7 +366,8 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
return;
}
} else {
if ((pStream->stime - pStream->interval) >= pStream->etime) {
int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
if (stime >= pStream->etime) {
tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
// TODO : How to terminate stream here
......@@ -387,24 +400,24 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo->intervalTime < minIntervalTime) {
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->intervalTime < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->intervalTime, minIntervalTime);
pQueryInfo->intervalTime = minIntervalTime;
}
pStream->interval = pQueryInfo->intervalTime; // it shall be derived from sql string
pStream->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pStream->intervalTime = pQueryInfo->intervalTime; // it shall be derived from sql string
if (pQueryInfo->slidingTime == 0) {
if (pQueryInfo->slidingTime <= 0) {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
}
int64_t minSlidingTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
if (pQueryInfo->slidingTime == -1) {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
} else if (pQueryInfo->slidingTime < minSlidingTime) {
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->slidingTime < minSlidingTime) {
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->slidingTime, minSlidingTime);
......@@ -418,6 +431,7 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
}
pStream->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pStream->slidingTime = pQueryInfo->slidingTime;
if (pStream->isProject) {
......@@ -431,7 +445,7 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
if (pStream->isProject) {
// no data in table, flush all data till now to destination meter, 10sec delay
pStream->interval = tsProjectExecInterval;
pStream->intervalTime = tsProjectExecInterval;
pStream->slidingTime = tsProjectExecInterval;
if (stime != 0) { // first projection start from the latest event timestamp
......@@ -442,11 +456,15 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
}
} else { // timewindow based aggregation stream
if (stime == 0) { // no data in meter till now
stime = ((int64_t)taosGetTimestamp(pStream->precision) / pStream->interval) * pStream->interval;
stime -= pStream->interval;
tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64, pSql, pStream, stime);
stime = pQueryInfo->window.skey;
if (stime == INT64_MIN) {
stime = (int64_t)taosGetTimestamp(pStream->precision);
stime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
stime = taosGetIntervalStartTimestamp(stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64, pSql, pStream, stime);
}
} else {
int64_t newStime = (stime / pStream->interval) * pStream->interval;
int64_t newStime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
if (newStime != stime) {
tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql, pStream, stime, newStime);
stime = newStime;
......@@ -516,7 +534,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
pStream, pTableMetaInfo->name, pStream->interval, pStream->slidingTime, starttime, pSql->sqlstr);
pStream, pTableMetaInfo->name, pStream->intervalTime, pStream->slidingTime, starttime, pSql->sqlstr);
}
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
......
......@@ -178,6 +178,8 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, in
pSupporter->subqueryIndex = index;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
pSupporter->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pSupporter->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pSupporter->intervalTime = pQueryInfo->intervalTime;
pSupporter->slidingTime = pQueryInfo->slidingTime;
pSupporter->limit = pQueryInfo->limit;
......@@ -309,6 +311,8 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
// set the second stage sub query for join process
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE);
pQueryInfo->intervalTimeUnit = pSupporter->intervalTimeUnit;
pQueryInfo->slidingTimeUnit = pSupporter->slidingTimeUnit;
pQueryInfo->intervalTime = pSupporter->intervalTime;
pQueryInfo->slidingTime = pSupporter->slidingTime;
pQueryInfo->groupbyExpr = pSupporter->groupbyExpr;
......
......@@ -1830,6 +1830,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pNewQueryInfo->command = pQueryInfo->command;
pNewQueryInfo->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pNewQueryInfo->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pNewQueryInfo->intervalTime = pQueryInfo->intervalTime;
pNewQueryInfo->slidingTime = pQueryInfo->slidingTime;
......
......@@ -35,6 +35,8 @@ bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision);
int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision);
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
#endif // TDENGINE_NAME_H
......@@ -100,33 +100,120 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO
return pFilter;
}
int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision) {
key /= 1000;
if (precision == TSDB_TIME_PRECISION_MICRO) {
key /= 1000;
}
struct tm tm;
time_t t = (time_t)key;
localtime_r(&t, &tm);
if (timeUnit == 'y') {
intervalTime *= 12;
}
int mon = (int)(tm.tm_year * 12 + tm.tm_mon + intervalTime);
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
key = mktime(&tm) * 1000L;
if (precision == TSDB_TIME_PRECISION_MICRO) {
key *= 1000L;
}
return key;
}
int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision) {
skey /= 1000;
ekey /= 1000;
if (precision == TSDB_TIME_PRECISION_MICRO) {
skey /= 1000;
ekey /= 1000;
}
if (ekey < skey) {
int64_t tmp = ekey;
ekey = skey;
skey = tmp;
}
struct tm tm;
time_t t = (time_t)skey;
localtime_r(&t, &tm);
int smon = tm.tm_year * 12 + tm.tm_mon;
t = (time_t)ekey;
localtime_r(&t, &tm);
int emon = tm.tm_year * 12 + tm.tm_mon;
if (timeUnit == 'y') {
intervalTime *= 12;
}
return (emon - smon) / (int32_t)intervalTime;
}
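
taosAddNatualInterval and taosCountNatualInterval above do calendar arithmetic by converting the timestamp to broken-down local time and then shifting or differencing the month count. A self-contained sketch of the same idea for millisecond timestamps (the helper name below is hypothetical, not the TDengine API):

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>

// Add `months` calendar months to a millisecond timestamp (local time),
// mirroring the month arithmetic used by taosAddNatualInterval above.
// Note: mktime normalizes overflow, so Jan 31 + 1 month lands in early March.
static int64_t add_months_ms(int64_t key_ms, int months) {
  time_t t = (time_t)(key_ms / 1000);
  struct tm tm;
  localtime_r(&t, &tm);
  int mon = tm.tm_year * 12 + tm.tm_mon + months;
  tm.tm_year = mon / 12;
  tm.tm_mon  = mon % 12;
  return (int64_t)mktime(&tm) * 1000L;
}

int main(void) {
  int64_t now_ms = (int64_t)time(NULL) * 1000L;
  printf("now      : %lld\n", (long long)now_ms);
  printf("+1 month : %lld\n", (long long)add_months_ms(now_ms, 1));
  printf("+1 year  : %lld\n", (long long)add_months_ms(now_ms, 12));
  return 0;
}
```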
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {
return startTime;
}
int64_t start = startTime;
if (timeUnit == 'n' || timeUnit == 'y') {
start /= 1000;
if (precision == TSDB_TIME_PRECISION_MICRO) {
start /= 1000;
}
struct tm tm;
time_t t = (time_t)start;
localtime_r(&t, &tm);
tm.tm_sec = 0;
tm.tm_min = 0;
tm.tm_hour = 0;
tm.tm_mday = 1;
if (timeUnit == 'y') {
tm.tm_mon = 0;
tm.tm_year = (int)(tm.tm_year / slidingTime * slidingTime);
} else {
int mon = tm.tm_year * 12 + tm.tm_mon;
mon = (int)(mon / slidingTime * slidingTime);
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
}
int64_t start = ((startTime - intervalTime) / slidingTime + 1) * slidingTime;
if (!(timeUnit == 'u' || timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h')) {
/*
* here we revise the start time of the day according to the local time zone,
* but in case of DST, the start time of one day needs to be decided dynamically.
*/
// todo refactor to extract function that is available for Linux/Windows/Mac platform
#if defined(WINDOWS) && _MSC_VER >= 1900
// see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
int64_t timezone = _timezone;
int32_t daylight = _daylight;
char** tzname = _tzname;
#endif
start = mktime(&tm) * 1000L;
if (precision == TSDB_TIME_PRECISION_MICRO) {
start *= 1000L;
}
} else {
start = ((start - intervalTime) / slidingTime + 1) * slidingTime;
if (timeUnit == 'd' || timeUnit == 'w') {
/*
* here we revise the start time of the day according to the local time zone,
* but in case of DST, the start time of one day needs to be decided dynamically.
*/
// todo refactor to extract function that is available for Linux/Windows/Mac platform
#if defined(WINDOWS) && _MSC_VER >= 1900
// see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
int64_t timezone = _timezone;
int32_t daylight = _daylight;
char** tzname = _tzname;
#endif
int64_t t = (precision == TSDB_TIME_PRECISION_MILLI) ? MILLISECOND_PER_SECOND : MILLISECOND_PER_SECOND * 1000L;
start += timezone * t;
}
int64_t t = (precision == TSDB_TIME_PRECISION_MILLI) ? MILLISECOND_PER_SECOND : MILLISECOND_PER_SECOND * 1000L;
start += timezone * t;
int64_t end = start + intervalTime - 1;
if (end < startTime) {
start += slidingTime;
}
}
int64_t end = start + intervalTime - 1;
if (end < startTime) {
start += slidingTime;
}
return start;
}
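
For fixed-length units the window start is still computed with the arithmetic formula at the end of the function; only month/year units go through the struct tm path. A standalone sketch of the fixed-length alignment, assuming millisecond timestamps and ignoring the day/week timezone correction:

```c
#include <stdint.h>
#include <stdio.h>

// Align `ts` to the start of its sliding window for fixed-length units,
// following the same formula as the non-'n'/'y' branch above.
static int64_t window_start(int64_t ts, int64_t sliding, int64_t interval) {
  int64_t start = ((ts - interval) / sliding + 1) * sliding;
  if (start + interval - 1 < ts) {  // ts falls past the window end: advance one slide
    start += sliding;
  }
  return start;
}

int main(void) {
  // 10 s windows sliding every 5 s, millisecond timestamps
  printf("%lld\n", (long long)window_start(12345, 5000, 10000));  // -> 5000
  printf("%lld\n", (long long)window_start(23000, 5000, 10000));  // -> 15000
  return 0;
}
```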
......
......@@ -33,7 +33,7 @@ public class PreparedStatementTest extends BaseTest {
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/",properties);
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
String sql = "drop database if exists " + dbName;
statement = (TSDBPreparedStatement) connection.prepareStatement(sql);
......
Metadata-Version: 2.1
Name: taos
Version: 2.0.0
Summary: TDengine python client package
Home-page: https://github.com/pypa/sampleproject
Author: Taosdata Inc.
Author-email: support@taosdata.com
License: UNKNOWN
Description: # TDengine python client interface
Platform: UNKNOWN
Classifier: Programming Language :: Python :: 2
Classifier: Operating System :: Linux
Description-Content-Type: text/markdown
README.md
setup.py
taos/__init__.py
taos/cinterface.py
taos/connection.py
taos/constants.py
taos/cursor.py
taos/dbapi.py
taos/error.py
taos.egg-info/PKG-INFO
taos.egg-info/SOURCES.txt
taos.egg-info/dependency_links.txt
taos.egg-info/top_level.txt
\ No newline at end of file
Metadata-Version: 2.1
Name: taos
Version: 2.0.0
Summary: TDengine python client package
Home-page: https://github.com/pypa/sampleproject
Author: Taosdata Inc.
Author-email: support@taosdata.com
License: UNKNOWN
Description: # TDengine python client interface
Platform: UNKNOWN
Classifier: Programming Language :: Python :: 3
Classifier: Operating System :: Linux
Description-Content-Type: text/markdown
README.md
setup.py
taos/__init__.py
taos/cinterface.py
taos/connection.py
taos/constants.py
taos/cursor.py
taos/dbapi.py
taos/error.py
taos.egg-info/PKG-INFO
taos.egg-info/SOURCES.txt
taos.egg-info/dependency_links.txt
taos.egg-info/top_level.txt
\ No newline at end of file
from taos.cinterface import CTaosInterface
from taos.error import *
from taos.subscription import TDengineSubscription
from taos.connection import TDengineConnection
if __name__ == '__main__':
conn = TDengineConnection(
host="127.0.0.1", user="root", password="taosdata", database="test")
# Generate a cursor object to run SQL commands
sub = conn.subscribe(False, "test", "select * from log0601;", 1000)
for i in range(100):
print(i)
data = sub.consume()
for d in data:
print(d)
sub.close()
conn.close()
Metadata-Version: 2.1
Name: taos
Version: 2.0.0
Summary: TDengine python client package
Home-page: https://github.com/pypa/sampleproject
Author: Taosdata Inc.
Author-email: support@taosdata.com
License: UNKNOWN
Description: # TDengine python client interface
Platform: UNKNOWN
Classifier: Programming Language :: Python :: 2
Classifier: Operating System :: Windows
Description-Content-Type: text/markdown
README.md
setup.py
taos/__init__.py
taos/cinterface.py
taos/connection.py
taos/constants.py
taos/cursor.py
taos/dbapi.py
taos/error.py
taos.egg-info/PKG-INFO
taos.egg-info/SOURCES.txt
taos.egg-info/dependency_links.txt
taos.egg-info/top_level.txt
\ No newline at end of file
Metadata-Version: 2.1
Name: taos
Version: 2.0.0
Summary: TDengine python client package
Home-page: https://github.com/pypa/sampleproject
Author: Hongze Cheng
Author-email: hzcheng@taosdata.com
License: UNKNOWN
Description: # TDengine python client interface
Platform: UNKNOWN
Classifier: Programming Language :: Python :: 3
Classifier: Operating System :: Windows
Description-Content-Type: text/markdown
README.md
setup.py
taos/__init__.py
taos/cinterface.py
taos/connection.py
taos/constants.py
taos/cursor.py
taos/dbapi.py
taos/error.py
taos.egg-info/PKG-INFO
taos.egg-info/SOURCES.txt
taos.egg-info/dependency_links.txt
taos.egg-info/top_level.txt
\ No newline at end of file
......@@ -85,7 +85,11 @@ extern const int32_t TYPE_BYTES[11];
#define TSDB_DATA_NULL_STR_L "null"
#define TSDB_DEFAULT_USER "root"
#ifdef _TD_POWER_
#define TSDB_DEFAULT_PASS "powerdb"
#else
#define TSDB_DEFAULT_PASS "taosdata"
#endif
#define TSDB_TRUE 1
#define TSDB_FALSE 0
......
......@@ -170,6 +170,13 @@ enum _mgmt_table {
#define TSDB_COL_NORMAL 0x0u // the normal column of the table
#define TSDB_COL_TAG 0x1u // the tag column type
#define TSDB_COL_UDC 0x2u // the user specified normal string column, it is a dummy column
#define TSDB_COL_NULL 0x4u // the column filter NULL or not
#define TSDB_COL_IS_TAG(f) (((f&(~(TSDB_COL_NULL)))&TSDB_COL_TAG) != 0)
#define TSDB_COL_IS_NORMAL_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_NORMAL)
#define TSDB_COL_IS_UD_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_UDC)
#define TSDB_COL_REQ_NULL(f) (((f)&TSDB_COL_NULL) != 0)
extern char *taosMsg[];
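
The new TSDB_COL_NULL bit is OR-ed into the same flag field as the column type, so the type predicates must mask it out before comparing. A self-contained sketch showing why the masking matters (the macro names are local stand-ins so the snippet compiles on its own):

```c
#include <assert.h>
#include <stdio.h>

// Stand-ins for the flag bits defined above (values copied from the hunk).
#define COL_NORMAL 0x0u
#define COL_TAG    0x1u
#define COL_UDC    0x2u
#define COL_NULL   0x4u
#define COL_IS_TAG(f)        (((f & (~(COL_NULL))) & COL_TAG) != 0)
#define COL_IS_NORMAL_COL(f) ((f & (~(COL_NULL))) == COL_NORMAL)
#define COL_REQ_NULL(f)      (((f) & COL_NULL) != 0)

int main(void) {
  unsigned f = COL_TAG | COL_NULL;  // a tag column with the "filter NULL" bit set
  assert(COL_IS_TAG(f));            // still recognized as a tag column after masking
  assert(!COL_IS_NORMAL_COL(f));
  assert(COL_REQ_NULL(f));
  printf("flag checks passed\n");
  return 0;
}
```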
......@@ -456,6 +463,7 @@ typedef struct {
int64_t intervalTime; // time interval for aggregation, in million second
int64_t intervalOffset; // start offset for interval query
int64_t slidingTime; // value for sliding window
char intervalTimeUnit;
char slidingTimeUnit; // time interval type, for revisement of interval(1d)
uint16_t tagCondLen; // tag length in current query
int16_t numOfGroupCols; // num of group by columns
......
......@@ -3,5 +3,3 @@ PROJECT(TDengine)
ADD_SUBDIRECTORY(shell)
ADD_SUBDIRECTORY(taosdemo)
#ADD_SUBDIRECTORY(taosClusterTest)
ADD_SUBDIRECTORY(taosnetwork)
......@@ -24,7 +24,12 @@ ELSEIF (TD_WINDOWS)
LIST(APPEND SRC ./src/shellWindows.c)
ADD_EXECUTABLE(shell ${SRC})
TARGET_LINK_LIBRARIES(shell taos_static)
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
IF (TD_POWER)
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME power)
ELSE ()
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
ENDIF ()
ELSEIF (TD_DARWIN)
LIST(APPEND SRC ./src/shellEngine.c)
LIST(APPEND SRC ./src/shellMain.c)
......
......@@ -50,6 +50,9 @@ typedef struct SShellArguments {
char* commands;
int abort;
int port;
int endPort;
int pktLen;
char* netTestRole;
} SShellArguments;
/**************** Function declarations ****************/
......
......@@ -30,11 +30,22 @@
#include <regex.h>
/**************** Global variables ****************/
#ifdef _TD_POWER_
char CLIENT_VERSION[] = "Welcome to the PowerDB shell from %s, Client Version:%s\n"
"Copyright (c) 2017 by PowerDB, Inc. All rights reserved.\n\n";
char PROMPT_HEADER[] = "power> ";
char CONTINUE_PROMPT[] = " -> ";
int prompt_size = 7;
#else
char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
"Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.\n\n";
char PROMPT_HEADER[] = "taos> ";
char CONTINUE_PROMPT[] = " -> ";
int prompt_size = 6;
#endif
TAOS_RES *result = NULL;
SShellHistory history;
......
......@@ -46,6 +46,9 @@ static struct argp_option options[] = {
{"thread", 'T', "THREADNUM", 0, "Number of threads when using multi-thread to import data."},
{"database", 'd', "DATABASE", 0, "Database to use when connecting to the server."},
{"timezone", 't', "TIMEZONE", 0, "Time zone of the shell, default is local."},
{"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is NULL, valid option: client | server."},
{"endport", 'e', "ENDPORT", 0, "Net test end port, default is 6042."},
{"pktlen", 'l', "PKTLEN", 0, "Packet length used for net test, default is 1000 bytes."},
{0}};
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
......@@ -65,6 +68,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'P':
if (arg) {
tsDnodeShellPort = atoi(arg);
arguments->port = atoi(arg);
} else {
fprintf(stderr, "Invalid port\n");
return -1;
......@@ -126,6 +130,29 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'd':
arguments->database = arg;
break;
case 'n':
arguments->netTestRole = arg;
break;
case 'e':
if (arg) {
arguments->endPort = atoi(arg);
} else {
fprintf(stderr, "Invalid end port\n");
return -1;
}
break;
case 'l':
if (arg) {
arguments->pktLen = atoi(arg);
} else {
fprintf(stderr, "Invalid packet length\n");
return -1;
}
break;
case OPT_ABORT:
arguments->abort = 1;
break;
......
......@@ -15,6 +15,7 @@
#include "os.h"
#include "shell.h"
#include "tnettest.h"
pthread_t pid;
......@@ -60,7 +61,10 @@ SShellArguments args = {
.file = "\0",
.dir = "\0",
.threadNum = 5,
.commands = NULL
.commands = NULL,
.endPort = 6042,
.pktLen = 1000,
.netTestRole = NULL
};
/*
......@@ -75,6 +79,11 @@ int main(int argc, char* argv[]) {
shellParseArgument(argc, argv, &args);
if (args.netTestRole && args.netTestRole[0] != 0) {
taosNetTest(args.host, (uint16_t)args.port, (uint16_t)args.endPort, args.pktLen, args.netTestRole);
exit(0);
}
/* Initialize the shell */
TAOS* con = shellInit(&args);
if (con == NULL) {
......
......@@ -89,7 +89,11 @@ typedef struct DemoArguments {
{0, 'h', "host", 0, "The host to connect to TDengine. Default is localhost.", 0},
{0, 'p', "port", 0, "The TCP/IP port number to use for the connection. Default is 0.", 1},
{0, 'u', "user", 0, "The TDengine user name to use when connecting to the server. Default is 'root'.", 2},
#ifdef _TD_POWER_
{0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'powerdb'.", 3},
#else
{0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3},
#endif
{0, 'd', "database", 0, "Destination database. Default is 'test'.", 3},
{0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 3},
{0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 3},
......@@ -104,7 +108,11 @@ typedef struct DemoArguments {
{0, 'r', "num_of_records_per_req", 0, "The number of records per request. Default is 1000.", 10},
{0, 't', "num_of_tables", 0, "The number of tables. Default is 10000.", 11},
{0, 'n', "num_of_records_per_table", 0, "The number of records per table. Default is 100000.", 12},
#ifdef _TD_POWER_
{0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/power/'.", 14},
#else
{0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 14},
#endif
{0, 'x', 0, 0, "Insert only flag.", 13},
{0, 'O', "order", 0, "Insert mode--0: In order, 1: Out of order. Default is in order.", 14},
{0, 'R', "rate", 0, "Out of order data's rate--if order=1 Default 10, min: 0, max: 50.", 14},
......@@ -279,7 +287,11 @@ typedef struct DemoArguments {
printf("%s%s\n", indent, "-u");
printf("%s%s%s\n", indent, indent, "user, The user name to use when connecting to the server. Default is 'root'.");
printf("%s%s\n", indent, "-p");
#ifdef _TD_POWER_
printf("%s%s%s\n", indent, indent, "password, The password to use when connecting to the server. Default is 'powerdb'.");
#else
printf("%s%s%s\n", indent, indent, "password, The password to use when connecting to the server. Default is 'taosdata'.");
#endif
printf("%s%s\n", indent, "-d");
printf("%s%s%s\n", indent, indent, "database, Destination database. Default is 'test'.");
printf("%s%s\n", indent, "-a");
......@@ -309,7 +321,11 @@ typedef struct DemoArguments {
printf("%s%s\n", indent, "-n");
printf("%s%s%s\n", indent, indent, "num_of_records_per_table, The number of records per table. Default is 100000.");
printf("%s%s\n", indent, "-c");
#ifdef _TD_POWER_
printf("%s%s%s\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/power/'.");
#else
printf("%s%s%s\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/taos/'.");
#endif
printf("%s%s\n", indent, "-x");
printf("%s%s%s\n", indent, indent, "flag, Insert only flag.");
printf("%s%s\n", indent, "-O");
......@@ -513,7 +529,11 @@ int main(int argc, char *argv[]) {
SDemoArguments arguments = { NULL, // host
0, // port
"root", // user
#ifdef _TD_POWER_
"powerdb", // password
#else
"taosdata", // password
#endif
"test", // database
1, // replica
"t", // tb_prefix
......
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
IF (TD_LINUX)
AUX_SOURCE_DIRECTORY(. SRC)
ADD_EXECUTABLE(taosClient client.c)
ADD_EXECUTABLE(taosServer server.c)
TARGET_LINK_LIBRARIES( taosServer -lpthread -lm -lrt )
TARGET_LINK_LIBRARIES( taosClient -lpthread -lm -lrt )
ENDIF ()
This diff is collapsed.
......@@ -64,6 +64,7 @@ static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
}
int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts);
int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);
int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t daylight);
void deltaToUtcInitOnce();
......
......@@ -319,6 +319,8 @@ int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) {
*time = factor * seconds + fraction;
return 0;
}
static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* result) {
*result = val;
......@@ -384,6 +386,23 @@ int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
return getTimestampInUsFromStrImpl(timestamp, token[tokenlen - 1], ts);
}
int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
errno = 0;
/* get the basic numeric value */
*duration = strtoll(token, NULL, 10);
if (errno != 0) {
return -1;
}
*unit = token[tokenLen - 1];
if (*unit == 'n' || *unit == 'y') {
return 0;
}
return getTimestampInUsFromStrImpl(*duration, *unit, duration);
}
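
parseDuration keeps natural units ('n' months, 'y' years) as a raw count plus unit so the caller can do calendar arithmetic later, while fixed units go through getTimestampInUsFromStrImpl and come back in microseconds. A self-contained sketch of that behaviour, assuming the usual TDengine unit letters; the conversion table here is an illustration, not the actual implementation:

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Fixed units are converted to microseconds; 'n' and 'y' keep their raw count.
static int parse_duration(const char *token, int64_t *duration, char *unit) {
  errno = 0;
  *duration = strtoll(token, NULL, 10);
  if (errno != 0) return -1;

  *unit = token[strlen(token) - 1];
  if (*unit == 'n' || *unit == 'y') return 0;  // leave months/years unconverted

  switch (*unit) {
    case 'u': break;                             // already microseconds
    case 'a': *duration *= 1000LL; break;        // milliseconds
    case 's': *duration *= 1000000LL; break;
    case 'm': *duration *= 60LL * 1000000LL; break;
    case 'h': *duration *= 3600LL * 1000000LL; break;
    case 'd': *duration *= 86400LL * 1000000LL; break;
    case 'w': *duration *= 7LL * 86400LL * 1000000LL; break;
    default:  return -1;
  }
  return 0;
}

int main(void) {
  int64_t d; char u;
  parse_duration("10m", &d, &u);  // 10 minutes -> 600000000 us
  printf("10m -> %lld us (unit %c)\n", (long long)d, u);
  parse_duration("3n", &d, &u);   // 3 months, kept as a raw count
  printf("3n  -> %lld (unit %c)\n", (long long)d, u);
  return 0;
}
```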
// internal function, when program is paused in debugger,
// one can call this function from debugger to print a
// timestamp as human readable string, for example (gdb):
......
......@@ -18,15 +18,25 @@
#include "tglobal.h"
void osInit() {
#ifdef _TD_POWER_
if (configDir[0] == 0) {
strcpy(configDir, "/etc/power");
}
strcpy(tsDataDir, "/var/lib/power");
strcpy(tsLogDir, "/var/log/power");
strcpy(tsScriptDir, "/etc/power");
#else
if (configDir[0] == 0) {
strcpy(configDir, "/etc/taos");
}
strcpy(tsDataDir, "/var/lib/taos");
strcpy(tsLogDir, "/var/log/taos");
strcpy(tsScriptDir, "/etc/taos");
#endif
strcpy(tsVnodeDir, "");
strcpy(tsDnodeDir, "");
strcpy(tsMnodeDir, "");
strcpy(tsDataDir, "/var/lib/taos");
strcpy(tsLogDir, "/var/log/taos");
strcpy(tsScriptDir, "/etc/taos");
strcpy(tsOsName, "Linux");
}
\ No newline at end of file
......@@ -22,16 +22,29 @@ extern void taosWinSocketInit();
void osInit() {
taosSetCoreDump();
#ifdef _TD_POWER_
if (configDir[0] == 0) {
strcpy(configDir, "C:/PowerDB/cfg");
}
strcpy(tsVnodeDir, "C:/PowerDB/data");
strcpy(tsDataDir, "C:/PowerDB/data");
strcpy(tsLogDir, "C:/PowerDB/log");
strcpy(tsScriptDir, "C:/PowerDB/script");
#else
if (configDir[0] == 0) {
strcpy(configDir, "C:/TDengine/cfg");
}
strcpy(tsVnodeDir, "C:/TDengine/data");
strcpy(tsDnodeDir, "");
strcpy(tsMnodeDir, "");
strcpy(tsDataDir, "C:/TDengine/data");
strcpy(tsLogDir, "C:/TDengine/log");
strcpy(tsScriptDir, "C:/TDengine/script");
#endif
strcpy(tsDnodeDir, "");
strcpy(tsMnodeDir, "");
strcpy(tsOsName, "Windows");
taosWinSocketInit();
}
\ No newline at end of file
}
......@@ -132,11 +132,12 @@ typedef struct SQueryCostInfo {
typedef struct SQuery {
int16_t numOfCols;
int16_t numOfTags;
char intervalTimeUnit;
char slidingTimeUnit; // interval data type, used for daytime revise
SOrderVal order;
STimeWindow window;
int64_t intervalTime;
int64_t slidingTime; // sliding time for sliding window query
char slidingTimeUnit; // interval data type, used for daytime revise
int16_t precision;
int16_t numOfOutput;
int16_t fillType;
......
......@@ -168,6 +168,7 @@ typedef struct SQLFunctionCtx {
int16_t outputType;
int16_t outputBytes; // size of results, determined by function and input column data type
bool hasNull; // null value exist in current block
bool requireNull; // require null in some function
int16_t functionId; // function id
void * aInputElemBuf;
char * aOutputBuf; // final result output buffer, point to sdata->data
......
This diff is collapsed.
......@@ -55,7 +55,7 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_
SFillColInfo* pColInfo = &pFillInfo->pFillCol[i];
pFillInfo->pData[i] = calloc(1, pColInfo->col.bytes * capacity);
if (pColInfo->flag == TSDB_COL_TAG) {
if (TSDB_COL_IS_TAG(pColInfo->flag)) {
bool exists = false;
for(int32_t j = 0; j < k; ++j) {
if (pFillInfo->pTags[j].col.colId == pColInfo->col.colId) {
......@@ -155,7 +155,7 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInpu
char* data = pInput->data + pCol->col.offset * pInput->num;
memcpy(pFillInfo->pData[i], data, (size_t)(pInput->num * pCol->col.bytes));
if (pCol->flag == TSDB_COL_TAG) { // copy the tag value to tag value buffer
if (TSDB_COL_IS_TAG(pCol->flag)) { // copy the tag value to tag value buffer
for (int32_t j = 0; j < pFillInfo->numOfTags; ++j) {
SFillTagColInfo* pTag = &pFillInfo->pTags[j];
if (pTag->col.colId == pCol->col.colId) {
......@@ -179,14 +179,22 @@ int64_t getFilledNumOfRes(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows
if (numOfRows > 0) { // still fill gap within current data block, not generating data after the result set.
TSKEY lastKey = tsList[pFillInfo->numOfRows - 1];
numOfRes = (int64_t)(ABS(lastKey - pFillInfo->start) / pFillInfo->slidingTime) + 1;
if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
numOfRes = (int64_t)(ABS(lastKey - pFillInfo->start) / pFillInfo->slidingTime) + 1;
} else {
numOfRes = taosCountNatualInterval(lastKey, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
}
assert(numOfRes >= numOfRows);
} else { // reach the end of data
if ((ekey1 < pFillInfo->start && FILL_IS_ASC_FILL(pFillInfo)) ||
(ekey1 > pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo))) {
return 0;
} else { // the numOfRes rows are all filled with specified policy
}
// the numOfRes rows are all filled with specified policy
if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
numOfRes = (ABS(ekey1 - pFillInfo->start) / pFillInfo->slidingTime) + 1;
} else {
numOfRes = taosCountNatualInterval(ekey1, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
}
}
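
getFilledNumOfRes above now counts the rows to generate differently per unit type: plain division for fixed-length sliding windows, taosCountNatualInterval for months and years. A tiny sketch of the fixed-length branch, assuming millisecond timestamps:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// Number of windows needed to fill the gap from `start` up to `lastKey`,
// for fixed-length sliding windows (the non-'n'/'y' branch above).
static int64_t filled_rows(int64_t lastKey, int64_t start, int64_t sliding) {
  return llabs(lastKey - start) / sliding + 1;
}

int main(void) {
  // 5 s sliding windows, millisecond timestamps: a 23 s gap needs 5 result rows
  printf("%lld\n", (long long)filled_rows(23000, 0, 5000));
  return 0;
}
```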
......@@ -251,7 +259,7 @@ int taosDoLinearInterpolation(int32_t type, SPoint* point1, SPoint* point2, SPoi
static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t num) {
for(int32_t j = 0; j < pFillInfo->numOfCols; ++j) {
SFillColInfo* pCol = &pFillInfo->pFillCol[j];
if (pCol->flag == TSDB_COL_NORMAL) {
if (TSDB_COL_IS_NORMAL_COL(pCol->flag)) {
continue;
}
......@@ -366,7 +374,12 @@ static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* nu
setTagsValue(pFillInfo, data, *num);
}
pFillInfo->start += (pFillInfo->slidingTime * step);
// TODO: natural sliding time
if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
pFillInfo->start += (pFillInfo->slidingTime * step);
} else {
pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
}
pFillInfo->numOfCurrent++;
(*num) += 1;
......@@ -446,7 +459,7 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu
// assign rows to dst buffer
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
if (pCol->flag == TSDB_COL_TAG) {
if (TSDB_COL_IS_TAG(pCol->flag)) {
continue;
}
......@@ -473,7 +486,12 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu
// set the tag value for final result
setTagsValue(pFillInfo, data, num);
pFillInfo->start += (pFillInfo->slidingTime * step);
// TODO: natural sliding time
if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
pFillInfo->start += (pFillInfo->slidingTime * step);
} else {
pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
}
pFillInfo->rowIdx += 1;
pFillInfo->numOfCurrent +=1;
......
......@@ -1130,8 +1130,15 @@ static int tsdbCheckAndDecodeColumnData(SDataCol *pDataCol, char *content, int32
// Decode the data
if (comp) {
// // Need to decompress
pDataCol->len = (*(tDataTypeDesc[pDataCol->type].decompFunc))(
content, len - sizeof(TSCKSUM), numOfRows, pDataCol->pData, pDataCol->spaceSize, comp, buffer, bufferSize);
int tlen = (*(tDataTypeDesc[pDataCol->type].decompFunc))(content, len - sizeof(TSCKSUM), numOfRows, pDataCol->pData,
pDataCol->spaceSize, comp, buffer, bufferSize);
if (tlen <= 0) {
tsdbError("Failed to decompress column, file corrupted, len:%d comp:%d numOfRows:%d maxPoints:%d bufferSize:%d",
len, comp, numOfRows, maxPoints, bufferSize);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
return -1;
}
pDataCol->len = tlen;
if (pDataCol->type == TSDB_DATA_TYPE_BINARY || pDataCol->type == TSDB_DATA_TYPE_NCHAR) {
dataColSetOffset(pDataCol, numOfRows);
}
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -260,8 +260,13 @@ void taosReadGlobalLogCfg() {
}
strcpy(configDir, full_path.we_wordv[0]);
} else {
#ifdef _TD_POWER_
printf("configDir:%s not there, use default value: /etc/power", configDir);
strcpy(configDir, "/etc/power");
#else
printf("configDir:%s not there, use default value: /etc/taos", configDir);
strcpy(configDir, "/etc/taos");
#endif
}
wordfree(&full_path);
......
......@@ -66,7 +66,11 @@ int32_t tsAsyncLog = 1;
float tsTotalLogDirGB = 0;
float tsAvailLogDirGB = 0;
float tsMinimalLogDirGB = 0.1f;
#ifdef _TD_POWER_
char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/power";
#else
char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/taos";
#endif
static SLogObj tsLogObj = { .fileNum = 1 };
static void * taosAsyncOutputLog(void *param);
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.