Commit 60bda662 authored by Ganlin Zhao

Merge branch 'develop' into fix/TD-11592

[submodule "src/connector/go"] [submodule "src/connector/go"]
path = src/connector/go path = src/connector/go
url = https://github.com/taosdata/driver-go.git url = https://github.com/taosdata/driver-go.git
[submodule "src/connector/grafanaplugin"]
path = src/connector/grafanaplugin
url = https://github.com/taosdata/grafanaplugin.git
[submodule "src/connector/hivemq-tdengine-extension"] [submodule "src/connector/hivemq-tdengine-extension"]
path = src/connector/hivemq-tdengine-extension path = src/connector/hivemq-tdengine-extension
url = https://github.com/taosdata/hivemq-tdengine-extension.git url = https://github.com/taosdata/hivemq-tdengine-extension.git
......
...@@ -66,10 +66,12 @@ def pre_test(){ ...@@ -66,10 +66,12 @@ def pre_test(){
} }
sh''' sh'''
cd ${WKC} cd ${WKC}
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git pull >/dev/null git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD git checkout -qf FETCH_HEAD
git clean -dfx git clean -dfx
git ls-files --stage | grep 160000 | awk '{print $4}' | xargs git rm --cached
git submodule update --init --recursive git submodule update --init --recursive
cd ${WK} cd ${WK}
git reset --hard HEAD~10 git reset --hard HEAD~10
...@@ -139,10 +141,12 @@ def pre_test_noinstall(){ ...@@ -139,10 +141,12 @@ def pre_test_noinstall(){
} }
sh''' sh'''
cd ${WKC} cd ${WKC}
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git pull >/dev/null git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD git checkout -qf FETCH_HEAD
git clean -dfx git clean -dfx
git ls-files --stage | grep 160000 | awk '{print $4}' | xargs git rm --cached
git submodule update --init --recursive git submodule update --init --recursive
cd ${WK} cd ${WK}
git reset --hard HEAD~10 git reset --hard HEAD~10
...@@ -209,10 +213,12 @@ def pre_test_mac(){ ...@@ -209,10 +213,12 @@ def pre_test_mac(){
} }
sh''' sh'''
cd ${WKC} cd ${WKC}
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git pull >/dev/null git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD git checkout -qf FETCH_HEAD
git clean -dfx git clean -dfx
git ls-files --stage | grep 160000 | awk '{print $4}' | xargs git rm --cached
git submodule update --init --recursive git submodule update --init --recursive
cd ${WK} cd ${WK}
git reset --hard HEAD~10 git reset --hard HEAD~10
......
...@@ -254,23 +254,25 @@ Query OK, 2 row(s) in set (0.001700s) ...@@ -254,23 +254,25 @@ Query OK, 2 row(s) in set (0.001700s)
TDengine 提供了丰富的应用程序开发接口,其中包括C/C++、Java、Python、Go、Node.js、C# 、RESTful 等,便于用户快速开发应用: TDengine 提供了丰富的应用程序开发接口,其中包括C/C++、Java、Python、Go、Node.js、C# 、RESTful 等,便于用户快速开发应用:
- Java - [Java](https://www.taosdata.com/cn/documentation/connector/java)
- C/C++ - [C/C++](https://www.taosdata.com/cn/documentation/connector#c-cpp)
- Python - [Python](https://www.taosdata.com/cn/documentation/connector#python)
- Go - [Go](https://www.taosdata.com/cn/documentation/connector#go)
- RESTful API - [RESTful API](https://www.taosdata.com/cn/documentation/connector#restful)
- Node.js - [Node.js](https://www.taosdata.com/cn/documentation/connector#nodejs)
- [Rust](https://www.taosdata.com/cn/documentation/connector/rust)
## 第三方连接器 ## 第三方连接器
TDengine 社区生态中也有一些非常友好的第三方连接器,可以通过以下链接访问它们的源码。 TDengine 社区生态中也有一些非常友好的第三方连接器,可以通过以下链接访问它们的源码。
- [Rust Connector](https://github.com/taosdata/TDengine/tree/master/tests/examples/rust) - [Rust Bindings](https://github.com/songtianyi/tdengine-rust-bindings/tree/master/examples)
- [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos) - [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos)
- [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/tests/examples/lua) - [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/tests/examples/lua)
......
...@@ -281,18 +281,19 @@ drop database db; ...@@ -281,18 +281,19 @@ drop database db;
TDengine provides abundant developing tools for users to develop on TDengine. Follow the links below to find your desired connectors and relevant documentation. TDengine provides abundant developing tools for users to develop on TDengine. Follow the links below to find your desired connectors and relevant documentation.
- [Java](https://www.taosdata.com/en/documentation/connector/#Java-Connector) - [Java](https://www.taosdata.com/en/documentation/connector/java)
- [C/C++](https://www.taosdata.com/en/documentation/connector/#C/C++-Connector) - [C/C++](https://www.taosdata.com/en/documentation/connector#c-cpp)
- [Python](https://www.taosdata.com/en/documentation/connector/#Python-Connector) - [Python](https://www.taosdata.com/en/documentation/connector#python)
- [Go](https://www.taosdata.com/en/documentation/connector/#Go-Connector) - [Go](https://www.taosdata.com/en/documentation/connector#go)
- [RESTful API](https://www.taosdata.com/en/documentation/connector/#RESTful-Connector) - [RESTful API](https://www.taosdata.com/en/documentation/connector#restful)
- [Node.js](https://www.taosdata.com/en/documentation/connector/#Node.js-Connector) - [Node.js](https://www.taosdata.com/en/documentation/connector#nodejs)
- [Rust](https://www.taosdata.com/en/documentation/connector/rust)
### Third Party Connectors ### Third Party Connectors
The TDengine community has also kindly built some of their own connectors! Follow the links below to find the source code for them. The TDengine community has also kindly built some of their own connectors! Follow the links below to find the source code for them.
- [Rust Connector](https://github.com/taosdata/TDengine/tree/master/tests/examples/rust) - [Rust Bindings](https://github.com/songtianyi/tdengine-rust-bindings/tree/master/examples)
- [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos) - [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos)
- [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/tests/examples/lua) - [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/tests/examples/lua)
......
...@@ -41,7 +41,6 @@ ELSEIF (TD_WINDOWS) ...@@ -41,7 +41,6 @@ ELSEIF (TD_WINDOWS)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/jh_taos.exe DESTINATION .) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/jh_taos.exe DESTINATION .)
ELSE () ELSE ()
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosdemo.exe DESTINATION .)
ENDIF () ENDIF ()
#INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS taos RUNTIME DESTINATION driver)
......
...@@ -596,7 +596,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta ...@@ -596,7 +596,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta
"interlace_rows": 设置轮询插入每个单表数据的条目数,如果interlace_rows*childtable_count*supertable_num小于num_of_records_per_req时,则请求插入的数目以interlace_rows*childtable_count*supertable_num为准。可选项,缺省是0。 "interlace_rows": 设置轮询插入每个单表数据的条目数,如果interlace_rows*childtable_count*supertable_num小于num_of_records_per_req时,则请求插入的数目以interlace_rows*childtable_count*supertable_num为准。可选项,缺省是0。
"num_of_records_per_req": 每条请求数据内容包含的插入数据记录数目,该数据组成的sql不能大于maxsqllen,如果过大,则取taood限制的1M长度(1048576)。可选项,缺省是INT64_MAX 32766(受服务端限制)。0代表不插入数据,建议配置大于0。 "num_of_records_per_req": 每条请求数据内容包含的插入数据记录数目,该数据组成的sql不能大于maxsqllen,如果过大,则取taosd限制的1M长度(1048576)。0代表不插入数据,建议配置大于0。
"databases": [{ "databases": [{
......
...@@ -22,14 +22,14 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, ...@@ -22,14 +22,14 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
**Tips:** **Tips:**
- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过16K,一条SQL语句总长度不能超过64K(可通过参数maxSQLLength配置,最大可配置为1M) - 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过16K,一条SQL语句总长度不能超过1M,批量写入的示例代码见本节提示之后。
- TDengine支持多线程同时写入,要进一步提高写入速度,一个客户端需要打开20个以上的线程同时写。但线程数达到一定数量后,无法再提高,甚至还会下降,因为线程频繁切换,带来额外开销。 - TDengine支持多线程同时写入,要进一步提高写入速度,一个客户端需要打开20个以上的线程同时写。但线程数达到一定数量后,无法再提高,甚至还会下降,因为线程频繁切换,带来额外开销。
- 对同一张表,如果新插入记录的时间戳已经存在,默认情形下(UPDATE=0)新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。如果在创建数据库时使用了 UPDATE 1 选项,插入相同时间戳的新记录将覆盖原有记录。 - 对同一张表,如果新插入记录的时间戳已经存在,默认情形下(UPDATE=0)新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。如果在创建数据库时使用了 UPDATE 1 选项,插入相同时间戳的新记录将覆盖原有记录。
- 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还早的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days为2,那么无法写入比当前时间还晚2天的数据。 - 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还早的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days为2,那么无法写入比当前时间还晚2天的数据。
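下面给出一个使用 C 接口进行批量写入的最小示意程序(仅为示例:库名 power、子表 d1001 以及连接参数均为假设值,请按实际环境调整):

```c
#include <stdio.h>
#include <taos.h>

int main() {
  // 连接本机 TDengine(用户名/密码/库名仅为示例,端口 0 表示使用配置文件中的默认端口)
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "power", 0);
  if (taos == NULL) {
    printf("failed to connect to TDengine\n");
    return -1;
  }

  // 一条 SQL 携带多条记录,减少请求次数以提高写入效率
  const char *sql =
      "INSERT INTO d1001 VALUES"
      " (1538548685000, 10.3, 219, 0.31)"
      " (1538548695000, 12.6, 218, 0.33)";

  TAOS_RES *res = taos_query(taos, sql);
  if (taos_errno(res) != 0) {
    printf("insert failed: %s\n", taos_errstr(res));
  } else {
    printf("affected rows: %d\n", taos_affected_rows(res));
  }

  taos_free_result(res);
  taos_close(taos);
  return 0;
}
```

编译时需要链接 TDengine 客户端库,例如 `gcc demo.c -ltaos`。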
## <a class="anchor" id="schemaless"></a>无模式(Schemaless)写入 ## <a class="anchor" id="schemaless"></a>无模式(Schemaless)写入
**前言** **前言**
<br/>在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 从 2.2.0.0 版本开始,提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless 将自动增加必要的数据列,保证用户写入的数据可以被正确存储。 <br/>在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 从 2.2.0.0 版本开始,提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless 将自动增加必要的数据列,保证用户写入的数据可以被正确存储。
<br/>目前,TDengine 的 C/C++ Connector 提供支持 Schemaless 的操作接口,详情请参见 [Schemaless 方式写入接口](https://www.taosdata.com/cn/documentation/connector#schemaless)章节。这里对 Schemaless 的数据表达格式进行了描述。 <br/>目前,TDengine 的 C/C++ Connector 提供支持 Schemaless 的操作接口,详情请参见 [Schemaless 方式写入接口](https://www.taosdata.com/cn/documentation/connector#schemaless)章节。这里对 Schemaless 的数据表达格式进行了描述。
<br/>无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,您也可以通过 SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。 <br/>无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,您也可以通过 SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。
...@@ -74,21 +74,19 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 ...@@ -74,21 +74,19 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
### 无模式写入的主要处理逻辑 ### 无模式写入的主要处理逻辑
无模式写入按照如下原则来处理行数据: 无模式写入按照如下原则来处理行数据:
1. 当 tag_set 中有 ID 字段时,该字段的值将作为子表的表名。 <br/>1. 将使用如下规则来生成子表名:首先将measurement 的名称和标签的 key 和 value 组合成为如下的字符串
2. 没有 ID 字段时,将使用如下规则来生成子表名:
首先将measurement 的名称和标签的 key 和 value 组合成为如下的字符串
```json ```json
"measurement,tag_key1=tag_value1,tag_key2=tag_value2" "measurement,tag_key1=tag_value1,tag_key2=tag_value2"
``` ```
需要注意的是,这里的tag_key1, tag_key2并不是用户输入的标签的原始顺序,而是使用了标签名称按照字符串升序排列后的结果。所以,tag_key1 并不是在行协议中输入的第一个标签。 需要注意的是,这里的tag_key1, tag_key2并不是用户输入的标签的原始顺序,而是使用了标签名称按照字符串升序排列后的结果。所以,tag_key1 并不是在行协议中输入的第一个标签。
排列完成以后计算该字符串的 MD5 散列值 "md5_val"。然后将计算的结果与字符串组合生成表名:“t_md5_val”。其中的 “t_” 是固定的前缀,每个通过该映射关系自动生成的表都具有该前缀。该生成过程的示意代码见本节处理逻辑之后。
<br/>3. 如果解析行协议获得的超级表不存在,则会创建这个超级表。 <br/>2. 如果解析行协议获得的超级表不存在,则会创建这个超级表。
<br/>4. 如果解析行协议获得子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。 <br/>3. 如果解析行协议获得子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。
<br/>5. 如果数据行中指定的标签列或普通列不存在,则在超级表中增加对应的标签列或普通列(只增不减)。 <br/>4. 如果数据行中指定的标签列或普通列不存在,则在超级表中增加对应的标签列或普通列(只增不减)。
<br/>6. 如果超级表中存在一些标签列或普通列未在一个数据行中被指定取值,那么这些列的值在这一行中会被置为 NULL。 <br/>5. 如果超级表中存在一些标签列或普通列未在一个数据行中被指定取值,那么这些列的值在这一行中会被置为 NULL。
<br/>7. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。 <br/>6. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。
<br/>8. 如果指定的数据子表已经存在,而且本次指定的标签列取值跟已保存的值不一样,那么最新的数据行中的值会覆盖旧的标签列取值。 <br/>7. 如果指定的数据子表已经存在,而且本次指定的标签列取值跟已保存的值不一样,那么最新的数据行中的值会覆盖旧的标签列取值。
<br/>9. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。 <br/>8. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。
**备注:** **备注:**
<br/>无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 16k 字节。这方面的具体限制约束请参见 [TAOS SQL 边界限制](https://www.taosdata.com/cn/documentation/taos-sql#limitation) 章节。 <br/>无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 16k 字节。这方面的具体限制约束请参见 [TAOS SQL 边界限制](https://www.taosdata.com/cn/documentation/taos-sql#limitation) 章节。
...@@ -116,6 +114,17 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 ...@@ -116,6 +114,17 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
在 SML_TELNET_PROTOCOL 和 SML_JSON_PROTOCOL 模式下,根据时间戳的长度来确定时间精度(与 OpenTSDB 标准操作方式相同),此时会忽略用户指定的时间分辨率。 在 SML_TELNET_PROTOCOL 和 SML_JSON_PROTOCOL 模式下,根据时间戳的长度来确定时间精度(与 OpenTSDB 标准操作方式相同),此时会忽略用户指定的时间分辨率。
**数据模式映射规则**
<br/>本节将说明行协议的数据如何映射成为具有模式的数据。每个行协议中数据 measurement 映射为 超级表名称。tag_set 中的 标签名称为 数据模式中的标签名,field_set 中的名称为列名称。以如下数据为例,说明映射规则:
```json
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
```
该行数据映射生成一个超级表: st, 其包含了 3 个类型为 nchar 的标签,分别是:t1, t2, t3。五个数据列,分别是ts(timestamp),c1 (bigint),c3(binary),c2 (bool), c4 (double)。映射成为如下 SQL 语句:
```json
create stable st (_ts timestamp, c1 bigint, c2 bool, c3 binary(6), c4 double) tags(t1 nchar(1), t2 nchar(1), t3 nchar(2))
```
**数据模式变更处理** **数据模式变更处理**
<br/>本节将说明不同行数据写入情况下,对于数据模式的影响。 <br/>本节将说明不同行数据写入情况下,对于数据模式的影响。
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
默认情况下,[libtaos-rs] 使用 C 接口连接数据库,所以您需要: 默认情况下,[libtaos-rs] 使用 C 接口连接数据库,所以您需要:
- [TDengine] [客户端](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) - [TDengine客户端](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85)
- `clang`: `bindgen` 使用 `libclangAST` 来生成对应的Rust绑定。 - `clang`: `bindgen` 使用 `libclangAST` 来生成对应的Rust绑定。
## 特性列表 ## 特性列表
...@@ -62,7 +62,7 @@ libtaos = { version = "*", features = ["r2d2"] } ...@@ -62,7 +62,7 @@ libtaos = { version = "*", features = ["r2d2"] }
libtaos = { version = "*", features = ["rest"] } libtaos = { version = "*", features = ["rest"] }
``` ```
本项目中提供一个 [示例程序]([examples/demo.rs](https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs)) 如下: 本项目中提供一个 [示例程序](https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs) 如下:
```rust ```rust
// ... // ...
......
...@@ -1037,43 +1037,62 @@ HTTP 请求 URL 采用 `sqlutc` 时,返回结果集的时间戳将采用 UTC ...@@ -1037,43 +1037,62 @@ HTTP 请求 URL 采用 `sqlutc` 时,返回结果集的时间戳将采用 UTC
## <a class="anchor" id="csharp"></a>CSharp Connector ## <a class="anchor" id="csharp"></a>CSharp Connector
C#连接器支持的系统有:Linux 64/Windows x64/Windows x86 * C#连接器支持的系统有:Linux 64/Windows x64/Windows x86
* C#连接器现在也支持从[Nuget下载引用](https://www.nuget.org/packages/TDengine.Connector/)
* 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(Dapper)框架驱动。
### 安装准备 ### 安装准备
* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver) * 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)
* 接口文件TDengineDrivercs.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。 * 接口文件TDengineDrivercs.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。
* 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(Dapper)框架驱动。 * 安装[.NET SDK](https://dotnet.microsoft.com/download)
### 示例程序 ### 示例程序
示例程序源码位于install_directory/examples/C#,有: 示例程序源码位于
* {client_install_directory}/examples/C#
* [github C# example source code](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%23)
TDengineTest.cs C#示例源程序 **注意:** TDengineTest.cs C#示例源程序,包含了数据库连接参数,以及如何执行数据插入、查询等操作。
### 安装验证 ### 安装验证
运行install_directory/examples/C#/C#Checker/C#Checker.exe 需要先安装 .Net SDK
```cmd ```cmd
cd {install_directory}/examples/C#/C#Checker cd {client_install_directory}/examples/C#/C#Checker
csc /optimize *.cs //运行测试
C#Checker.exe -h <fqdn> dotnet run -- -h <FQDN>. // 此步骤会先build,然后再运行。
``` ```
### C#连接器的使用 ### C#连接器的使用
在Windows系统上,C#应用程序可以使用TDengine的C#连接器接口来执行所有数据库的操作。使用的具体步骤如下所示: 在Windows系统上,C#应用程序可以使用TDengine的C#连接器接口来执行所有数据库的操作。使用的具体步骤如下所示:
1. 将接口文件TDengineDrivercs.cs加入到应用程序所在的项目空间中。 需要 .NET SDK
2. 用户可以参考TDengineTest.cs来定义数据库连接参数,以及如何执行数据插入、查询等操作。 * 创建一个c# project.
``` cmd
mkdir test
cd test
dotnet new console
```
* 通过Nuget引用TDengineDriver包
``` cmd
dotnet add package TDengine.Connector
```
* 在项目中需要用到TDengineConnector的地方引用TDengineDriver namespace。
```c#
using TDengineDriver;
```
* 用户可以参考[TDengineTest.cs](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%23/TDengineTest)来定义数据库连接参数,以及如何执行数据插入、查询等操作。
此接口需要用到taos.dll文件,所以在执行应用程序前,拷贝Windows客户端install_directory/driver目录中的taos.dll文件到项目最后生成.exe可执行文件所在的文件夹。之后运行exe文件,即可访问TDengine数据库并做插入、查询等操作。
**注意:** **注意:**
1. TDengine V2.0.3.0之后同时支持32位和64位Windows系统,所以C#项目在生成.exe文件时,“解决方案”/“项目”的“平台”请选择对应的“X86” 或“x64”。 * TDengine V2.0.3.0之后同时支持32位和64位Windows系统,所以C#项目在生成.exe文件时,“解决方案”/“项目”的“平台”请选择对应的“X86” 或“x64”。
2. 此接口目前已经在Visual Studio 2015/2017中验证过,其它VS版本尚待验证。 * 此接口目前已经在Visual Studio 2015/2017中验证过,其它VS版本尚待验证。
* 此连接器需要用到taos.dll文件,所以在未安装客户端时需要在执行应用程序前,拷贝Windows{client_install_directory}/driver目录中的taos.dll文件到项目最后生成.exe可执行文件所在的文件夹。之后运行exe文件,即可访问TDengine数据库并做插入、查询等操作。
### 第三方驱动 ### 第三方驱动
......
...@@ -166,7 +166,7 @@ taosd -C ...@@ -166,7 +166,7 @@ taosd -C
| 48 | mqttPort | YES | **S** | | mqtt client name | | | 1883 | | 48 | mqttPort | YES | **S** | | mqtt client name | | | 1883 |
| 49 | mqttTopic | YES | **S** | | | | | /test | | 49 | mqttTopic | YES | **S** | | | | | /test |
| 50 | compressMsgSize | | **S** | bytes | 客户端与服务器之间进行消息通讯过程中,对通讯的消息进行压缩的阈值。如果要压缩消息,建议设置为64330字节,即大于64330字节的消息体才进行压缩。 | `0 `表示对所有的消息均进行压缩 >0: 超过该值的消息才进行压缩 -1: 不压缩 | -1 | | | 50 | compressMsgSize | | **S** | bytes | 客户端与服务器之间进行消息通讯过程中,对通讯的消息进行压缩的阈值。如果要压缩消息,建议设置为64330字节,即大于64330字节的消息体才进行压缩。 | `0 `表示对所有的消息均进行压缩 >0: 超过该值的消息才进行压缩 -1: 不压缩 | -1 | |
| 51 | maxSQLLength | | **C** | bytes | 单条SQL语句允许的最长限制 | 65480-1048576 | 65380 | | | 51 | maxSQLLength | | **C** | bytes | 单条SQL语句允许的最长限制 | 65480-1048576 | 1048576 | |
| 52 | maxNumOfOrderedRes | | **SC** | | 支持超级表时间排序允许的最多记录数限制 | | 10万 | | | 52 | maxNumOfOrderedRes | | **SC** | | 支持超级表时间排序允许的最多记录数限制 | | 10万 | |
| 53 | timezone | | **SC** | | 时区 | | 从系统中动态获取当前的时区设置 | | | 53 | timezone | | **SC** | | 时区 | | 从系统中动态获取当前的时区设置 | |
| 54 | locale | | **SC** | | 系统区位信息及编码格式 | | 系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 | | | 54 | locale | | **SC** | | 系统区位信息及编码格式 | | 系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 | |
...@@ -731,7 +731,7 @@ rmtaos ...@@ -731,7 +731,7 @@ rmtaos
- 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线” - 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线”
- 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳(从 2.1.7.0 版本开始,改为最多支持 4096 列) - 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳(从 2.1.7.0 版本开始,改为最多支持 4096 列)
- 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置) - 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置)
- 单条 SQL 语句默认最大字符串长度:65480 byte,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1048576 byte - 单条 SQL 语句默认最大字符串长度:1048576 byte,但可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 byte
- 数据库副本数:不能超过 3 - 数据库副本数:不能超过 3
- 用户名:不能超过 23 个 byte - 用户名:不能超过 23 个 byte
- 用户密码:不能超过 15 个 byte - 用户密码:不能超过 15 个 byte
......
...@@ -1589,7 +1589,7 @@ SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters ...@@ -1589,7 +1589,7 @@ SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters
- 表名最大长度为 192,每行数据最大长度 16k 个字符, 从 2.1.7.0 版本开始,每行数据最大长度 48k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。 - 表名最大长度为 192,每行数据最大长度 16k 个字符, 从 2.1.7.0 版本开始,每行数据最大长度 48k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
- 列名最大长度为 64,最多允许 1024 列,最少需要 2 列,第一列必须是时间戳。(从 2.1.7.0 版本开始,改为最多允许 4096 列) - 列名最大长度为 64,最多允许 1024 列,最少需要 2 列,第一列必须是时间戳。(从 2.1.7.0 版本开始,改为最多允许 4096 列)
- 标签名最大长度为 64,最多允许 128 个,可以 1 个,一个表中标签值的总长度不超过 16k 个字符。 - 标签名最大长度为 64,最多允许 128 个,可以 1 个,一个表中标签值的总长度不超过 16k 个字符。
- SQL 语句最大长度 65480 个字符,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1M - SQL 语句最大长度 1048576 个字符,也可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576
- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。(从 2.1.7.0 版本开始,改为最多允许 4096 列) - SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。(从 2.1.7.0 版本开始,改为最多允许 4096 列)
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制。 - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制。
......
...@@ -181,7 +181,7 @@ select count(*) from memory ...@@ -181,7 +181,7 @@ select count(*) from memory
完成查询后,如果写入的数据与预期的相比没有差别,同时写入程序本身没有异常的报错信息,那么可用确认数据写入是完整有效的。 完成查询后,如果写入的数据与预期的相比没有差别,同时写入程序本身没有异常的报错信息,那么可用确认数据写入是完整有效的。
TDengine不支持采用OpenTSDB的查询语法进行查询或数据获取处理,但是针对OpenTSDB的每种查询都提供对应的支持。你可以用检查附件2获取对应的查询处理的调整和应用使用的方式,如果需要全面了解TDengine支持的查询类型,请参阅TDengine的用户手册。 TDengine不支持采用OpenTSDB的查询语法进行查询或数据获取处理,但是针对OpenTSDB的每种查询都提供对应的支持。可以用检查附录1获取对应的查询处理的调整和应用使用的方式,如果需要全面了解TDengine支持的查询类型,请参阅TDengine的用户手册。
TDengine支持标准的JDBC 3.0接口操纵数据库,你也可以使用其他类型的高级语言的连接器来查询读取数据,以适配你的应用。具体的操作和使用帮助也请参阅用户手册。 TDengine支持标准的JDBC 3.0接口操纵数据库,你也可以使用其他类型的高级语言的连接器来查询读取数据,以适配你的应用。具体的操作和使用帮助也请参阅用户手册。
......
...@@ -12,7 +12,7 @@ Thanks [@songtianyi](https://github.com/songtianyi) for [libtdengine](https://gi ...@@ -12,7 +12,7 @@ Thanks [@songtianyi](https://github.com/songtianyi) for [libtdengine](https://gi
if you use the default features, it'll depend on: if you use the default features, it'll depend on:
- [TDengine] Client library and headers. - [TDengine Client](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) library and headers.
- clang, because bindgen requires the clang AST library.
## Features
...@@ -66,7 +66,7 @@ For REST client: ...@@ -66,7 +66,7 @@ For REST client:
libtaos = { version = "*", features = ["rest"] } libtaos = { version = "*", features = ["rest"] }
``` ```
There's a [demo app]([examples/demo.rs](https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs)) in examples directory, looks like this: There's a [demo app](https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs) in examples directory, looks like this:
```rust ```rust
// ... // ...
......
...@@ -841,37 +841,60 @@ Only some configuration parameters related to RESTful interface are listed below ...@@ -841,37 +841,60 @@ Only some configuration parameters related to RESTful interface are listed below
## <a class="anchor" id="csharp"></a> CSharp Connector ## <a class="anchor" id="csharp"></a> CSharp Connector
The C # connector supports: Linux 64/Windows x64/Windows x86.
* The C# connector supports: Linux 64/Windows x64/Windows x86.
* The C# connector can also be downloaded and referenced as a NuGet package from [Nuget.org](https://www.nuget.org/packages/TDengine.Connector/).
* On Windows, C# applications can use the native C interface of TDengine to perform all database operations, and future versions will provide the ORM (Dapper) framework driver.
### Installation preparation ### Installation preparation
- For application driver installation, please refer to the[ steps of installing connector driver](https://www.taosdata.com/en/documentation/connector#driver). * For application driver installation, please refer to the[ steps of installing connector driver](https://www.taosdata.com/en/documentation/connector#driver).
- . NET interface file TDengineDrivercs.cs and reference sample TDengineTest.cs are both located in the Windows client install_directory/examples/C# directory. * .NET interface file TDengineDrivercs.cs and reference sample TDengineTest.cs are both located in the Windows client install_directory/examples/C# directory.
- On Windows, C # applications can use the native C interface of TDengine to perform all database operations, and future versions will provide the ORM (Dapper) framework driver. * Install [.NET SDK](https://dotnet.microsoft.com/download)
### Installation verification ### Example Source Code
You can find the sample code in the following locations:
* {client_install_directory}/examples/C#
* [github C# example source code](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%23)
Run install_directory/examples/C#/C#Checker/C#Checker.exe **Tips:** TDengineTest.cs is one of the C# connector's sample programs; it covers basic operations such as connecting and executing SQL.
### Installation verification
Run {client_install_directory}/examples/C#/C#Checker/C#Checker.cs
The .NET SDK needs to be installed first.
```cmd ```cmd
cd {install_directory}/examples/C#/C#Checker cd {client_install_directory}/examples/C#/C#Checker
csc /optimize *.cs //run c#checker.cs
C#Checker.exe -h <fqdn> dotnet run -- -h <FQDN> //dotnet run will build project first by default.
``` ```
### How to use C# connector ### How to use C# connector
On Windows system, .NET applications can use the .NET interface of TDengine to perform all database operations. The steps to use it are as follows: On Windows system, .NET applications can use the .NET interface of TDengine to perform all database operations. The steps to use it are as follows:
1. Add the. NET interface file TDengineDrivercs.cs to the .NET project where the application is located. need to install .NET SDK first
2. Users can refer to TDengineTest.cs to define database connection parameters and how to perform data insert, query and other operations; * create a c# project.
``` cmd
mkdir test
cd test
dotnet new console
```
* Add TDengineDriver as a package through NuGet
This. NET interface requires the taos.dll file, so before executing the application, copy the taos.dll file in the Windows client install_directory/driver directory to the folder where the. NET project finally generated the .exe executable file. After running the exe file, you can access the TDengine database and do operations such as insert and query. ``` cmd
dotnet add package TDengine.Connector
```
* Include the TDengineDriver namespace wherever your application uses the TDengine connector.
```C#
using TDengineDriver;
```
* Refer to [TDengineTest.cs](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%23/TDengineTest) to learn how to define database connection parameters and perform inserts, queries, and other basic data manipulations.
**Note:** **Note:**
1. TDengine V2.0.3.0 and later supports both 32-bit and 64-bit Windows systems, so when the .NET project generates a .exe file, please select the corresponding "X86" or "x64" for the "Platform" under "Solution"/"Project". * TDengine V2.0.3.0 and later supports both 32-bit and 64-bit Windows systems, so when the .NET project generates a .exe file, please select the corresponding "X86" or "x64" for the "Platform" under "Solution"/"Project".
2. This .NET interface has been verified in Visual Studio 2015/2017; other VS versions have yet to be verified. * This .NET interface has been verified in Visual Studio 2015/2017; other VS versions have yet to be verified.
* This .NET connector interface requires the taos.dll file, so before executing the application, copy the taos.dll file from the Windows {client_install_directory}/driver directory to the folder where the .NET project generates its .exe executable. After running the exe file, you can access the TDengine database and perform operations such as insert and query (this step can be skipped if the client is already installed on your machine).
### Third-party Driver ### Third-party Driver
......
[Unit]
Description=Nginx For TDengine Service
After=network-online.target
Wants=network-online.target
[Service]
Type=forking
PIDFile=/usr/local/nginxd/logs/nginx.pid
ExecStart=/usr/local/nginxd/sbin/nginx
ExecStop=/usr/local/nginxd/sbin/nginx -s stop
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
[Unit]
Description=TDengine server service
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/usr/bin/taosd
ExecStartPre=/usr/local/taos/bin/startPre.sh
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
[Unit]
Description=TDengine arbitrator service
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/usr/bin/tarbitrator
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
...@@ -14,6 +14,7 @@ set -e ...@@ -14,6 +14,7 @@ set -e
# -d [taos | power | tq | pro | kh | jh] # -d [taos | power | tq | pro | kh | jh]
# -n [2.0.0.3] # -n [2.0.0.3]
# -m [2.0.0.0] # -m [2.0.0.0]
# -H [ false | true]
# set parameters by default value # set parameters by default value
verMode=edge # [cluster, edge] verMode=edge # [cluster, edge]
...@@ -26,8 +27,9 @@ dbName=taos # [taos | power | tq | pro | kh | jh] ...@@ -26,8 +27,9 @@ dbName=taos # [taos | power | tq | pro | kh | jh]
allocator=glibc # [glibc | jemalloc] allocator=glibc # [glibc | jemalloc]
verNumber="" verNumber=""
verNumberComp="1.0.0.0" verNumberComp="1.0.0.0"
httpdBuild=false
while getopts "hv:V:c:o:l:s:d:a:n:m:" arg while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg
do do
case $arg in case $arg in
v) v)
...@@ -70,6 +72,10 @@ do ...@@ -70,6 +72,10 @@ do
#echo "osType=$OPTARG" #echo "osType=$OPTARG"
osType=$(echo $OPTARG) osType=$(echo $OPTARG)
;; ;;
H)
#echo "httpdBuild=$OPTARG"
httpdBuild=$(echo $OPTARG)
;;
h) h)
echo "Usage: `basename $0` -v [cluster | edge] " echo "Usage: `basename $0` -v [cluster | edge] "
echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] " echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] "
...@@ -81,6 +87,7 @@ do ...@@ -81,6 +87,7 @@ do
echo " -d [taos | power | tq | pro | kh | jh] " echo " -d [taos | power | tq | pro | kh | jh] "
echo " -n [version number] " echo " -n [version number] "
echo " -m [compatible version number] " echo " -m [compatible version number] "
echo " -H [false | true] "
exit 0 exit 0
;; ;;
?) #unknow option ?) #unknow option
...@@ -90,7 +97,7 @@ do ...@@ -90,7 +97,7 @@ do
esac esac
done done
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp}" echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp} httpdBuild=${httpdBuild}"
curr_dir=$(pwd) curr_dir=$(pwd)
...@@ -405,8 +412,7 @@ if [[ "$dbName" == "jh" ]]; then ...@@ -405,8 +412,7 @@ if [[ "$dbName" == "jh" ]]; then
# TODO: src/dnode/CMakeLists.txt # TODO: src/dnode/CMakeLists.txt
fi fi
echo "build ${pagMode} package ..." if [[ "$httpdBuild" == "true" ]]; then
if [[ "$pagMode" == "lite" ]]; then
BUILD_HTTP=true BUILD_HTTP=true
BUILD_TOOLS=false BUILD_TOOLS=false
else else
......
...@@ -700,78 +700,93 @@ function clean_service_on_systemd() { ...@@ -700,78 +700,93 @@ function clean_service_on_systemd() {
function install_service_on_systemd() { function install_service_on_systemd() {
clean_service_on_systemd clean_service_on_systemd
taosd_service_config="${service_config_dir}/taosd.service" [ -f ${script_dir}/cfg/taosd.service ] &&\
${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}" ${csudo} cp ${script_dir}/cfg/taosd.service \
${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" ${service_config_dir}/ || :
${csudo} bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}" ${csudo} systemctl daemon-reload
${csudo} bash -c "echo 'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}"
${csudo} bash -c "echo >> ${taosd_service_config}" #taosd_service_config="${service_config_dir}/taosd.service"
${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}" #${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}" #${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}" #${csudo} bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}" #${csudo} bash -c "echo 'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}" #${csudo} bash -c "echo >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}" #${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}" #${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}" #${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}"
${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${taosd_service_config}" #${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}"
${csudo} bash -c "echo 'StandardOutput=null' >> ${taosd_service_config}" #${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Restart=always' >> ${taosd_service_config}" #${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'StartLimitBurst=3' >> ${taosd_service_config}" #${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${taosd_service_config}" #${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}"
#${csudo} bash -c "echo 'StartLimitIntervalSec=60s' >> ${taosd_service_config}" #${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${taosd_service_config}"
${csudo} bash -c "echo >> ${taosd_service_config}" #${csudo} bash -c "echo 'StandardOutput=null' >> ${taosd_service_config}"
${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}" #${csudo} bash -c "echo 'Restart=always' >> ${taosd_service_config}"
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" #${csudo} bash -c "echo 'StartLimitBurst=3' >> ${taosd_service_config}"
#${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${taosd_service_config}"
##${csudo} bash -c "echo 'StartLimitIntervalSec=60s' >> ${taosd_service_config}"
#${csudo} bash -c "echo >> ${taosd_service_config}"
#${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}"
#${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}"
${csudo} systemctl enable taosd ${csudo} systemctl enable taosd
tarbitratord_service_config="${service_config_dir}/tarbitratord.service" [ -f ${script_dir}/cfg/tarbitratord.service ] &&\
${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" ${csudo} cp ${script_dir}/cfg/tarbitratord.service \
${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" ${service_config_dir}/ || :
${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" ${csudo} systemctl daemon-reload
${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo >> ${tarbitratord_service_config}" #tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}" #${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" #${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" #${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" #${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" #${csudo} bash -c "echo >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" #${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" #${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" #${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" #${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" #${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" #${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" #${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo >> ${tarbitratord_service_config}" #${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" #${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" #${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
#${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
#${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
#${csudo} bash -c "echo >> ${tarbitratord_service_config}"
#${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
#${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
#${csudo} systemctl enable tarbitratord #${csudo} systemctl enable tarbitratord
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/nginxd.service" [ -f ${script_dir}/cfg/nginxd.service ] &&\
${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" ${csudo} cp ${script_dir}/cfg/nginxd.service \
${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}" ${service_config_dir}/ || :
${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" ${csudo} systemctl daemon-reload
${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}"
${csudo} bash -c "echo >> ${nginx_service_config}" #nginx_service_config="${service_config_dir}/nginxd.service"
${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}" #${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}"
${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}" #${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}"
${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" #${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" #${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" #${csudo} bash -c "echo >> ${nginx_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}" #${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" #${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" #${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" #${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" #${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" #${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}" #${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" #${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" #${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo >> ${nginx_service_config}" #${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}"
${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}" #${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}"
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" #${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}"
#${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}"
#${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}"
#${csudo} bash -c "echo >> ${nginx_service_config}"
#${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}"
#${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}"
if ! ${csudo} systemctl enable nginxd &> /dev/null; then if ! ${csudo} systemctl enable nginxd &> /dev/null; then
${csudo} systemctl daemon-reexec ${csudo} systemctl daemon-reexec
${csudo} systemctl enable nginxd ${csudo} systemctl enable nginxd
......
...@@ -86,6 +86,16 @@ if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then ...@@ -86,6 +86,16 @@ if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
cp ${compile_dir}/test/cfg/taosadapter.service ${install_dir}/cfg || : cp ${compile_dir}/test/cfg/taosadapter.service ${install_dir}/cfg || :
fi fi
if [ -f "${cfg_dir}/taosd.service" ]; then
cp ${cfg_dir}/taosd.service ${install_dir}/cfg || :
fi
if [ -f "${cfg_dir}/tarbitratord.service" ]; then
cp ${cfg_dir}/tarbitratord.service ${install_dir}/cfg || :
fi
if [ -f "${cfg_dir}/nginxd.service" ]; then
cp ${cfg_dir}/nginxd.service ${install_dir}/cfg || :
fi
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/taosd.deb mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/taosd.deb
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taosd.rpm mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taosd.rpm
......
...@@ -364,7 +364,9 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo* ...@@ -364,7 +364,9 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo*
SExprInfo* pExprInfo = tscExprGet(pQueryInfo, j); SExprInfo* pExprInfo = tscExprGet(pQueryInfo, j);
int32_t functionId = pExprInfo->base.functionId; int32_t functionId = pExprInfo->base.functionId;
if (pColIndex->colId == pExprInfo->base.colInfo.colId && (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ)) { if (pColIndex->colId == pExprInfo->base.colInfo.colId && (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ)) {
orderColIndexList[i] = j; orderColIndexList[i] = j;
break; break;
} }
......
...@@ -152,7 +152,9 @@ static int32_t buildSmlKvSchema(TAOS_SML_KV* smlKv, SHashObj* hash, SArray* arra ...@@ -152,7 +152,9 @@ static int32_t buildSmlKvSchema(TAOS_SML_KV* smlKv, SHashObj* hash, SArray* arra
static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableName, int* tableNameLen, static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableName, int* tableNameLen,
SSmlLinesInfo* info) { SSmlLinesInfo* info) {
tscDebug("SML:0x%"PRIx64" taos_sml_insert get child table name through md5", info->id); tscDebug("SML:0x%"PRIx64" taos_sml_insert get child table name through md5", info->id);
qsort(point->tags, point->tagNum, sizeof(TAOS_SML_KV), compareSmlColKv); if (point->tagNum) {
qsort(point->tags, point->tagNum, sizeof(TAOS_SML_KV), compareSmlColKv);
}
SStringBuilder sb; memset(&sb, 0, sizeof(sb)); SStringBuilder sb; memset(&sb, 0, sizeof(sb));
char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0}; char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
...@@ -185,6 +187,18 @@ static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableNa ...@@ -185,6 +187,18 @@ static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableNa
return 0; return 0;
} }
static int32_t buildSmlChildTableName(TAOS_SML_DATA_POINT* point, SSmlLinesInfo* info) {
tscDebug("SML:0x%"PRIx64" taos_sml_insert build child table name", info->id);
char childTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE];
int32_t tableNameLen = TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE;
getSmlMd5ChildTableName(point, childTableName, &tableNameLen, info);
point->childTableName = calloc(1, tableNameLen+1);
strncpy(point->childTableName, childTableName, tableNameLen);
point->childTableName[tableNameLen] = '\0';
return 0;
}
static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, SArray* stableSchemas, SSmlLinesInfo* info) { static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, SArray* stableSchemas, SSmlLinesInfo* info) {
int32_t code = 0; int32_t code = 0;
SHashObj* sname2shema = taosHashInit(32, SHashObj* sname2shema = taosHashInit(32,
...@@ -216,12 +230,7 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, ...@@ -216,12 +230,7 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint,
for (int j = 0; j < point->tagNum; ++j) { for (int j = 0; j < point->tagNum; ++j) {
TAOS_SML_KV* tagKv = point->tags + j; TAOS_SML_KV* tagKv = point->tags + j;
if (!point->childTableName) { if (!point->childTableName) {
char childTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE]; buildSmlChildTableName(point, info);
int32_t tableNameLen = TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE;
getSmlMd5ChildTableName(point, childTableName, &tableNameLen, info);
point->childTableName = calloc(1, tableNameLen+1);
strncpy(point->childTableName, childTableName, tableNameLen);
point->childTableName[tableNameLen] = '\0';
} }
code = buildSmlKvSchema(tagKv, pStableSchema->tagHash, pStableSchema->tags, info); code = buildSmlKvSchema(tagKv, pStableSchema->tagHash, pStableSchema->tags, info);
...@@ -231,6 +240,27 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, ...@@ -231,6 +240,27 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint,
} }
} }
//for Line Protocol tags may be omitted, add a tag with NULL value
if (point->tagNum == 0) {
if (!point->childTableName) {
buildSmlChildTableName(point, info);
}
char tagNullName[TSDB_COL_NAME_LEN] = {0};
size_t nameLen = strlen(tsSmlTagNullName);
strncpy(tagNullName, tsSmlTagNullName, nameLen);
addEscapeCharToString(tagNullName, (int32_t)nameLen);
size_t* pTagNullIdx = taosHashGet(pStableSchema->tagHash, tagNullName, nameLen + TS_ESCAPE_CHAR_SIZE);
if (!pTagNullIdx) {
SSchema tagNull = {0};
tagNull.type = TSDB_DATA_TYPE_NCHAR;
tagNull.bytes = TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE;
strncpy(tagNull.name, tagNullName, nameLen + TS_ESCAPE_CHAR_SIZE);
taosArrayPush(pStableSchema->tags, &tagNull);
size_t tagNullIdx = taosArrayGetSize(pStableSchema->tags) - 1;
taosHashPut(pStableSchema->tagHash, tagNull.name, nameLen + TS_ESCAPE_CHAR_SIZE, &tagNullIdx, sizeof(tagNullIdx));
}
}
for (int j = 0; j < point->fieldNum; ++j) { for (int j = 0; j < point->fieldNum; ++j) {
TAOS_SML_KV* fieldKv = point->fields + j; TAOS_SML_KV* fieldKv = point->fields + j;
code = buildSmlKvSchema(fieldKv, pStableSchema->fieldHash, pStableSchema->fields, info); code = buildSmlKvSchema(fieldKv, pStableSchema->fieldHash, pStableSchema->fields, info);
...@@ -951,7 +981,7 @@ static int32_t applyChildTableTags(TAOS* taos, char* cTableName, char* sTableNam ...@@ -951,7 +981,7 @@ static int32_t applyChildTableTags(TAOS* taos, char* cTableName, char* sTableNam
tagKVs[kv->fieldSchemaIdx] = kv; tagKVs[kv->fieldSchemaIdx] = kv;
} }
} }
SArray* tagBinds = taosArrayInit(numTags, sizeof(TAOS_BIND)); SArray* tagBinds = taosArrayInit(numTags, sizeof(TAOS_BIND));
taosArraySetSize(tagBinds, numTags); taosArraySetSize(tagBinds, numTags);
int isNullColBind = TSDB_TRUE; int isNullColBind = TSDB_TRUE;
......
...@@ -237,6 +237,7 @@ extern int8_t tsDeadLockKillQuery; ...@@ -237,6 +237,7 @@ extern int8_t tsDeadLockKillQuery;
// schemaless // schemaless
extern char tsDefaultJSONStrType[]; extern char tsDefaultJSONStrType[];
extern char tsSmlChildTableName[]; extern char tsSmlChildTableName[];
extern char tsSmlTagNullName[];
typedef struct { typedef struct {
......
...@@ -291,7 +291,11 @@ int8_t tsDeadLockKillQuery = 0; ...@@ -291,7 +291,11 @@ int8_t tsDeadLockKillQuery = 0;
// default JSON string type // default JSON string type
char tsDefaultJSONStrType[7] = "nchar"; char tsDefaultJSONStrType[7] = "nchar";
char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; //user defined child table name can be specified in tag value. If set to empty system will generate table name using MD5 hash. char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; //user defined child table name can be specified in tag value.
//If set to empty system will generate table name using MD5 hash.
char tsSmlTagNullName[TSDB_COL_NAME_LEN] = "_tag_null"; //for line protocol if tag is omitted, add a tag with NULL value
//to make sure inserted records belongs to the same measurement
//default name is _tag_null and can be user configurable
int32_t (*monStartSystemFp)() = NULL; int32_t (*monStartSystemFp)() = NULL;
void (*monStopSystemFp)() = NULL; void (*monStopSystemFp)() = NULL;
...@@ -1701,6 +1705,17 @@ static void doInitGlobalConfig(void) { ...@@ -1701,6 +1705,17 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
// name for a NULL value tag added for Line Protocol when tag fields are omitted
cfg.option = "smlTagNullName";
cfg.ptr = tsSmlTagNullName;
cfg.valType = TAOS_CFG_VTYPE_STRING;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 0;
cfg.maxValue = 0;
cfg.ptrLength = tListLen(tsSmlTagNullName);
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
// flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks // flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks
cfg.option = "walFlushSize"; cfg.option = "walFlushSize";
cfg.ptr = &tsdbWalFlushSize; cfg.ptr = &tsdbWalFlushSize;
......
...@@ -19,7 +19,7 @@ using System.Runtime.InteropServices; ...@@ -19,7 +19,7 @@ using System.Runtime.InteropServices;
namespace TDengineDriver namespace TDengineDriver
{ {
enum TDengineDataType public enum TDengineDataType
{ {
TSDB_DATA_TYPE_NULL = 0, // 1 bytes TSDB_DATA_TYPE_NULL = 0, // 1 bytes
TSDB_DATA_TYPE_BOOL = 1, // 1 bytes TSDB_DATA_TYPE_BOOL = 1, // 1 bytes
...@@ -33,12 +33,12 @@ namespace TDengineDriver ...@@ -33,12 +33,12 @@ namespace TDengineDriver
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
TSDB_DATA_TYPE_NCHAR = 10, // unicode string TSDB_DATA_TYPE_NCHAR = 10, // unicode string
TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte
TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes TSDB_DATA_TYPE_USMALLINT = 12,// 2 bytes
TSDB_DATA_TYPE_UINT = 13, // 4 bytes TSDB_DATA_TYPE_UINT = 13, // 4 bytes
TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes TSDB_DATA_TYPE_UBIGINT = 14 // 8 bytes
} }
enum TDengineInitOption public enum TDengineInitOption
{ {
TSDB_OPTION_LOCALE = 0, TSDB_OPTION_LOCALE = 0,
TSDB_OPTION_CHARSET = 1, TSDB_OPTION_CHARSET = 1,
...@@ -47,7 +47,7 @@ namespace TDengineDriver ...@@ -47,7 +47,7 @@ namespace TDengineDriver
TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4
} }
class TDengineMeta public class TDengineMeta
{ {
public string name; public string name;
public short size; public short size;
...@@ -90,7 +90,7 @@ namespace TDengineDriver ...@@ -90,7 +90,7 @@ namespace TDengineDriver
} }
} }
class TDengine public class TDengine
{ {
public const int TSDB_CODE_SUCCESS = 0; public const int TSDB_CODE_SUCCESS = 0;
......
Subproject commit b8f76da4a708d158ec3cc4b844571dc4414e36b4 Subproject commit 25f8683ece07897fea12c347d369602b2235665f
The TDengine Grafana plugin is no longer part of the TDengine repo. Please check it out from https://github.com/taosdata/grafanaplugin.
Subproject commit 792ef7c3036f15068796e09883d3f4d47a038fe2
The TDengine Grafana plugin is no longer part of the TDengine repo. Please check it out from https://github.com/taosdata/grafanaplugin.
...@@ -107,16 +107,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti ...@@ -107,16 +107,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
public void setCatalog(String catalog) throws SQLException { public void setCatalog(String catalog) throws SQLException {
if (isClosed()) if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
/*
try (Statement stmt = createStatement()) {
boolean execute = stmt.execute("use " + catalog);
if (execute)
this.catalog = catalog;
} catch (SQLException e) {
// do nothing
}
*/
this.catalog = catalog; this.catalog = catalog;
} }
...@@ -416,7 +406,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti ...@@ -416,7 +406,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
} catch (InterruptedException | ExecutionException ignored) { } catch (InterruptedException | ExecutionException ignored) {
} catch (TimeoutException e) { } catch (TimeoutException e) {
future.cancel(true); future.cancel(true);
status = false;
} finally { } finally {
executor.shutdownNow(); executor.shutdownNow();
} }
......
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc; package com.taosdata.jdbc;
import java.sql.*; import java.sql.*;
...@@ -66,7 +52,7 @@ public class TSDBConnection extends AbstractConnection { ...@@ -66,7 +52,7 @@ public class TSDBConnection extends AbstractConnection {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
} }
long id = this.connector.subscribe(topic, sql, restart, 0); long id = this.connector.subscribe(topic, sql, restart);
if (id == 0) { if (id == 0) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED);
} }
......
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc; package com.taosdata.jdbc;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.sql.*; import java.sql.*;
import java.util.*; import java.util.Properties;
import java.util.StringTokenizer;
import java.util.logging.Logger; import java.util.logging.Logger;
/** /**
...@@ -139,7 +124,7 @@ public class TSDBDriver extends AbstractDriver { ...@@ -139,7 +124,7 @@ public class TSDBDriver extends AbstractDriver {
} catch (SQLException sqlEx) { } catch (SQLException sqlEx) {
throw sqlEx; throw sqlEx;
} catch (Exception ex) { } catch (Exception ex) {
throw new SQLException("SQLException:" + ex.toString(), ex); throw new SQLException("SQLException:" + ex, ex);
} }
} }
...@@ -152,7 +137,7 @@ public class TSDBDriver extends AbstractDriver { ...@@ -152,7 +137,7 @@ public class TSDBDriver extends AbstractDriver {
public boolean acceptsURL(String url) throws SQLException { public boolean acceptsURL(String url) throws SQLException {
if (url == null) if (url == null)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_URL_NOT_SET); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_URL_NOT_SET);
return url.length() > 0 && url.trim().length() > 0 && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1)); return url.trim().length() > 0 && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1));
} }
public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
......
/**
* *************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
* <p>
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
* <p>
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
* <p>
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* ***************************************************************************
*/
package com.taosdata.jdbc; package com.taosdata.jdbc;
import com.alibaba.fastjson.JSONObject; import com.alibaba.fastjson.JSONObject;
...@@ -261,8 +245,8 @@ public class TSDBJNIConnector { ...@@ -261,8 +245,8 @@ public class TSDBJNIConnector {
/** /**
* Create a subscription * Create a subscription
*/ */
long subscribe(String topic, String sql, boolean restart, int period) { long subscribe(String topic, String sql, boolean restart) {
return subscribeImp(this.taos, restart, topic, sql, period); return subscribeImp(this.taos, restart, topic, sql, 0);
} }
private native long subscribeImp(long connection, boolean restart, String topic, String sql, int period); private native long subscribeImp(long connection, boolean restart, String topic, String sql, int period);
...@@ -285,16 +269,6 @@ public class TSDBJNIConnector { ...@@ -285,16 +269,6 @@ public class TSDBJNIConnector {
private native void unsubscribeImp(long subscription, boolean isKeep); private native void unsubscribeImp(long subscription, boolean isKeep);
/**
* Validate if a <I>create table</I> SQL statement is correct without actually creating that table
*/
public boolean validateCreateTableSql(String sql) {
int res = validateCreateTableSqlImp(taos, sql.getBytes());
return res == 0;
}
private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);
public long prepareStmt(String sql) throws SQLException { public long prepareStmt(String sql) throws SQLException {
long stmt = prepareStmtImp(sql.getBytes(), this.taos); long stmt = prepareStmtImp(sql.getBytes(), this.taos);
......
...@@ -39,7 +39,7 @@ public class RestfulDriver extends AbstractDriver { ...@@ -39,7 +39,7 @@ public class RestfulDriver extends AbstractDriver {
String port = props.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "6041"); String port = props.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "6041");
String database = props.containsKey(TSDBDriver.PROPERTY_KEY_DBNAME) ? props.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME) : null; String database = props.containsKey(TSDBDriver.PROPERTY_KEY_DBNAME) ? props.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME) : null;
String loginUrl = "http://" + host + ":" + port + "/rest/login/" + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/" + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + ""; String loginUrl;
try { try {
if (!props.containsKey(TSDBDriver.PROPERTY_KEY_USER)) if (!props.containsKey(TSDBDriver.PROPERTY_KEY_USER))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED);
...@@ -53,8 +53,8 @@ public class RestfulDriver extends AbstractDriver { ...@@ -53,8 +53,8 @@ public class RestfulDriver extends AbstractDriver {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "unsupported UTF-8 encoding, user: " + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + ", password: " + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD)); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "unsupported UTF-8 encoding, user: " + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + ", password: " + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
} }
int poolSize = Integer.valueOf(props.getProperty("httpPoolSize", HttpClientPoolUtil.DEFAULT_MAX_PER_ROUTE)); int poolSize = Integer.parseInt(props.getProperty("httpPoolSize", HttpClientPoolUtil.DEFAULT_MAX_PER_ROUTE));
boolean keepAlive = Boolean.valueOf(props.getProperty("httpKeepAlive", HttpClientPoolUtil.DEFAULT_HTTP_KEEP_ALIVE)); boolean keepAlive = Boolean.parseBoolean(props.getProperty("httpKeepAlive", HttpClientPoolUtil.DEFAULT_HTTP_KEEP_ALIVE));
HttpClientPoolUtil.init(poolSize, keepAlive); HttpClientPoolUtil.init(poolSize, keepAlive);
String result = HttpClientPoolUtil.execute(loginUrl); String result = HttpClientPoolUtil.execute(loginUrl);
...@@ -79,7 +79,7 @@ public class RestfulDriver extends AbstractDriver { ...@@ -79,7 +79,7 @@ public class RestfulDriver extends AbstractDriver {
public boolean acceptsURL(String url) throws SQLException { public boolean acceptsURL(String url) throws SQLException {
if (url == null) if (url == null)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_URL_NOT_SET); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_URL_NOT_SET);
return url.length() > 0 && url.trim().length() > 0 && url.startsWith(URL_PREFIX); return url.trim().length() > 0 && url.startsWith(URL_PREFIX);
} }
@Override @Override
......
...@@ -65,7 +65,11 @@ public class RestfulStatement extends AbstractStatement { ...@@ -65,7 +65,11 @@ public class RestfulStatement extends AbstractStatement {
boolean result = true; boolean result = true;
if (SqlSyntaxValidator.isUseSql(sql)) { if (SqlSyntaxValidator.isUseSql(sql)) {
HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); String ret = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken());
JSONObject resultJson = JSON.parseObject(ret);
if (resultJson.getString("status").equals("error")) {
throw TSDBError.createSQLException(resultJson.getInteger("code"), "sql: " + sql + ", desc: " + resultJson.getString("desc"));
}
this.database = sql.trim().replace("use", "").trim(); this.database = sql.trim().replace("use", "").trim();
this.conn.setCatalog(this.database); this.conn.setCatalog(this.database);
result = false; result = false;
...@@ -114,7 +118,7 @@ public class RestfulStatement extends AbstractStatement { ...@@ -114,7 +118,7 @@ public class RestfulStatement extends AbstractStatement {
String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken());
JSONObject resultJson = JSON.parseObject(result); JSONObject resultJson = JSON.parseObject(result);
if (resultJson.getString("status").equals("error")) { if (resultJson.getString("status").equals("error")) {
throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc")); throw TSDBError.createSQLException(resultJson.getInteger("code"), "sql: " + sql + ", desc: " + resultJson.getString("desc"));
} }
this.resultSet = new RestfulResultSet(database, this, resultJson); this.resultSet = new RestfulResultSet(database, this, resultJson);
this.affectedRows = -1; this.affectedRows = -1;
...@@ -125,7 +129,7 @@ public class RestfulStatement extends AbstractStatement { ...@@ -125,7 +129,7 @@ public class RestfulStatement extends AbstractStatement {
String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken());
JSONObject jsonObject = JSON.parseObject(result); JSONObject jsonObject = JSON.parseObject(result);
if (jsonObject.getString("status").equals("error")) { if (jsonObject.getString("status").equals("error")) {
throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc")); throw TSDBError.createSQLException(jsonObject.getInteger("code"), "sql: " + sql + ", desc: " + jsonObject.getString("desc"));
} }
this.resultSet = null; this.resultSet = null;
this.affectedRows = getAffectedRows(jsonObject); this.affectedRows = getAffectedRows(jsonObject);
...@@ -133,16 +137,14 @@ public class RestfulStatement extends AbstractStatement { ...@@ -133,16 +137,14 @@ public class RestfulStatement extends AbstractStatement {
} }
private int getAffectedRows(JSONObject jsonObject) throws SQLException { private int getAffectedRows(JSONObject jsonObject) throws SQLException {
// create ... SQLs should return 0 , and Restful result like this:
// {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1}
JSONArray head = jsonObject.getJSONArray("head"); JSONArray head = jsonObject.getJSONArray("head");
if (head.size() != 1 || !"affected_rows".equals(head.getString(0))) if (head.size() != 1 || !"affected_rows".equals(head.getString(0)))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "invalid variable: [" + head.toJSONString() + "]");
JSONArray data = jsonObject.getJSONArray("data"); JSONArray data = jsonObject.getJSONArray("data");
if (data != null) { if (data != null) {
return data.getJSONArray(0).getInteger(0); return data.getJSONArray(0).getInteger(0);
} }
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "invalid variable: [" + jsonObject.toJSONString() + "]");
} }
@Override @Override
......
...@@ -8,8 +8,13 @@ import java.sql.SQLException; ...@@ -8,8 +8,13 @@ import java.sql.SQLException;
public class ConnectWrongDatabaseTest { public class ConnectWrongDatabaseTest {
@Test(expected = SQLException.class) @Test(expected = SQLException.class)
public void connect() throws SQLException { public void connectByJni() throws SQLException {
DriverManager.getConnection("jdbc:TAOS://localhost:6030/wrong_db?user=root&password=taosdata"); DriverManager.getConnection("jdbc:TAOS://localhost:6030/wrong_db?user=root&password=taosdata");
} }
@Test(expected = SQLException.class)
public void connectByRestful() throws SQLException {
DriverManager.getConnection("jdbc:TAOS-RS://localhost:6041/wrong_db?user=root&password=taosdata");
}
} }
...@@ -10,9 +10,8 @@ const ArrayType = require('ref-array-napi'); ...@@ -10,9 +10,8 @@ const ArrayType = require('ref-array-napi');
const Struct = require('ref-struct-napi'); const Struct = require('ref-struct-napi');
const FieldTypes = require('./constants'); const FieldTypes = require('./constants');
const errors = require('./error'); const errors = require('./error');
const _ = require('lodash')
const TaosObjects = require('./taosobjects'); const TaosObjects = require('./taosobjects');
const { NULL_POINTER } = require('ref-napi');
const { Console } = require('console');
module.exports = CTaosInterface; module.exports = CTaosInterface;
...@@ -223,6 +222,8 @@ TaosField.fields.name.type.size = 65; ...@@ -223,6 +222,8 @@ TaosField.fields.name.type.size = 65;
TaosField.defineProperty('type', ref.types.char); TaosField.defineProperty('type', ref.types.char);
TaosField.defineProperty('bytes', ref.types.short); TaosField.defineProperty('bytes', ref.types.short);
//define schemaless line array
var smlLine = ArrayType(ref.coerceType('char *'))
/** /**
* *
...@@ -238,7 +239,6 @@ function CTaosInterface(config = null, pass = false) { ...@@ -238,7 +239,6 @@ function CTaosInterface(config = null, pass = false) {
ref.types.void_ptr2 = ref.refType(ref.types.void_ptr); ref.types.void_ptr2 = ref.refType(ref.types.void_ptr);
/*Declare a bunch of functions first*/ /*Declare a bunch of functions first*/
/* Note, pointers to TAOS_RES, TAOS, are ref.types.void_ptr. The connection._conn buffer is supplied for pointers to TAOS * */ /* Note, pointers to TAOS_RES, TAOS, are ref.types.void_ptr. The connection._conn buffer is supplied for pointers to TAOS * */
if ('win32' == os.platform()) { if ('win32' == os.platform()) {
taoslibname = 'taos'; taoslibname = 'taos';
} else { } else {
...@@ -303,9 +303,15 @@ function CTaosInterface(config = null, pass = false) { ...@@ -303,9 +303,15 @@ function CTaosInterface(config = null, pass = false) {
// int64_t stime, void *param, void (*callback)(void *)); // int64_t stime, void *param, void (*callback)(void *));
'taos_open_stream': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr]], 'taos_open_stream': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr]],
//void taos_close_stream(TAOS_STREAM *tstr); //void taos_close_stream(TAOS_STREAM *tstr);
'taos_close_stream': [ref.types.void, [ref.types.void_ptr]] 'taos_close_stream': [ref.types.void, [ref.types.void_ptr]],
//Schemaless insert
//TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int protocol,int precision)
// 'taos_schemaless_insert': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr, ref.types.int, ref.types.int, ref.types.int]]
'taos_schemaless_insert': [ref.types.void_ptr, [ref.types.void_ptr, smlLine, 'int', 'int', 'int']]
}); });
if (pass == false) { if (pass == false) {
if (config == null) { if (config == null) {
this._config = ref.alloc(ref.types.char_ptr, ref.NULL); this._config = ref.alloc(ref.types.char_ptr, ref.NULL);
...@@ -664,3 +670,38 @@ CTaosInterface.prototype.closeStream = function closeStream(stream) { ...@@ -664,3 +670,38 @@ CTaosInterface.prototype.closeStream = function closeStream(stream) {
this.libtaos.taos_close_stream(stream); this.libtaos.taos_close_stream(stream);
console.log("Closed stream"); console.log("Closed stream");
} }
//Schemaless insert API
/**
 * TAOS schemaless insert API, wrapping the native call:
 * TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int protocol, int precision)
 * Use taos_errno to get the error code and taos_errstr to get the error message.
 * Remember to release the returned taos_res, otherwise it will lead to a memory leak.
 * @param {*} connection a valid database connection
 * @param {*} lines a single line string, or an array of lines, conforming to the chosen line protocol;
 *                  the number of lines is derived from this argument
 * @param {*} protocol line protocol, enum type (0,1,2,3), indicating which line protocol the lines use
 * @param {*} precision timestamp precision of the lines, enum type (0,1,2,3,4,5,6)
 * @returns TAOS_RES
 */
CTaosInterface.prototype.schemalessInsert = function schemalessInsert(connection, lines, protocol, precision) {
let _numLines = null;
let _lines = null;
if(_.isString(lines)){
_numLines = 1;
_lines = Buffer.alloc(_numLines * ref.sizeof.pointer);
ref.set(_lines,0,ref.allocCString(lines),ref.types.char_ptr);
}
else if(_.isArray(lines)){
_numLines = lines.length;
_lines = Buffer.alloc(_numLines * ref.sizeof.pointer);
for(let i = 0; i < _numLines ; i++){
ref.set(_lines,i*ref.sizeof.pointer,ref.allocCString(lines[i]),ref.types.char_ptr)
}
}
else{
throw new errors.InterfaceError("Unsupport lines input")
}
return this.libtaos.taos_schemaless_insert(connection, _lines, _numLines, protocol, precision);
}
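For reference, since this wrapper hands back a raw TAOS_RES, callers must follow the check-and-free pattern described in the comment above. Below is a minimal, hypothetical sketch, not part of this change: it assumes `ci` is a CTaosInterface instance and `connPtr` a native TAOS pointer such as `connection._conn`, and the table name and data are made up.

```js
// Hypothetical sketch: check taos_errno and always free the TAOS_RES.
const res = ci.schemalessInsert(connPtr,
  ['sml_demo,t1=3i64 c1=3i64,c2=false 1626006833639000000'],
  1 /* TSDB_SML_LINE_PROTOCOL */,
  6 /* TSDB_SML_TIMESTAMP_NANO_SECONDS */);
try {
  if (ci.errno(res) !== 0) {
    // taos_errno / taos_errstr expose the failure details
    throw new Error(ci.errno(res) + ': ' + ci.errStr(res));
  }
} finally {
  ci.freeResult(res);  // release the native result, otherwise it leaks
}
```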
const SCHEMALESS_PROTOCOL = {
TSDB_SML_UNKNOWN_PROTOCOL: 0,
TSDB_SML_LINE_PROTOCOL: 1,
TSDB_SML_TELNET_PROTOCOL: 2,
TSDB_SML_JSON_PROTOCOL: 3
}
const SCHEMALESS_PRECISION = {
TSDB_SML_TIMESTAMP_NOT_CONFIGURED : 0,
TSDB_SML_TIMESTAMP_HOURS : 1,
TSDB_SML_TIMESTAMP_MINUTES : 2,
TSDB_SML_TIMESTAMP_SECONDS : 3,
TSDB_SML_TIMESTAMP_MILLI_SECONDS : 4,
TSDB_SML_TIMESTAMP_MICRO_SECONDS : 5,
TSDB_SML_TIMESTAMP_NANO_SECONDS : 6
}
const typeCodesToName = {
0: 'Null',
1: 'Boolean',
2: 'Tiny Int',
3: 'Small Int',
4: 'Int',
5: 'Big Int',
6: 'Float',
7: 'Double',
8: 'Binary',
9: 'Timestamp',
10: 'Nchar',
11: 'Tinyint Unsigned',
12: 'Smallint Unsigned',
13: 'Int Unsigned',
14: 'Bigint Unsigned',
}
/**
* @function
* @param {number} typecode - The code to get the name of the type for
* @return {string} Name of the field type
*/
function getType(typecode) {
return typeCodesToName[typecode];
}
/** /**
* Contains the definitions/values assigned to various field types * Contains the definitions/values assigned to various field types
* @module FieldTypes * @module FieldTypes
...@@ -18,71 +60,45 @@ ...@@ -18,71 +60,45 @@
* @property {number} C_TIMESTAMP - Timestamp in format "YYYY:MM:DD HH:MM:SS.MMM". Measured in number of milliseconds passed after * @property {number} C_TIMESTAMP - Timestamp in format "YYYY:MM:DD HH:MM:SS.MMM". Measured in number of milliseconds passed after
1970-01-01 08:00:00.000 GMT. 1970-01-01 08:00:00.000 GMT.
* @property {number} C_NCHAR - NChar field type encoded in ASCII, a wide string. * @property {number} C_NCHAR - NChar field type encoded in ASCII, a wide string.
* *
*
*
* @property {number} C_TIMESTAMP_MILLI - The code for millisecond timestamps, as returned by libtaos.taos_result_precision(result). * @property {number} C_TIMESTAMP_MILLI - The code for millisecond timestamps, as returned by libtaos.taos_result_precision(result).
* @property {number} C_TIMESTAMP_MICRO - The code for microsecond timestamps, as returned by libtaos.taos_result_precision(result). * @property {number} C_TIMESTAMP_MICRO - The code for microsecond timestamps, as returned by libtaos.taos_result_precision(result).
*/ */
module.exports = { module.exports = {
C_NULL : 0, C_NULL: 0,
C_BOOL : 1, C_BOOL: 1,
C_TINYINT : 2, C_TINYINT: 2,
C_SMALLINT : 3, C_SMALLINT: 3,
C_INT : 4, C_INT: 4,
C_BIGINT : 5, C_BIGINT: 5,
C_FLOAT : 6, C_FLOAT: 6,
C_DOUBLE : 7, C_DOUBLE: 7,
C_BINARY : 8, C_BINARY: 8,
C_TIMESTAMP : 9, C_TIMESTAMP: 9,
C_NCHAR : 10, C_NCHAR: 10,
C_TINYINT_UNSIGNED : 11, C_TINYINT_UNSIGNED: 11,
C_SMALLINT_UNSIGNED : 12, C_SMALLINT_UNSIGNED: 12,
C_INT_UNSIGNED : 13, C_INT_UNSIGNED: 13,
C_BIGINT_UNSIGNED : 14, C_BIGINT_UNSIGNED: 14,
// NULL value definition // NULL value definition
// NOTE: These values should change according to C definition in tsdb.h // NOTE: These values should change according to C definition in tsdb.h
C_BOOL_NULL : 2, C_BOOL_NULL: 2,
C_TINYINT_NULL : -128, C_TINYINT_NULL: -128,
C_TINYINT_UNSIGNED_NULL : 255, C_TINYINT_UNSIGNED_NULL: 255,
C_SMALLINT_NULL : -32768, C_SMALLINT_NULL: -32768,
C_SMALLINT_UNSIGNED_NULL : 65535, C_SMALLINT_UNSIGNED_NULL: 65535,
C_INT_NULL : -2147483648, C_INT_NULL: -2147483648,
C_INT_UNSIGNED_NULL : 4294967295, C_INT_UNSIGNED_NULL: 4294967295,
C_BIGINT_NULL : -9223372036854775808n, C_BIGINT_NULL: -9223372036854775808n,
C_BIGINT_UNSIGNED_NULL : 18446744073709551615n, C_BIGINT_UNSIGNED_NULL: 18446744073709551615n,
C_FLOAT_NULL : 2146435072, C_FLOAT_NULL: 2146435072,
C_DOUBLE_NULL : -9223370937343148032, C_DOUBLE_NULL: -9223370937343148032,
C_NCHAR_NULL : 4294967295, C_NCHAR_NULL: 4294967295,
C_BINARY_NULL : 255, C_BINARY_NULL: 255,
C_TIMESTAMP_MILLI : 0, C_TIMESTAMP_MILLI: 0,
C_TIMESTAMP_MICRO : 1, C_TIMESTAMP_MICRO: 1,
getType, getType,
} SCHEMALESS_PROTOCOL,
SCHEMALESS_PRECISION
const typeCodesToName = {
0 : 'Null',
1 : 'Boolean',
2 : 'Tiny Int',
3 : 'Small Int',
4 : 'Int',
5 : 'Big Int',
6 : 'Float',
7 : 'Double',
8 : 'Binary',
9 : 'Timestamp',
10 : 'Nchar',
11 : 'TINYINT_UNSIGNED',
12 : 'SMALLINT_UNSIGNED',
13 : 'INT_UNSIGNED',
14 : 'BIGINT_UNSIGNED',
} }
/**
* @function
* @param {number} typecode - The code to get the name of the type for
* @return {string} Name of the field type
*/
function getType(typecode) {
return typeCodesToName[typecode];
}
...@@ -211,7 +211,7 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) { ...@@ -211,7 +211,7 @@ TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
} }
} }
performance.mark('B'); performance.mark('B');
performance.measure('query', 'A', 'B'); performance.measure('query', 'A', 'B');
let response = this._createSetResponse(this._rowcount, time) let response = this._createSetResponse(this._rowcount, time)
...@@ -474,3 +474,21 @@ TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = ...@@ -474,3 +474,21 @@ TDengineCursor.prototype.openStream = function openStream(sql, callback, stime =
TDengineCursor.prototype.closeStream = function closeStream(stream) { TDengineCursor.prototype.closeStream = function closeStream(stream) {
this._chandle.closeStream(stream); this._chandle.closeStream(stream);
} }
/**
 * Schemaless insert through the cursor's connection.
 * Throws an InterfaceError if the native call reports an error; the native
 * result is released before the method returns.
 * @param {*} lines a single line string, or an array of lines, conforming to the chosen line protocol
 * @param {*} protocol line protocol, enum type (0,1,2,3), indicating which line protocol the lines use
 * @param {*} precision timestamp precision of the lines, enum type (0,1,2,3,4,5,6)
 */
TDengineCursor.prototype.schemalessInsert = function schemalessInsert(lines, protocol, precision) {
  this._result = this._chandle.schemalessInsert(this._connection._conn, lines, protocol, precision);
  let errorNo = this._chandle.errno(this._result);
  if (errorNo != 0) {
    let errorStr = this._chandle.errStr(this._result);
    // free the native result before throwing, otherwise it would leak
    this._chandle.freeResult(this._result);
    throw new errors.InterfaceError(errorNo + ":" + errorStr);
  }
  this._chandle.freeResult(this._result);
}
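Applications are expected to go through this cursor-level method. The test added later in this change exercises it fully, but a minimal, hypothetical invocation (package name, database name, and sample line are assumptions) looks like this:

```js
// Hypothetical sketch: one line-protocol record inserted through the cursor API.
const taos = require('td2.0-connector');   // assumed package name; adjust to how the connector is installed
const conn = taos.connect({ host: '127.0.0.1', user: 'root', password: 'taosdata' });
const cursor = conn.cursor();

cursor.execute('create database if not exists sml_sketch');
cursor.execute('use sml_sketch');

// Throws an InterfaceError on failure; the native result is freed internally.
cursor.schemalessInsert(
  ['sml_demo,t1=3i64 c1=3i64,c2=false 1626006833639000000'],
  taos.SCHEMALESS_PROTOCOL.TSDB_SML_LINE_PROTOCOL,
  taos.SCHEMALESS_PRECISION.TSDB_SML_TIMESTAMP_NANO_SECONDS);

setTimeout(() => conn.close(), 2000);      // same shutdown pattern as the test below
```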
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
"test": "test" "test": "test"
}, },
"scripts": { "scripts": {
"test": "node test/test.js && node test/testMicroseconds.js && node test/testNanoseconds.js && node test/testUnsignedType.js " "test": "node test/test.js && node test/testMicroseconds.js && node test/testNanoseconds.js && node test/testUnsignedType.js && node test/testSchemalessInsert.js "
}, },
"repository": { "repository": {
"type": "git", "type": "git",
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
"homepage": "https://github.com/taosdata/tdengine#readme", "homepage": "https://github.com/taosdata/tdengine#readme",
"dependencies": { "dependencies": {
"ffi-napi": "^3.1.0", "ffi-napi": "^3.1.0",
"lodash": "^4.17.21",
"ref-array-napi": "^1.2.1", "ref-array-napi": "^1.2.1",
"ref-napi": "^1.5.2", "ref-napi": "^1.5.2",
"ref-struct-napi": "^1.1.1" "ref-struct-napi": "^1.1.1"
......
var TDengineConnection = require('./nodetaos/connection.js') var TDengineConnection = require('./nodetaos/connection.js')
module.exports.connect = function (connection={}) { const TDengineConstant = require('./nodetaos/constants.js')
return new TDengineConnection(connection); module.exports = {
} connect: function (connection = {}) {
return new TDengineConnection(connection);
},
SCHEMALESS_PROTOCOL: TDengineConstant.SCHEMALESS_PROTOCOL,
SCHEMALESS_PRECISION: TDengineConstant.SCHEMALESS_PRECISION,
}
\ No newline at end of file
const _ = require('lodash');
const taos = require('../tdengine');
var conn = taos.connect({ host: "127.0.0.1", user: "root", password: "taosdata", config: "/etc/taos", port: 10 });
var c1 = conn.cursor();
executeUpdate("drop database if exists nodedb;");
executeUpdate("create database if not exists nodedb ;");
executeUpdate("use nodedb;");
let tbname1 = "line_protocol_arr";
let tbname2 = "json_protocol_arr";
let tbname3 = "json_protocol_str";
let tbname4 = "line_protocol_str";
let line1 = [tbname1 + ",t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
tbname1 + ",t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833641000000"
];
let line2 = ["{"
+ "\"metric\": \"" + tbname2 + "\","
+ "\"timestamp\": 1626006833,"
+ "\"value\": 10,"
+ "\"tags\": {"
+ " \"t1\": true,"
+ "\"t2\": false,"
+ "\"t3\": 10,"
+ "\"t4\": \"123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>\""
+ "}"
+ "}"
];
let line3 = "{"
+ "\"metric\": \"" + tbname3 + "\","
+ "\"timestamp\": 1626006833000,"
+ "\"value\": 10,"
+ "\"tags\": {"
+ " \"t1\": true,"
+ "\"t2\": false,"
+ "\"t3\": 10,"
+ "\"t4\": \"123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>\""
+ "}"
+ "}";
let line4 = tbname4 + ",t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639";
try {
c1.schemalessInsert(line1, taos.SCHEMALESS_PROTOCOL.TSDB_SML_LINE_PROTOCOL, taos.SCHEMALESS_PRECISION.TSDB_SML_TIMESTAMP_NANO_SECONDS);
testSchemaless(tbname1, line1.length);
c1.schemalessInsert(line2, taos.SCHEMALESS_PROTOCOL.TSDB_SML_JSON_PROTOCOL, taos.SCHEMALESS_PRECISION.TSDB_SML_TIMESTAMP_SECONDS);
testSchemaless(tbname2, line2.length);
c1.schemalessInsert(line3, taos.SCHEMALESS_PROTOCOL.TSDB_SML_JSON_PROTOCOL, taos.SCHEMALESS_PRECISION.TSDB_SML_TIMESTAMP_MILLI_SECONDS);
testSchemaless(tbname3, 1);
c1.schemalessInsert(line4, taos.SCHEMALESS_PROTOCOL.TSDB_SML_LINE_PROTOCOL, taos.SCHEMALESS_PRECISION.TSDB_SML_TIMESTAMP_MILLI_SECONDS);
testSchemaless(tbname4, 1);
} catch (err) {
console.log(err)
}
function executeUpdate(sql) {
console.log(sql);
c1.execute(sql);
}
function testSchemaless(tbname, numLines) {
let sql = "select count(*) from " + tbname + ";";
executeUpdate(sql);
let affectRows = _.first(c1.fetchall());
if (affectRows != numLines) {
    throw "protocol " + tbname + ": schemaless insert succeeded, but the selected row count " + affectRows + " does not match the expected " + numLines + ".";
}
else {
    console.log("protocol " + tbname + ": schemaless insert succeeded and the selected row count matches.");
}
console.log("===================")
}
setTimeout(() => conn.close(), 2000);
...@@ -51,7 +51,7 @@ conn.close() ...@@ -51,7 +51,7 @@ conn.close()
import taos import taos
conn = taos.connect() conn = taos.connect()
conn.exec("create database if not exists pytest") conn.execute("create database if not exists pytest")
result = conn.query("show databases") result = conn.query("show databases")
num_of_fields = result.field_count num_of_fields = result.field_count
...@@ -60,7 +60,7 @@ for field in result.fields: ...@@ -60,7 +60,7 @@ for field in result.fields:
for row in result: for row in result:
print(row) print(row)
result.close() result.close()
conn.exec("drop database pytest") conn.execute("drop database pytest")
conn.close() conn.close()
``` ```
...@@ -136,11 +136,11 @@ from taos import * ...@@ -136,11 +136,11 @@ from taos import *
conn = connect() conn = connect()
dbname = "pytest_taos_stmt" dbname = "pytest_taos_stmt"
conn.exec("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname) conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname) conn.select_db(dbname)
conn.exec( conn.execute(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, \ "create table if not exists log(ts timestamp, bo bool, nil tinyint, \
ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \ ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
su smallint unsigned, iu int unsigned, bu bigint unsigned, \ su smallint unsigned, iu int unsigned, bu bigint unsigned, \
...@@ -196,11 +196,11 @@ from taos import * ...@@ -196,11 +196,11 @@ from taos import *
conn = connect() conn = connect()
dbname = "pytest_taos_stmt" dbname = "pytest_taos_stmt"
conn.exec("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname) conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname) conn.select_db(dbname)
conn.exec( conn.execute(
"create table if not exists log(ts timestamp, bo bool, nil tinyint, \ "create table if not exists log(ts timestamp, bo bool, nil tinyint, \
ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \ ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
su smallint unsigned, iu int unsigned, bu bigint unsigned, \ su smallint unsigned, iu int unsigned, bu bigint unsigned, \
...@@ -249,12 +249,12 @@ import taos ...@@ -249,12 +249,12 @@ import taos
conn = taos.connect() conn = taos.connect()
dbname = "pytest_taos_subscribe_callback" dbname = "pytest_taos_subscribe_callback"
conn.exec("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname) conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname) conn.select_db(dbname)
conn.exec("create table if not exists log(ts timestamp, n int)") conn.execute("create table if not exists log(ts timestamp, n int)")
for i in range(10): for i in range(10):
conn.exec("insert into log values(now, %d)" % i) conn.execute("insert into log values(now, %d)" % i)
sub = conn.subscribe(True, "test", "select * from log", 1000) sub = conn.subscribe(True, "test", "select * from log", 1000)
print("# consume from begin") print("# consume from begin")
...@@ -263,14 +263,14 @@ for ts, n in sub.consume(): ...@@ -263,14 +263,14 @@ for ts, n in sub.consume():
print("# consume new data") print("# consume new data")
for i in range(5): for i in range(5):
conn.exec("insert into log values(now, %d)(now+1s, %d)" % (i, i)) conn.execute("insert into log values(now, %d)(now+1s, %d)" % (i, i))
result = sub.consume() result = sub.consume()
for ts, n in result: for ts, n in result:
print(ts, n) print(ts, n)
print("# consume with a stop condition") print("# consume with a stop condition")
for i in range(10): for i in range(10):
conn.exec("insert into log values(now, %d)" % int(random() * 10)) conn.execute("insert into log values(now, %d)" % int(random() * 10))
result = sub.consume() result = sub.consume()
try: try:
ts, n = next(result) ts, n = next(result)
...@@ -284,7 +284,7 @@ for i in range(10): ...@@ -284,7 +284,7 @@ for i in range(10):
sub.close() sub.close()
conn.exec("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.close() conn.close()
``` ```
...@@ -311,23 +311,23 @@ def test_subscribe_callback(conn): ...@@ -311,23 +311,23 @@ def test_subscribe_callback(conn):
# type: (TaosConnection) -> None # type: (TaosConnection) -> None
dbname = "pytest_taos_subscribe_callback" dbname = "pytest_taos_subscribe_callback"
try: try:
conn.exec("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname) conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname) conn.select_db(dbname)
conn.exec("create table if not exists log(ts timestamp, n int)") conn.execute("create table if not exists log(ts timestamp, n int)")
print("# subscribe with callback") print("# subscribe with callback")
sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback) sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback)
for i in range(10): for i in range(10):
conn.exec("insert into log values(now, %d)" % i) conn.execute("insert into log values(now, %d)" % i)
time.sleep(0.7) time.sleep(0.7)
sub.close() sub.close()
conn.exec("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.close() conn.close()
except Exception as err: except Exception as err:
conn.exec("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.close() conn.close()
raise err raise err
...@@ -374,10 +374,10 @@ def test_stream(conn): ...@@ -374,10 +374,10 @@ def test_stream(conn):
# type: (TaosConnection) -> None # type: (TaosConnection) -> None
dbname = "pytest_taos_stream" dbname = "pytest_taos_stream"
try: try:
conn.exec("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s" % dbname) conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname) conn.select_db(dbname)
conn.exec("create table if not exists log(ts timestamp, n int)") conn.execute("create table if not exists log(ts timestamp, n int)")
result = conn.query("select count(*) from log interval(5s)") result = conn.query("select count(*) from log interval(5s)")
assert result.field_count == 2 assert result.field_count == 2
...@@ -386,13 +386,13 @@ def test_stream(conn): ...@@ -386,13 +386,13 @@ def test_stream(conn):
stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter)) stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter))
for _ in range(0, 20): for _ in range(0, 20):
conn.exec("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)") conn.execute("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)")
time.sleep(2) time.sleep(2)
stream.close() stream.close()
conn.exec("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.close() conn.close()
except Exception as err: except Exception as err:
conn.exec("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.close() conn.close()
raise err raise err
...@@ -408,8 +408,8 @@ import taos ...@@ -408,8 +408,8 @@ import taos
conn = taos.connect() conn = taos.connect()
dbname = "pytest_line" dbname = "pytest_line"
conn.exec("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.exec("create database if not exists %s precision 'us'" % dbname) conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname) conn.select_db(dbname)
lines = [ lines = [
...@@ -431,7 +431,7 @@ for row in result: ...@@ -431,7 +431,7 @@ for row in result:
result.close() result.close()
conn.exec("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.close() conn.close()
``` ```
......
...@@ -2,7 +2,6 @@ CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20) ...@@ -2,7 +2,6 @@ CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20)
PROJECT(TDengine) PROJECT(TDengine)
ADD_SUBDIRECTORY(shell) ADD_SUBDIRECTORY(shell)
ADD_SUBDIRECTORY(taosdemo)
IF (TD_TAOS_TOOLS) IF (TD_TAOS_TOOLS)
INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/src/kit/taos_tools/deps/avro/lang/c/src) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/src/kit/taos_tools/deps/avro/lang/c/src)
......
Subproject commit 7ed1e6b485d04cc93c4bb0bc9e844086da1b4714 Subproject commit b76b5a76756a5c6530ba1d418de51fd336ae23b1
CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
INCLUDE_DIRECTORIES(inc)
FIND_PACKAGE(Git)
IF (GIT_FOUND)
MESSAGE("Git found")
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR}
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1)
IF ("${TAOSDEMO_COMMIT_SHA1}" STREQUAL "")
SET(TAOSDEMO_COMMIT_SHA1 "unknown")
ELSE ()
STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1)
STRING(STRIP "${TAOSDEMO_COMMIT_SHA1}" TAOSDEMO_COMMIT_SHA1)
ENDIF ()
EXECUTE_PROCESS(
COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR}
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_STATUS)
IF (TD_LINUX)
EXECUTE_PROCESS(
COMMAND bash "-c" "echo '${TAOSDEMO_STATUS}' | awk '{print $1}'"
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE TAOSDEMO_STATUS)
ENDIF (TD_LINUX)
ELSE()
MESSAGE("Git not found")
SET(TAOSDEMO_COMMIT_SHA1 "unknown")
SET(TAOSDEMO_STATUS "unknown")
ENDIF (GIT_FOUND)
MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1})
STRING(STRIP "${TAOSDEMO_STATUS}" TAOSDEMO_STATUS)
IF (TAOSDEMO_STATUS MATCHES "M")
SET(TAOSDEMO_STATUS "modified")
ELSE()
SET(TAOSDEMO_STATUS "")
ENDIF ()
MESSAGE("taosdemo's status is:" ${TAOSDEMO_STATUS})
ADD_DEFINITIONS(-DTAOSDEMO_COMMIT_SHA1="${TAOSDEMO_COMMIT_SHA1}")
ADD_DEFINITIONS(-DTAOSDEMO_STATUS="${TAOSDEMO_STATUS}")
MESSAGE("TD_VER_NUMBER is:" ${TD_VER_NUMBER})
IF ("${TD_VER_NUMBER}" STREQUAL "")
SET(TD_VERSION_NUMBER "TDengine-version-unknown")
ELSE()
SET(TD_VERSION_NUMBER ${TD_VER_NUMBER})
ENDIF ()
MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER})
ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}")
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc")
ELSE ()
SET(LINK_JEMALLOC "")
ENDIF ()
IF (TD_LINUX)
AUX_SOURCE_DIRECTORY(./src SRC)
ADD_EXECUTABLE(taosdemo ${SRC})
IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua ${LINK_JEMALLOC})
ELSE ()
TARGET_LINK_LIBRARIES(taosdemo taos cJson ${LINK_JEMALLOC})
ENDIF ()
ELSEIF (TD_WINDOWS)
AUX_SOURCE_DIRECTORY(./src SRC)
ADD_EXECUTABLE(taosdemo ${SRC})
SET_SOURCE_FILES_PROPERTIES(./src/demoUtil.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoData.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoInsert.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoCommandOpt.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoQuery.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoMain.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoSubscribe.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoOutput.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoJsonOpt.c PROPERTIES COMPILE_FLAGS -w)
IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua)
ELSE ()
TARGET_LINK_LIBRARIES(taosdemo taos cJson lua)
ENDIF ()
ELSEIF (TD_DARWIN)
# missing a few dependencies, such as <argp.h>
AUX_SOURCE_DIRECTORY(./src SRC)
ADD_EXECUTABLE(taosdemo ${SRC})
IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua)
ELSE ()
TARGET_LINK_LIBRARIES(taosdemo taos cJson lua)
ENDIF ()
ENDIF ()
{
"filetype": "subscribe",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"databases": "test",
"specified_table_query": {
"concurrent": 1,
"mode": "async",
"interval": 1000,
"restart": "yes",
"keepProgress": "yes",
"resubAfterConsume": 10,
"sqls": [
{
"sql": "select col1 from meters where col1 > 1;",
"result": "./subscribe_res0.txt"
},
{
"sql": "select col2 from meters where col2 > 1;",
"result": "./subscribe_res2.txt"
}
]
},
"super_table_query": {
"stblname": "meters",
"threads": 1,
"mode": "sync",
"interval": 1000,
"restart": "yes",
"keepProgress": "yes",
"sqls": [
{
"sql": "select col1 from xxxx where col1 > 10;",
"result": "./subscribe_res1.txt"
}
]
}
}
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __DEMO__
#define __DEMO__
#include <stdint.h>
#include <taos.h>
#include <taoserror.h>
#define _GNU_SOURCE
#define CURL_STATICLIB
#ifdef LINUX
#include <argp.h>
#include <inttypes.h>
#ifndef _ALPINE
#include <error.h>
#endif
#include <pthread.h>
#include <regex.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#include <wordexp.h>
#else
#include <regex.h>
#include <stdio.h>
#endif
#include <assert.h>
#include <stdlib.h>
// #include "os.h"
#include "taos.h"
#include "taoserror.h"
#include "tutil.h"
#define REQ_EXTRA_BUF_LEN 1024
#define RESP_BUF_LEN 4096
#define SQL_BUFF_LEN 1024
extern char configDir[];
#define STR_INSERT_INTO "INSERT INTO "
#define MAX_RECORDS_PER_REQ 32766
#define HEAD_BUFF_LEN \
TSDB_MAX_COLUMNS * 24 // 16*MAX_COLUMNS + (192+32)*2 + insert into ..
#define BUFFER_SIZE TSDB_MAX_ALLOWED_SQL_LEN
#define FETCH_BUFFER_SIZE 100 * TSDB_MAX_ALLOWED_SQL_LEN
#define COND_BUF_LEN (BUFFER_SIZE - 30)
#define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS)
#define MAX_USERNAME_SIZE 64
#define MAX_HOSTNAME_SIZE \
253 // https://man7.org/linux/man-pages/man7/hostname.7.html
#define MAX_TB_NAME_SIZE 64
#define MAX_DATA_SIZE \
(16 * TSDB_MAX_COLUMNS) + 20 // max record len: 16*MAX_COLUMNS, timestamp
// string and ,('') need extra space
#define OPT_ABORT 1 /* –abort */
#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255.
#define MAX_PATH_LEN 4096
#define DEFAULT_START_TIME 1500000000000
#define MAX_PREPARED_RAND 1000000
#define INT_BUFF_LEN 12
#define BIGINT_BUFF_LEN 21
#define SMALLINT_BUFF_LEN 7
#define TINYINT_BUFF_LEN 5
#define BOOL_BUFF_LEN 6
#define FLOAT_BUFF_LEN 22
#define DOUBLE_BUFF_LEN 42
#define TIMESTAMP_BUFF_LEN 21
#define PRINT_STAT_INTERVAL 30 * 1000
#define MAX_SAMPLES 10000
#define MAX_NUM_COLUMNS \
(TSDB_MAX_COLUMNS - 1) // exclude first column timestamp
#define MAX_DB_COUNT 8
#define MAX_SUPER_TABLE_COUNT 200
#define MAX_QUERY_SQL_COUNT 100
#define MAX_DATABASE_COUNT 256
#define MAX_JSON_BUFF 6400000
#define INPUT_BUF_LEN 256
#define EXTRA_SQL_LEN 256
#define TBNAME_PREFIX_LEN \
(TSDB_TABLE_NAME_LEN - 20) // 20 characters reserved for seq
#define SMALL_BUFF_LEN 8
#define DATATYPE_BUFF_LEN (SMALL_BUFF_LEN * 3)
#define NOTE_BUFF_LEN (SMALL_BUFF_LEN * 16)
#define DEFAULT_NTHREADS 8
#define DEFAULT_TIMESTAMP_STEP 1
#define DEFAULT_INTERLACE_ROWS 0
#define DEFAULT_DATATYPE_NUM 1
#define DEFAULT_CHILDTABLES 10000
#define DEFAULT_TEST_MODE 0
#define DEFAULT_METAFILE NULL
#define DEFAULT_SQLFILE NULL
#define DEFAULT_HOST "localhost"
#define DEFAULT_PORT 6030
#define DEFAULT_IFACE INTERFACE_BUT
#define DEFAULT_DATABASE "test"
#define DEFAULT_REPLICA 1
#define DEFAULT_TB_PREFIX "d"
#define DEFAULT_ESCAPE_CHAR false
#define DEFAULT_USE_METRIC true
#define DEFAULT_DROP_DB true
#define DEFAULT_AGGR_FUNC false
#define DEFAULT_DEBUG false
#define DEFAULT_VERBOSE false
#define DEFAULT_PERF_STAT false
#define DEFAULT_ANS_YES false
#define DEFAULT_OUTPUT "./output.txt"
#define DEFAULT_SYNC_MODE 0
#define DEFAULT_DATA_TYPE \
{ TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_FLOAT }
#define DEFAULT_DATATYPE \
{ "FLOAT", "INT", "FLOAT" }
#define DEFAULT_DATALENGTH \
{ 4, 4, 4 }
#define DEFAULT_BINWIDTH 64
#define DEFAULT_COL_COUNT 4
#define DEFAULT_LEN_ONE_ROW 76
#define DEFAULT_INSERT_INTERVAL 0
#define DEFAULT_QUERY_TIME 1
#define DEFAULT_PREPARED_RAND 10000
#define DEFAULT_REQ_PER_REQ 30000
#define DEFAULT_INSERT_ROWS 10000
#define DEFAULT_ABORT 0
#define DEFAULT_RATIO 0
#define DEFAULT_DISORDER_RANGE 1000
#define DEFAULT_METHOD_DEL 1
#define DEFAULT_TOTAL_INSERT 0
#define DEFAULT_TOTAL_AFFECT 0
#define DEFAULT_DEMO_MODE true
#define DEFAULT_CHINESE_OPT false
#define DEFAULT_CREATE_BATCH 10
#define DEFAULT_SUB_INTERVAL 10000
#define DEFAULT_QUERY_INTERVAL 10000
#define SML_LINE_SQL_SYNTAX_OFFSET 7
#if _MSC_VER <= 1900
#define __func__ __FUNCTION__
#endif
#define debugPrint(fmt, ...) \
do { \
if (g_args.debug_print || g_args.verbose_print) \
fprintf(stderr, "DEBG: " fmt, __VA_ARGS__); \
} while (0)
#define verbosePrint(fmt, ...) \
do { \
if (g_args.verbose_print) fprintf(stderr, "VERB: " fmt, __VA_ARGS__); \
} while (0)
#define performancePrint(fmt, ...) \
do { \
if (g_args.performance_print) \
fprintf(stderr, "PERF: " fmt, __VA_ARGS__); \
} while (0)
#define errorPrint(fmt, ...) \
do { \
fprintf(stderr, "\033[31m"); \
fprintf(stderr, "%s(%d) ", __FILE__, __LINE__); \
fprintf(stderr, "ERROR: " fmt, __VA_ARGS__); \
fprintf(stderr, "\033[0m"); \
} while (0)
enum TEST_MODE {
INSERT_TEST, // 0
QUERY_TEST, // 1
SUBSCRIBE_TEST, // 2
INVAID_TEST
};
typedef enum CREATE_SUB_TABLE_MOD_EN {
PRE_CREATE_SUBTBL,
AUTO_CREATE_SUBTBL,
NO_CREATE_SUBTBL
} CREATE_SUB_TABLE_MOD_EN;
typedef enum TABLE_EXISTS_EN {
TBL_NO_EXISTS,
TBL_ALREADY_EXISTS,
TBL_EXISTS_BUTT
} TABLE_EXISTS_EN;
enum enumSYNC_MODE { SYNC_MODE, ASYNC_MODE, MODE_BUT };
enum enum_TAOS_INTERFACE {
TAOSC_IFACE,
REST_IFACE,
STMT_IFACE,
SML_IFACE,
INTERFACE_BUT
};
typedef enum enumQUERY_CLASS {
SPECIFIED_CLASS,
STABLE_CLASS,
CLASS_BUT
} QUERY_CLASS;
typedef enum enum_PROGRESSIVE_OR_INTERLACE {
PROGRESSIVE_INSERT_MODE,
INTERLACE_INSERT_MODE,
INVALID_INSERT_MODE
} PROG_OR_INTERLACE_MODE;
typedef enum enumQUERY_TYPE {
NO_INSERT_TYPE,
INSERT_TYPE,
QUERY_TYPE_BUT
} QUERY_TYPE;
enum _show_db_index {
TSDB_SHOW_DB_NAME_INDEX,
TSDB_SHOW_DB_CREATED_TIME_INDEX,
TSDB_SHOW_DB_NTABLES_INDEX,
TSDB_SHOW_DB_VGROUPS_INDEX,
TSDB_SHOW_DB_REPLICA_INDEX,
TSDB_SHOW_DB_QUORUM_INDEX,
TSDB_SHOW_DB_DAYS_INDEX,
TSDB_SHOW_DB_KEEP_INDEX,
TSDB_SHOW_DB_CACHE_INDEX,
TSDB_SHOW_DB_BLOCKS_INDEX,
TSDB_SHOW_DB_MINROWS_INDEX,
TSDB_SHOW_DB_MAXROWS_INDEX,
TSDB_SHOW_DB_WALLEVEL_INDEX,
TSDB_SHOW_DB_FSYNC_INDEX,
TSDB_SHOW_DB_COMP_INDEX,
TSDB_SHOW_DB_CACHELAST_INDEX,
TSDB_SHOW_DB_PRECISION_INDEX,
TSDB_SHOW_DB_UPDATE_INDEX,
TSDB_SHOW_DB_STATUS_INDEX,
TSDB_MAX_SHOW_DB
};
// -----------------------------------------SHOW TABLES CONFIGURE
// -------------------------------------
enum _show_stables_index {
TSDB_SHOW_STABLES_NAME_INDEX,
TSDB_SHOW_STABLES_CREATED_TIME_INDEX,
TSDB_SHOW_STABLES_COLUMNS_INDEX,
TSDB_SHOW_STABLES_METRIC_INDEX,
TSDB_SHOW_STABLES_UID_INDEX,
TSDB_SHOW_STABLES_TID_INDEX,
TSDB_SHOW_STABLES_VGID_INDEX,
TSDB_MAX_SHOW_STABLES
};
enum _describe_table_index {
TSDB_DESCRIBE_METRIC_FIELD_INDEX,
TSDB_DESCRIBE_METRIC_TYPE_INDEX,
TSDB_DESCRIBE_METRIC_LENGTH_INDEX,
TSDB_DESCRIBE_METRIC_NOTE_INDEX,
TSDB_MAX_DESCRIBE_METRIC
};
typedef struct SArguments_S {
char * metaFile;
uint32_t test_mode;
char * host;
uint16_t port;
uint16_t iface;
char * user;
char password[SHELL_MAX_PASSWORD_LEN];
char * database;
int replica;
char * tb_prefix;
bool escapeChar;
char * sqlFile;
bool use_metric;
bool drop_database;
bool aggr_func;
bool answer_yes;
bool debug_print;
bool verbose_print;
bool performance_print;
char * output_file;
bool async_mode;
char data_type[MAX_NUM_COLUMNS + 1];
char * dataType[MAX_NUM_COLUMNS + 1];
int32_t data_length[MAX_NUM_COLUMNS + 1];
uint32_t binwidth;
uint32_t columnCount;
uint64_t lenOfOneRow;
uint32_t nthreads;
uint64_t insert_interval;
uint64_t timestamp_step;
int64_t query_times;
int64_t prepared_rand;
uint32_t interlaceRows;
uint32_t reqPerReq; // num_of_records_per_req
uint64_t max_sql_len;
int64_t ntables;
int64_t insertRows;
int abort;
uint32_t disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms, us or ns. according to database precision
uint32_t method_of_delete;
uint64_t totalInsertRows;
uint64_t totalAffectedRows;
bool demo_mode; // use default column name and semi-random data
bool chinese;
} SArguments;
typedef struct SColumn_S {
char field[TSDB_COL_NAME_LEN];
char data_type;
char dataType[DATATYPE_BUFF_LEN];
uint32_t dataLen;
char note[NOTE_BUFF_LEN];
} StrColumn;
typedef struct SSuperTable_S {
char stbName[TSDB_TABLE_NAME_LEN];
char dataSource[SMALL_BUFF_LEN]; // rand_gen or sample
char childTblPrefix[TBNAME_PREFIX_LEN];
uint16_t childTblExists;
int64_t childTblCount;
uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in
// one sql
uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
uint16_t iface; // 0: taosc, 1: rest, 2: stmt
uint16_t lineProtocol;
int64_t childTblLimit;
uint64_t childTblOffset;
// int multiThreadWriteOneTbl; // 0: no, 1: yes
uint32_t interlaceRows; //
int disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms, us or ns. according to database precision
uint64_t maxSqlLen; //
uint64_t insertInterval; // insert interval, will override global insert
// interval
int64_t insertRows;
int64_t timeStampStep;
int tsPrecision;
char startTimestamp[MAX_TB_NAME_SIZE];
char sampleFormat[SMALL_BUFF_LEN]; // csv, json
char sampleFile[MAX_FILE_NAME_LEN];
char tagsFile[MAX_FILE_NAME_LEN];
uint32_t columnCount;
StrColumn columns[TSDB_MAX_COLUMNS];
uint32_t tagCount;
StrColumn tags[TSDB_MAX_TAGS];
char * childTblName;
bool escapeChar;
char * colsOfCreateChildTable;
uint64_t lenOfOneRow;
uint64_t lenOfTagOfOneRow;
char *sampleDataBuf;
bool useSampleTs;
uint32_t tagSource; // 0: rand, 1: tag sample
char * tagDataBuf;
uint32_t tagSampleCount;
uint32_t tagUsePos;
// bind param batch
char *sampleBindBatchArray;
// statistics
uint64_t totalInsertRows;
uint64_t totalAffectedRows;
} SSuperTable;
typedef struct {
char name[TSDB_DB_NAME_LEN];
char create_time[32];
int64_t ntables;
int32_t vgroups;
int16_t replica;
int16_t quorum;
int16_t days;
char keeplist[64];
int32_t cache; // MB
int32_t blocks;
int32_t minrows;
int32_t maxrows;
int8_t wallevel;
int32_t fsync;
int8_t comp;
int8_t cachelast;
char precision[SMALL_BUFF_LEN]; // time resolution
int8_t update;
char status[16];
} SDbInfo;
typedef struct SDbCfg_S {
// int maxtablesPerVnode;
uint32_t minRows; // 0 means default
uint32_t maxRows; // 0 means default
int comp;
int walLevel;
int cacheLast;
int fsync;
int replica;
int update;
int keep;
int days;
int cache;
int blocks;
int quorum;
char precision[SMALL_BUFF_LEN];
} SDbCfg;
typedef struct SDataBase_S {
char dbName[TSDB_DB_NAME_LEN];
bool drop; // 0: use exists, 1: if exists, drop then new create
SDbCfg dbCfg;
uint64_t superTblCount;
SSuperTable *superTbls;
} SDataBase;
typedef struct SDbs_S {
char cfgDir[MAX_FILE_NAME_LEN];
char host[MAX_HOSTNAME_SIZE];
struct sockaddr_in serv_addr;
uint16_t port;
char user[MAX_USERNAME_SIZE];
char password[SHELL_MAX_PASSWORD_LEN];
char resultFile[MAX_FILE_NAME_LEN];
bool use_metric;
bool aggr_func;
bool asyncMode;
uint32_t threadCount;
uint32_t threadCountForCreateTbl;
uint32_t dbCount;
// statistics
uint64_t totalInsertRows;
uint64_t totalAffectedRows;
SDataBase *db;
} SDbs;
typedef struct SpecifiedQueryInfo_S {
uint64_t queryInterval; // 0: unlimited > 0 loop/s
uint32_t concurrent;
int sqlCount;
uint32_t asyncMode; // 0: sync, 1: async
uint64_t subscribeInterval; // ms
uint64_t queryTimes;
bool subscribeRestart;
int subscribeKeepProgress;
char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE + 1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
int resubAfterConsume[MAX_QUERY_SQL_COUNT];
int endAfterConsume[MAX_QUERY_SQL_COUNT];
TAOS_SUB *tsub[MAX_QUERY_SQL_COUNT];
char topic[MAX_QUERY_SQL_COUNT][32];
int consumed[MAX_QUERY_SQL_COUNT];
TAOS_RES *res[MAX_QUERY_SQL_COUNT];
uint64_t totalQueried;
} SpecifiedQueryInfo;
typedef struct SuperQueryInfo_S {
char stbName[TSDB_TABLE_NAME_LEN];
uint64_t queryInterval; // 0: unlimited > 0 loop/s
uint32_t threadCnt;
uint32_t asyncMode; // 0: sync, 1: async
uint64_t subscribeInterval; // ms
bool subscribeRestart;
int subscribeKeepProgress;
uint64_t queryTimes;
int64_t childTblCount;
char childTblPrefix[TBNAME_PREFIX_LEN]; // 20 characters reserved for seq
int sqlCount;
char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE + 1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
int resubAfterConsume;
int endAfterConsume;
TAOS_SUB *tsub[MAX_QUERY_SQL_COUNT];
char * childTblName;
uint64_t totalQueried;
} SuperQueryInfo;
typedef struct SQueryMetaInfo_S {
char cfgDir[MAX_FILE_NAME_LEN];
char host[MAX_HOSTNAME_SIZE];
uint16_t port;
struct sockaddr_in serv_addr;
char user[MAX_USERNAME_SIZE];
char password[SHELL_MAX_PASSWORD_LEN];
char dbName[TSDB_DB_NAME_LEN];
char queryMode[SMALL_BUFF_LEN]; // taosc, rest
SpecifiedQueryInfo specifiedQueryInfo;
SuperQueryInfo superQueryInfo;
uint64_t totalQueried;
} SQueryMetaInfo;
typedef struct SThreadInfo_S {
TAOS * taos;
TAOS_STMT * stmt;
int64_t * bind_ts;
int64_t * bind_ts_array;
char * bindParams;
char * is_null;
int threadID;
char db_name[TSDB_DB_NAME_LEN];
uint32_t time_precision;
char filePath[MAX_PATH_LEN];
FILE * fp;
char tb_prefix[TSDB_TABLE_NAME_LEN];
uint64_t start_table_from;
uint64_t end_table_to;
int64_t ntables;
int64_t tables_created;
uint64_t data_of_rate;
int64_t start_time;
char * cols;
bool use_metric;
SSuperTable *stbInfo;
char * buffer; // sql cmd buffer
// for async insert
tsem_t lock_sem;
int64_t counter;
uint64_t st;
uint64_t et;
uint64_t lastTs;
// sample data
int64_t samplePos;
// statistics
uint64_t totalInsertRows;
uint64_t totalAffectedRows;
// insert delay statistics
uint64_t cntDelay;
uint64_t totalDelay;
uint64_t avgDelay;
uint64_t maxDelay;
uint64_t minDelay;
// seq of query or subscribe
uint64_t querySeq; // sequence number of sql command
TAOS_SUB *tsub;
char **lines;
SOCKET sockfd;
} threadInfo;
/* ************ Global variables ************ */
extern char * g_aggreFuncDemo[];
extern char * g_aggreFunc[];
extern SArguments g_args;
extern SDbs g_Dbs;
extern char * g_dupstr;
extern int64_t g_totalChildTables;
extern int64_t g_actualChildTables;
extern SQueryMetaInfo g_queryInfo;
extern FILE * g_fpOfInsertResult;
#define min(a, b) (((a) < (b)) ? (a) : (b))
/* ************ Function declares ************ */
/* demoCommandOpt.c */
int parse_args(int argc, char *argv[]);
void setParaFromArg();
void querySqlFile(TAOS *taos, char *sqlFile);
void testCmdLine();
/* demoJsonOpt.c */
int getInfoFromJsonFile(char *file);
int testMetaFile();
/* demoUtil.c */
int isCommentLine(char *line);
void replaceChildTblName(char *inSql, char *outSql, int tblIndex);
void setupForAnsiEscape(void);
void resetAfterAnsiEscape(void);
int taosRandom();
void tmfree(void *buf);
void tmfclose(FILE *fp);
void fetchResult(TAOS_RES *res, threadInfo *pThreadInfo);
void prompt();
void ERROR_EXIT(const char *msg);
int postProceSql(char *host, uint16_t port, char *sqlstr,
threadInfo *pThreadInfo);
int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet);
int regexMatch(const char *s, const char *reg, int cflags);
int convertHostToServAddr(char *host, uint16_t port,
struct sockaddr_in *serv_addr);
char *formatTimestamp(char *buf, int64_t val, int precision);
void errorWrongValue(char *program, char *wrong_arg, char *wrong_value);
void errorUnrecognized(char *program, char *wrong_arg);
void errorPrintReqArg(char *program, char *wrong_arg);
void errorPrintReqArg2(char *program, char *wrong_arg);
void errorPrintReqArg3(char *program, char *wrong_arg);
bool isStringNumber(char *input);
int getAllChildNameOfSuperTable(TAOS *taos, char *dbName, char *stbName,
char ** childTblNameOfSuperTbl,
int64_t *childTblCountOfSuperTbl);
int getChildNameOfSuperTableWithLimitAndOffset(TAOS *taos, char *dbName,
char * stbName,
char ** childTblNameOfSuperTbl,
int64_t *childTblCountOfSuperTbl,
int64_t limit, uint64_t offset,
bool escapChar);
/* demoInsert.c */
int insertTestProcess();
void postFreeResource();
/* demoOutput.c */
void printVersion();
void printfInsertMeta();
void printfInsertMetaToFile(FILE *fp);
void printStatPerThread(threadInfo *pThreadInfo);
void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo);
void printfQueryMeta();
void printHelp();
void printfQuerySystemInfo(TAOS *taos);
/* demoQuery.c */
int queryTestProcess();
/* demoSubscribe.c */
int subscribeTestProcess();
#endif
\ No newline at end of file
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __DEMODATA__
#define __DEMODATA__
#include "cJSON.h"
#include "demo.h"
/***** Global variables ******/
extern char * g_sampleDataBuf;
extern char * g_sampleBindBatchArray;
extern int32_t * g_randint;
extern uint32_t *g_randuint;
extern int64_t * g_randbigint;
extern uint64_t *g_randubigint;
extern float * g_randfloat;
extern double * g_randdouble;
extern char * g_randbool_buff;
extern char * g_randint_buff;
extern char * g_randuint_buff;
extern char * g_rand_voltage_buff;
extern char * g_randbigint_buff;
extern char * g_randubigint_buff;
extern char * g_randsmallint_buff;
extern char * g_randusmallint_buff;
extern char * g_randtinyint_buff;
extern char * g_randutinyint_buff;
extern char * g_randfloat_buff;
extern char * g_rand_current_buff;
extern char * g_rand_phase_buff;
extern char * g_randdouble_buff;
/***** Declare functions *****/
int init_rand_data();
char * rand_bool_str();
int32_t rand_bool();
char * rand_tinyint_str();
int32_t rand_tinyint();
char * rand_utinyint_str();
int32_t rand_utinyint();
char * rand_smallint_str();
int32_t rand_smallint();
char * rand_usmallint_str();
int32_t rand_usmallint();
char * rand_int_str();
int32_t rand_int();
char * rand_uint_str();
int32_t rand_uint();
char * rand_bigint_str();
int64_t rand_bigint();
char * rand_ubigint_str();
int64_t rand_ubigint();
char * rand_float_str();
float rand_float();
char * demo_current_float_str();
float UNUSED_FUNC demo_current_float();
char * demo_voltage_int_str();
int32_t UNUSED_FUNC demo_voltage_int();
char * demo_phase_float_str();
float UNUSED_FUNC demo_phase_float();
void rand_string(char *str, int size);
char * rand_double_str();
double rand_double();
int generateTagValuesForStb(SSuperTable *stbInfo, int64_t tableSeq,
char *tagsValBuf);
int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, int disorderRatio,
int disorderRange);
int32_t prepareStbStmtBindTag(char *bindArray, SSuperTable *stbInfo,
char *tagsVal, int32_t timePrec);
int32_t prepareStmtWithoutStb(threadInfo *pThreadInfo, char *tableName,
uint32_t batch, int64_t insertRows,
int64_t recordFrom, int64_t startTime);
int32_t generateStbInterlaceData(threadInfo *pThreadInfo, char *tableName,
uint32_t batchPerTbl, uint64_t i,
uint32_t batchPerTblTimes, uint64_t tableSeq,
char *buffer, int64_t insertRows,
int64_t startTime, uint64_t *pRemainderBufLen);
int64_t generateInterlaceDataWithoutStb(char *tableName, uint32_t batch,
uint64_t tableSeq, char *dbName,
char *buffer, int64_t insertRows,
int64_t startTime,
uint64_t *pRemainderBufLen);
int32_t generateStbProgressiveData(SSuperTable *stbInfo, char *tableName,
int64_t tableSeq, char *dbName, char *buffer,
int64_t insertRows, uint64_t recordFrom,
int64_t startTime, int64_t *pSamplePos,
int64_t *pRemainderBufLen);
int32_t generateProgressiveDataWithoutStb(
char *tableName, threadInfo *pThreadInfo, char *buffer, int64_t insertRows,
uint64_t recordFrom, int64_t startTime, int64_t *pRemainderBufLen);
int64_t generateStbRowData(SSuperTable *stbInfo, char *recBuf,
int64_t remainderBufLen, int64_t timestamp);
int prepareSampleForStb(SSuperTable *stbInfo);
int prepareSampleForNtb();
int parseSamplefileToStmtBatch(SSuperTable *stbInfo);
int parseStbSampleToStmtBatchForThread(threadInfo * pThreadInfo,
SSuperTable *stbInfo, uint32_t timePrec,
uint32_t batch);
int parseNtbSampleToStmtBatchForThread(threadInfo *pThreadInfo,
uint32_t timePrec, uint32_t batch);
int prepareSampleData();
int32_t generateSmlConstPart(char *sml, SSuperTable *stbInfo,
threadInfo *pThreadInfo, int tbSeq);
int32_t generateSmlMutablePart(char *line, char *sml, SSuperTable *stbInfo,
threadInfo *pThreadInfo, int64_t timestamp);
int32_t generateSmlJsonTags(cJSON *tagsList, SSuperTable *stbInfo,
threadInfo *pThreadInfo, int tbSeq);
int32_t generateSmlJsonCols(cJSON *array, cJSON *tag, SSuperTable *stbInfo,
threadInfo *pThreadInfo, int64_t timestamp);
#endif
\ No newline at end of file
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 1000,
"num_of_records_per_req": 100,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "stb_",
"auto_create_table": "no",
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1000,
"interlace_rows": 20,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}],
"tags": [{"type": "TINYINT", "count":1}]
}]
}]
}
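The JSON object above is a taosdemo insert metadata file: when taosdemo is launched as `taosdemo -f <file> -y` (as the test scripts later in this commit do), getInfoFromJsonFile() loads these settings in place of the command-line defaults, and `-y` skips the confirmation prompt. This example creates 100 child tables under the super table stb and writes 1,000 rows per table in interlaced batches (interlace_rows = 20); the object that follows is the same format scaled up to 10,000 child tables and 100,000 rows each, inserted progressively (interlace_rows = 0).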
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"num_of_records_per_req": 100,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 36500,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb",
"child_table_exists":"no",
"childtable_count": 10000,
"childtable_prefix": "stb_",
"auto_create_table": "no",
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100000,
"interlace_rows": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}
{
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "yes",
"databases": "dbx",
"query_times": 1,
"specified_table_query": {
"query_interval": 1,
"concurrent": 4,
"sqls": [
{
"sql": "select last_row(*) from stb where color='red'",
"result": "./query_res0.txt"
},
{
"sql": "select count(*) from stb_01",
"result": "./query_res1.txt"
}
]
},
"super_table_query": {
"stblname": "stb",
"query_interval": 1,
"threads": 4,
"sqls": [
{
"sql": "select last_row(*) from xxxx",
"result": "./query_res2.txt"
}
]
}
}
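This query metadata file drives taosdemo's query mode: the statements under specified_table_query are executed as written by `concurrent` threads, while super_table_query runs its statements once per child table of stblname, with the placeholder table name xxxx presumably rewritten to each child table's name (cf. replaceChildTblName() declared for demoUtil.c above). Each statement's output is written to the listed result file.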
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "demo.h"
int64_t g_totalChildTables = DEFAULT_CHILDTABLES;
int64_t g_actualChildTables = 0;
FILE * g_fpOfInsertResult = NULL;
char * g_dupstr = NULL;
SDbs g_Dbs;
SQueryMetaInfo g_queryInfo;
SArguments g_args = {
DEFAULT_METAFILE, // metaFile
DEFAULT_TEST_MODE, // test_mode
DEFAULT_HOST, // host
DEFAULT_PORT, // port
DEFAULT_IFACE, // iface
TSDB_DEFAULT_USER, // user
TSDB_DEFAULT_PASS, // password
DEFAULT_DATABASE, // database
DEFAULT_REPLICA, // replica
DEFAULT_TB_PREFIX, // tb_prefix
DEFAULT_ESCAPE_CHAR, // escapeChar
DEFAULT_SQLFILE, // sqlFile
DEFAULT_USE_METRIC, // use_metric
DEFAULT_DROP_DB, // drop_database
DEFAULT_AGGR_FUNC, // aggr_func
DEFAULT_DEBUG, // debug_print
DEFAULT_VERBOSE, // verbose_print
DEFAULT_PERF_STAT, // performance statistic print
DEFAULT_ANS_YES, // answer_yes;
DEFAULT_OUTPUT, // output_file
DEFAULT_SYNC_MODE, // mode : sync or async
DEFAULT_DATA_TYPE, // data_type
DEFAULT_DATATYPE, // dataType
DEFAULT_DATALENGTH, // data_length
DEFAULT_BINWIDTH, // binwidth
DEFAULT_COL_COUNT, // columnCount, timestamp + float + int + float
DEFAULT_LEN_ONE_ROW, // lenOfOneRow
DEFAULT_NTHREADS, // nthreads
DEFAULT_INSERT_INTERVAL, // insert_interval
DEFAULT_TIMESTAMP_STEP, // timestamp_step
DEFAULT_QUERY_TIME, // query_times
DEFAULT_PREPARED_RAND, // prepared_rand
DEFAULT_INTERLACE_ROWS, // interlaceRows;
DEFAULT_REQ_PER_REQ, // reqPerReq
TSDB_MAX_ALLOWED_SQL_LEN, // max_sql_len
DEFAULT_CHILDTABLES, // ntables
DEFAULT_INSERT_ROWS, // insertRows
DEFAULT_ABORT, // abort
DEFAULT_RATIO, // disorderRatio
DEFAULT_DISORDER_RANGE, // disorderRange
DEFAULT_METHOD_DEL, // method_of_delete
DEFAULT_TOTAL_INSERT, // totalInsertRows;
DEFAULT_TOTAL_AFFECT, // totalAffectedRows;
DEFAULT_DEMO_MODE, // demo_mode;
DEFAULT_CHINESE_OPT // chinese
};
int main(int argc, char *argv[]) {
if (parse_args(argc, argv)) {
exit(EXIT_FAILURE);
}
debugPrint("meta file: %s\n", g_args.metaFile);
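// when a JSON metadata file is passed with -f, all test parameters come from that file;
// otherwise a single database/super table is built from the parsed command-line arguments below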
if (g_args.metaFile) {
g_totalChildTables = 0;
if (getInfoFromJsonFile(g_args.metaFile)) {
exit(EXIT_FAILURE);
}
if (testMetaFile()) {
exit(EXIT_FAILURE);
}
} else {
memset(&g_Dbs, 0, sizeof(SDbs));
g_Dbs.db = calloc(1, sizeof(SDataBase));
if (NULL == g_Dbs.db) {
errorPrint("%s", "failed to allocate memory\n");
}
g_Dbs.db[0].superTbls = calloc(1, sizeof(SSuperTable));
if (NULL == g_Dbs.db[0].superTbls) {
errorPrint("%s", "failed to allocate memory\n");
}
setParaFromArg();
if (NULL != g_args.sqlFile) {
TAOS *qtaos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password,
g_Dbs.db[0].dbName, g_Dbs.port);
querySqlFile(qtaos, g_args.sqlFile);
taos_close(qtaos);
} else {
testCmdLine();
}
}
postFreeResource();
return 0;
}
\ No newline at end of file
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
{
"filetype": "subscribe",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"databases": "test",
"specified_table_query": {
"concurrent": 1,
"mode": "sync",
"interval": 1000,
"restart": "yes",
"keepProgress": "yes",
"resubAfterConsume": 10,
"sqls": [
{
"sql": "select avg(col1) from meters where col1 > 1;",
"result": "./subscribe_res0.txt"
}
]
},
"super_table_query": {
"stblname": "meters",
"threads": 1,
"mode": "sync",
"interval": 1000,
"restart": "yes",
"keepProgress": "yes",
"sqls": [
{
"sql": "select col1 from xxxx where col1 > 10;",
"result": "./subscribe_res1.txt"
}
]
}
}
This diff is collapsed.
...@@ -714,13 +714,17 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) {
static SMemRow tsdbInsertDupKeyMerge(SMemRow row1, SMemRow row2, STsdbRepo* pRepo,
                                     STSchema **ppSchema1, STSchema **ppSchema2,
                                     STable* pTable, int32_t* pPoints, SMemRow* pLastRow) {
//for compatiblity, duplicate key inserted when update=0 should be also calculated as affected rows!
if(row1 == NULL && row2 == NULL && pRepo->config.update == TD_ROW_DISCARD_UPDATE) {
(*pPoints)++;
return NULL;
}
tsdbTrace("vgId:%d a row is %s table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo),
          "updated in", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable),
          memRowKey(row1));
if(row2 == NULL || pRepo->config.update != TD_ROW_PARTIAL_UPDATE) {
void* pMem = tsdbAllocBytes(pRepo, memRowTLen(row1));
if(pMem == NULL) return NULL;
...@@ -841,7 +845,6 @@ static int tsdbInsertDataToTable(STsdbRepo* pRepo, SSubmitBlk* pBlock, int32_t *
int64_t dsize = SL_SIZE(pTableData->pData) - osize;
(*pAffectedRows) += points;
if(lastRow != NULL) {
TSKEY lastRowKey = memRowKey(lastRow);
if (pMemTable->keyFirst > firstRowKey) pMemTable->keyFirst = firstRowKey;
......
...@@ -11,7 +11,7 @@
4. pip install ../src/connector/python ; pip3 install
   ../src/connector/python
5. pip install numpy; pip3 install numpy (numpy is required only if you need to run querySort.py)
5. pip install numpy; pip3 install numpy fabric2 psutil pandas(numpy is required only if you need to run querySort.py)
> Note: Both Python2 and Python3 are currently supported by the Python test
> framework. Since Python2 is no longer officially supported by Python Software
......
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net5.0</TargetFramework>
<RootNamespace>C_checker</RootNamespace>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="1.0.2" />
</ItemGroup>
</Project>
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
namespace TDengineDriver
{
enum TDengineDataType
{
TSDB_DATA_TYPE_NULL = 0, // 1 bytes
TSDB_DATA_TYPE_BOOL = 1, // 1 bytes
TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes
TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes
TSDB_DATA_TYPE_INT = 4, // 4 bytes
TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes
TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes
TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
TSDB_DATA_TYPE_BINARY = 8, // string
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
TSDB_DATA_TYPE_NCHAR = 10, // unicode string
TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte
TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes
TSDB_DATA_TYPE_UINT = 13, // 4 bytes
TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes
}
enum TDengineInitOption
{
TSDB_OPTION_LOCALE = 0,
TSDB_OPTION_CHARSET = 1,
TSDB_OPTION_TIMEZONE = 2,
TDDB_OPTION_CONFIGDIR = 3,
TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4
}
class TDengineMeta
{
public string name;
public short size;
public byte type;
public string TypeName()
{
switch ((TDengineDataType)type)
{
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
return "BOOL";
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
return "TINYINT";
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
return "SMALLINT";
case TDengineDataType.TSDB_DATA_TYPE_INT:
return "INT";
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
return "BIGINT";
case TDengineDataType.TSDB_DATA_TYPE_UTINYINT:
return "TINYINT UNSIGNED";
case TDengineDataType.TSDB_DATA_TYPE_USMALLINT:
return "SMALLINT UNSIGNED";
case TDengineDataType.TSDB_DATA_TYPE_UINT:
return "INT UNSIGNED";
case TDengineDataType.TSDB_DATA_TYPE_UBIGINT:
return "BIGINT UNSIGNED";
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
return "FLOAT";
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
return "DOUBLE";
case TDengineDataType.TSDB_DATA_TYPE_BINARY:
return "STRING";
case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
return "TIMESTAMP";
case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
return "NCHAR";
default:
return "undefine";
}
}
}
class TDengine
{
public const int TSDB_CODE_SUCCESS = 0;
[DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)]
static extern public void Init();
[DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)]
static extern public void Cleanup();
[DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)]
static extern public void Options(int option, string value);
[DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Connect(string ip, string user, string password, string db, short port);
[DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_errstr(IntPtr res);
static public string Error(IntPtr res)
{
IntPtr errPtr = taos_errstr(res);
return Marshal.PtrToStringAnsi(errPtr);
}
[DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)]
static extern public int ErrorNo(IntPtr res);
[DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Query(IntPtr conn, string sqlstr);
[DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)]
static extern public int AffectRows(IntPtr res);
[DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)]
static extern public int FieldCount(IntPtr res);
[DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_fetch_fields(IntPtr res);
static public List<TDengineMeta> FetchFields(IntPtr res)
{
const int fieldSize = 68;
List<TDengineMeta> metas = new List<TDengineMeta>();
if (res == IntPtr.Zero)
{
return metas;
}
int fieldCount = FieldCount(res);
IntPtr fieldsPtr = taos_fetch_fields(res);
for (int i = 0; i < fieldCount; ++i)
{
int offset = i * fieldSize;
TDengineMeta meta = new TDengineMeta();
meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset);
meta.type = Marshal.ReadByte(fieldsPtr + offset + 65);
meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66);
metas.Add(meta);
}
return metas;
}
[DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FetchRows(IntPtr res);
[DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FreeResult(IntPtr res);
[DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
//get precision in restultset
[DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)]
static extern public int ResultPrecision(IntPtr taos);
//schemaless API
[DllImport("taos",SetLastError = true, EntryPoint = "taos_schemaless_insert", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr SchemalessInsert(IntPtr taos, string[] lines, int numLines, int protocol, int precision);
}
}
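The class above only exposes the raw P/Invoke bindings. The short sketch below shows how they are typically strung together; it is an illustrative addition, not part of the commit. The Example class, the 127.0.0.1/root/taosdata connection target and the "show databases" statement are assumptions, and the sketch presumes the native taos client library is installed where the runtime can find it.
using System;
using System.Collections.Generic;
namespace TDengineDriver
{
    // Minimal usage sketch for the bindings declared above (illustrative only).
    class Example
    {
        static void Main(string[] args)
        {
            TDengine.Init();
            // Host, credentials and port are the usual defaults and only illustrative.
            IntPtr conn = TDengine.Connect("127.0.0.1", "root", "taosdata", "", (short)6030);
            if (conn == IntPtr.Zero)
            {
                Console.WriteLine("connect failed");
                return;
            }
            IntPtr res = TDengine.Query(conn, "show databases");
            if (res == IntPtr.Zero || TDengine.ErrorNo(res) != TDengine.TSDB_CODE_SUCCESS)
            {
                Console.WriteLine("query failed: " + TDengine.Error(res));
            }
            else
            {
                // Print the column metadata of the result set via the helpers above.
                List<TDengineMeta> metas = TDengine.FetchFields(res);
                foreach (TDengineMeta meta in metas)
                {
                    Console.WriteLine("{0} ({1}, {2} bytes)", meta.name, meta.TypeName(), meta.size);
                }
            }
            TDengine.FreeResult(res);
            TDengine.Close(conn);
            TDengine.Cleanup();
        }
    }
}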
This diff is collapsed.
This diff is collapsed.
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net5.0</TargetFramework>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="1.0.2" />
</ItemGroup>
</Project>
This diff is collapsed.
...@@ -6,4 +6,8 @@
<GenerateAssemblyInfo>false</GenerateAssemblyInfo>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="1.0.2" />
</ItemGroup>
</Project>
...@@ -182,7 +182,7 @@ python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoIns
python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py
python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py
python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py
# disable due to taosdump don't support sql format. python3 test.py -f tools/taosdumpTestNanoSupport.py
python3 test.py -f tools/taosdumpTestNanoSupport.py
#
python3 ./test.py -f tsdb/tsdbComp.py
...@@ -305,7 +305,7 @@ python3 ./test.py -f client/client.py
python3 ./test.py -f client/version.py
python3 ./test.py -f client/alterDatabase.py
python3 ./test.py -f client/noConnectionErrorTest.py
# disable due to taosdump don't support sql format. python3 ./test.py -f client/taoshellCheckCase.py
python3 ./test.py -f client/taoshellCheckCase.py
# python3 test.py -f client/change_time_1_1.py
# python3 test.py -f client/change_time_1_2.py
...@@ -371,6 +371,8 @@ python3 ./test.py -f functions/function_irate.py
python3 ./test.py -f functions/function_ceil.py
python3 ./test.py -f functions/function_floor.py
python3 ./test.py -f functions/function_round.py
python3 ./test.py -f functions/function_mavg.py
python3 ./test.py -f functions/function_csum.py
python3 ./test.py -f insert/unsignedInt.py
python3 ./test.py -f insert/unsignedBigint.py
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -129,7 +129,7 @@ class TDTestCase:
print("schemaless_insert result {}".format(code))
tdSql.query("describe stb0_3")
tdSql.checkData(1, 1, "BINARY")
tdSql.checkData(1, 1, "NCHAR")
payload = ['''
{
...@@ -835,7 +835,7 @@ class TDTestCase:
code = self._conn.schemaless_insert(payload, TDSmlProtocolType.JSON.value, TDSmlTimestampType.NOT_CONFIGURED.value)
print("schemaless_insert result {}".format(code))
tdSql.query("describe `stable`")
tdSql.query("describe `STABLE`")
tdSql.checkRows(9)
#tdSql.query("select * from `key`")
......
...@@ -333,7 +333,7 @@ class TDTestCase:
tdSql.query('describe `!@#$.%^&*()`')
tdSql.checkRows(9)
tdSql.query('describe `stable`')
tdSql.query('describe `STABLE`')
tdSql.checkRows(9)
#tdSql.query('select * from `123`')
......
This diff is collapsed.
This diff is collapsed.
...@@ -171,8 +171,8 @@ class TDTestCase:
#print("==============taosdemo——json_no,#create stable,table; insert table; show table; select table; drop table")
assert os.system("%staosdemo -f tools/taosdemoAllTest/TD-10539/create_taosdemo_no.json -y " % binPath) == 0
os.system("%staosdemo -f tools/taosdemoAllTest/TD-10539/create_taosdemo_no.json -y " % binPath)
tdSql.query("show dbno.tables ")
tdSql.query("show dbno.tables;")
tdSql.checkRows(0)
......