Commit fcb74fcb authored by: Ganlin Zhao

Merge branch 'develop' into fix/TD-12276

@@ -86,7 +86,6 @@ tests/script/api/batchprepare
tests/script/api/stmt
tests/script/api/stmtBatchTest
tests/script/api/stmtTest
# Emacs
# -*- mode: gitignore; -*-
*~
......
@@ -450,7 +450,7 @@ pipeline {
        stage('test_b1_s2') {
            agent{label " slave2 || slave12 "}
            steps {
-                timeout(time: 55, unit: 'MINUTES'){
+                timeout(time: 105, unit: 'MINUTES'){
                    pre_test()
                    sh '''
                    rm -rf /var/lib/taos/*
......
@@ -265,7 +265,7 @@ IF (TD_WINDOWS)
  ADD_DEFINITIONS(-D_MBCS -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE)
  SET(CMAKE_GENERATOR "NMake Makefiles" CACHE INTERNAL "" FORCE)
  IF (NOT TD_GODLL)
-    SET(COMMON_FLAGS "/nologo /WX /wd4018 /wd5999 /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
+    SET(COMMON_FLAGS "/nologo /WX /wd4018 /wd4999 /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
    IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
      SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
    ENDIF ()
......
@@ -83,9 +83,10 @@ TDengine is a highly efficient platform to store, query, and analyze time-series big data, designed
* [Windows Client](https://www.taosdata.com/blog/2019/07/26/514.html): compile the Windows client yourself; the various connectors in a Windows environment all require it
* [Rust Connector](/connector/rust): connect to the TDengine server from Rust through the libtaos client or the RESTful interface.

## [TDengine Components and Tools](/cn/documentation/)

* [taosAdapter User Manual](/tools/adapter)
* [TDinsight User Manual](/tools/insight)

## [Connections with Other Tools](/connections)
......
@@ -331,49 +331,238 @@ The JDBC connector may report three types of error codes: errors from the JDBC driver itself (
### <a class="anchor" id="stmt-java"></a>Write data through parameter binding

Since version 2.1.2.0, TDengine's JDBC-JNI implementation has significantly improved parameter-binding support for data write (INSERT) scenarios. Writing data this way avoids the resource consumption of SQL parsing and can significantly improve write performance in many cases.

Note:

* The JDBC-RESTful implementation does not provide parameter binding
* The following sample code is based on taos-jdbcdriver-2.0.36
* Use setString to bind BINARY data and setNString to bind NCHAR data
* Both setString and setNString require the user to declare, in the size parameter, the column width of the corresponding column in the table definition

Sample code:
```java
import com.taosdata.jdbc.TSDBPreparedStatement;

import java.sql.*;
import java.util.ArrayList;
import java.util.Random;

public class ParameterBindingDemo {

    private static final String host = "127.0.0.1";
    private static final Random random = new Random(System.currentTimeMillis());
    private static final int BINARY_COLUMN_SIZE = 20;
    private static final String[] schemaList = {
            "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
            "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
            "create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
            "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
            "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
    };
    private static final int numOfSubTable = 10, numOfRow = 10;

    public static void main(String[] args) throws SQLException {
        String jdbcUrl = "jdbc:TAOS://" + host + ":6030/";
        Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");

        init(conn);

        bindInteger(conn);
        bindFloat(conn);
        bindBoolean(conn);
        bindBytes(conn);
        bindString(conn);

        conn.close();
    }

    private static void init(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            stmt.execute("drop database if exists test_parabind");
            stmt.execute("create database if not exists test_parabind");
            stmt.execute("use test_parabind");
            for (int i = 0; i < schemaList.length; i++) {
                stmt.execute(schemaList[i]);
            }
        }
    }

    private static void bindInteger(Connection conn) throws SQLException {
        String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t1_" + i);
                // set tags
                pstmt.setTagByte(0, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
                pstmt.setTagShort(1, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
                pstmt.setTagInt(2, random.nextInt(Integer.MAX_VALUE));
                pstmt.setTagLong(3, random.nextLong());
                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<Byte> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f1List.add(Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
                pstmt.setByte(1, f1List);

                ArrayList<Short> f2List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f2List.add(Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
                pstmt.setShort(2, f2List);

                ArrayList<Integer> f3List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f3List.add(random.nextInt(Integer.MAX_VALUE));
                pstmt.setInt(3, f3List);

                ArrayList<Long> f4List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f4List.add(random.nextLong());
                pstmt.setLong(4, f4List);

                // add the bound columns as one batch per sub-table
                pstmt.columnDataAddBatch();
            }
            // execute all batches
            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindFloat(Connection conn) throws SQLException {
        String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";

        TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class);

        for (int i = 1; i <= numOfSubTable; i++) {
            // set table name
            pstmt.setTableName("t2_" + i);
            // set tags
            pstmt.setTagFloat(0, random.nextFloat());
            pstmt.setTagDouble(1, random.nextDouble());
            // set columns
            ArrayList<Long> tsList = new ArrayList<>();
            long current = System.currentTimeMillis();
            for (int j = 0; j < numOfRow; j++)
                tsList.add(current + j);
            pstmt.setTimestamp(0, tsList);

            ArrayList<Float> f1List = new ArrayList<>();
            for (int j = 0; j < numOfRow; j++)
                f1List.add(random.nextFloat());
            pstmt.setFloat(1, f1List);

            ArrayList<Double> f2List = new ArrayList<>();
            for (int j = 0; j < numOfRow; j++)
                f2List.add(random.nextDouble());
            pstmt.setDouble(2, f2List);

            // add column
            pstmt.columnDataAddBatch();
        }
        // execute
        pstmt.columnDataExecuteBatch();
        // close the statement manually when no try-with-resources block is used
        pstmt.close();
    }

    private static void bindBoolean(Connection conn) throws SQLException {
        String sql = "insert into ? using stable3 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t3_" + i);
                // set tags
                pstmt.setTagBoolean(0, random.nextBoolean());
                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<Boolean> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f1List.add(random.nextBoolean());
                pstmt.setBoolean(1, f1List);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindBytes(Connection conn) throws SQLException {
        String sql = "insert into ? using stable4 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t4_" + i);
                // set tags
                pstmt.setTagString(0, new String("abc"));

                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<String> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++) {
                    f1List.add(new String("abc"));
                }
                // binary columns: declare the defined column width in the size parameter
                pstmt.setString(1, f1List, BINARY_COLUMN_SIZE);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindString(Connection conn) throws SQLException {
        String sql = "insert into ? using stable5 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t5_" + i);
                // set tags
                pstmt.setTagNString(0, "北京-abc");

                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<String> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++) {
                    f1List.add("北京-abc");
                }
                // nchar columns: use setNString and declare the defined column width
                pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }
}
```
The methods used to set TAGS values are:
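The concrete list is elided from this hunk. As a sketch reconstructed from the setters exercised in the sample above, together with `setTagNull` and `setTagTimestamp` which the driver also provides, the signatures are roughly:

```java
public void setTagNull(int index, int type)
public void setTagBoolean(int index, boolean value)
public void setTagInt(int index, int value)
public void setTagByte(int index, byte value)
public void setTagShort(int index, short value)
public void setTagLong(int index, long value)
public void setTagTimestamp(int index, long value)
public void setTagFloat(int index, float value)
public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
```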
@@ -405,8 +594,6 @@ public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```
## <a class="anchor" id="subscribe"></a>Subscription

### Create
......
@@ -55,7 +55,7 @@ TDengine provides a rich set of application development interfaces, including C/C++, Java,
*install_client.sh*: installation script for the application driver
*taos.tar.gz*: application driver installation package
*driver*: the TDengine application driver
*connector*: connectors for various programming languages (go/nodejs/python/JDBC)
*examples*: sample programs for various programming languages (c/C#/go/JDBC/MATLAB/python/R)

Run install_client.sh to install.
@@ -541,9 +541,8 @@ TDengine provides a time-driven real-time stream computing API, which can, at a specified time interval,
See the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1963.html) for how to use the Python connector.

* **Installation**: see the detailed steps below
* **Sample programs**: located in install_directory/examples/python
### Installation

@@ -557,47 +556,36 @@ Supported systems for the Python connector: Linux 64 / Windows x64

### Python connector installation
The Python connector can be downloaded and installed from PyPI with `pip`. Note that the TDengine Python connector package is named `taospy`, not `taos` (an unrelated package that has nothing to do with TDengine). For backward compatibility, the connector is still imported with `import taos`.

```bash
pip install taospy
```

If you are not using the system default `python` and `pip`, specify the `pip` version or path:

```bash
pip2 install taospy
pip3 install taospy
```

The Python connector depends on the taos dynamic library (`libtaos.so` on Linux, `taos.dll` on Windows). For the Windows client, if `import taos` fails after installing the TDengine Windows client, copy `C:\TDengine\driver\taos.dll` to `C:\windows\system32` and try again.

For users without internet access, add the `connector/python` path of the TDengine client installation (installed at `/usr/local/taos/connector/python/` on Linux, `C:\TDengine\connector\python` by default on Windows) to the `PYTHONPATH` environment variable.
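For example, on Linux the `PYTHONPATH` approach can be set up and verified with a one-liner (the path below assumes the default install location):

```bash
# make the bundled connector importable without pip; default Linux install path assumed
export PYTHONPATH=/usr/local/taos/connector/python:$PYTHONPATH
python3 -c "import taos; print(taos.__file__)"
```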
### Sample programs

Sample program sources are located in `<install_directory>/examples/python`, including:

* **read_example.py** Python sample source program

You can refer to `read_example.py` to design your own write and query programs. After installing the corresponding application driver, import the taos module with `import taos`. The main steps are:

- Get a TaosConnection object through taos.connect; a program only needs to request one such object, which can be shared across threads.
- Get a new cursor object through the `.cursor()` method of the TaosConnection object; each thread must have its own cursor object.
- Execute write or query SQL statements through the cursor object's execute() method.
@@ -634,127 +622,132 @@ for row in results:
    print(row)
```
##### Code sample

1. Import the TDengine client module

    ```python
    import taos
    ```

2. Get the connection and a cursor object

    ```python
    conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
    c1 = conn.cursor()
    ```

    *host* is the IP where the TDengine server is located, and *config* is the directory of the client configuration file.

3. Write data

    ```python
    import datetime

    # create a database
    c1.execute('create database db')
    c1.execute('use db')
    # create a table
    c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
    # insert a row
    start_time = datetime.datetime(2019, 11, 1)
    affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
    # insert rows in batch
    time_interval = datetime.timedelta(seconds=60)
    sqlcmd = ['insert into tb values']
    for irow in range(1,11):
        start_time += time_interval
        sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
    affected_rows = c1.execute(' '.join(sqlcmd))
    ```

4. Query data

    ```python
    c1.execute('select * from tb')
    # pull the query result
    data = c1.fetchall()
    # the result is a list, with each row as an element
    numOfRows = c1.rowcount
    numOfCols = len(c1.description)
    for irow in range(numOfRows):
        print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1], data[irow][2]))

    # iterate over the cursor directly to pull the query result
    c1.execute('select * from tb')
    for data in c1:
        print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1], data[2]))
    ```
#### Query API

Since v2.1.0, the connector provides another method, `connection.query`, to operate the database:

```python
import taos

conn = taos.connect()
conn.execute("create database if not exists pytest")

result = conn.query("show databases")
num_of_fields = result.field_count
for field in result.fields:
    print(field)
for row in result:
    print(row)

conn.execute("drop database pytest")
```
The `query` method returns a `TaosResult` object and provides the following attributes and methods:

Attributes:

- `fields`: a `TaosFields` collection class providing column information of the returned data.
- `field_count`: number of columns in the returned data.
- `affected_rows`: number of inserted rows.
- `row_count`: number of rows in the query result.
- `precision`: time precision of the current database.

Methods:

- `fetch_all()`: returns the same collection data as `cursor.fetchall()`
- `fetch_all_into_dict()`: API added in v2.1.1 that converts the above data into dictionaries before returning
- `blocks_iter()` `rows_iter()`: two different iterators based on the underlying API
- `fetch_rows_a`: asynchronous API
- `errno`: error code
- `errstr`: error message
- `close`: closes the result object; usually does not need to be called directly
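A short sketch tying these accessors together, reusing the `show databases` query from the example above:

```python
import taos

conn = taos.connect()
result = conn.query("show databases")
print("columns:", result.field_count)
for row in result.fetch_all_into_dict():  # each row becomes a {column-name: value} dict (v2.1.1+)
    print(row)
conn.close()
```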
#### Subscription API

1. Create a synchronous subscription queue:

    ```python
    # create a subscription with the topic 'test' and a consumption cycle of 1000 milliseconds
    # if the first parameter is True, the subscription restarts from the beginning; if it is False
    # and a subscription with the topic 'test' was created before, it continues consuming the data
    # of that subscription instead of starting over and consuming all the data
    sub = conn.subscribe(True, "test", "select * from tb;", 1000)
    ```

2. Consume the subscribed data

    ```python
    data = sub.consume()
    for d in data:
        print(d)
    ```

3. Unsubscribe

    ```python
    sub.close()
    ```

4. Close the connection

    ```python
    conn.close()
    ```
#### Notes on nanosecond (nanosecond) support in the Python connector
@@ -767,30 +760,20 @@ conn.close()

You can view the module's usage information directly through Python's built-in help, or refer to the sample programs in tests/examples/python. Some commonly used classes and methods:

- _TaosConnection_ class

  Refer to help(taos.TaosConnection) in Python.
  This class corresponds to a connection between the client and TDengine. In multi-threaded client scenarios, it is recommended that each thread acquire its own connection instance rather than sharing one connection across multiple threads.

- _TaosCursor_ class

  Refer to help(taos.TaosCursor) in Python.
  This class corresponds to the client's write and query operations. In multi-threaded client scenarios, a cursor instance must remain dedicated to one thread; sharing it across threads will lead to errors in the returned results.

- _connect_ method

  Used to create an instance of taos.TaosConnection.

## <a class="anchor" id="restful"></a>RESTful Connector
......
@@ -14,7 +14,7 @@ TDengine can work with [Grafana](https://www.grafana.com/), an open-source data visualization system,

Download the TDengine Grafana plugin from <https://github.com/taosdata/grafanaplugin/releases/latest>.

```bash
-GF_VERSION=3.1.1
+GF_VERSION=3.1.3
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
```
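After downloading, a typical next step is to unzip the package into Grafana's plugin directory (the path below assumes Grafana's default plugin location; adjust to your setup):

```bash
# extract the datasource plugin where Grafana can find it
sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
```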
@@ -75,15 +75,7 @@ allow_loading_unsigned_plugins = tdengine-datasource
#### Import Dashboard

In version 2.3.3.0 and above, you can import the TDinsight Dashboard (Grafana Dashboard ID: [15167](https://grafana.com/grafana/dashboards/15167)) as a monitoring visualization tool for a TDengine cluster. See the [TDinsight User Manual](https://www.taosdata.com/cn/documentation/tools/insight) for installation and usage instructions.

## <a class="anchor" id="matlab"></a>MATLAB
......
@@ -79,9 +79,10 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [Windows Client](https://www.taosdata.com/blog/2019/07/26/514.html): compile your own Windows client, which is required by various connectors on the Windows environment
- [Rust Connector](/connector/rust): A taosc/RESTful API based TDengine client for Rust

## [Components and Tools](/cn/documentation/)

* [taosAdapter User Manual](/tools/adapter)
* [TDinsight User Manual](/tools/insight)

## [Connections with Other Tools](/connections)
......
@@ -310,46 +310,239 @@ The Java connector may report three types of error codes: JDBC Driver (error cod
- https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h

### Write data through parameter binding

Starting with version 2.1.2.0, TDengine's JDBC-JNI implementation significantly improves support for data write (INSERT) scenarios with parameter binding. Writing data in this way avoids the resource consumption of SQL parsing, which can significantly improve write performance in many cases.

Note:

* The JDBC-RESTful implementation does not provide parameter binding
* The following sample code is based on taos-jdbcdriver-2.0.36
* Use setString to bind BINARY data, and use setNString to bind NCHAR data
* Both setString and setNString require the user to declare the column width of the corresponding column in the table definition in the size parameter

Sample code:
```java
import com.taosdata.jdbc.TSDBPreparedStatement;

import java.sql.*;
import java.util.ArrayList;
import java.util.Random;

public class ParameterBindingDemo {

    private static final String host = "127.0.0.1";
    private static final Random random = new Random(System.currentTimeMillis());
    private static final int BINARY_COLUMN_SIZE = 20;
    private static final String[] schemaList = {
            "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
            "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
            "create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
            "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
            "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
    };
    private static final int numOfSubTable = 10, numOfRow = 10;

    public static void main(String[] args) throws SQLException {
        String jdbcUrl = "jdbc:TAOS://" + host + ":6030/";
        Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");

        init(conn);

        bindInteger(conn);
        bindFloat(conn);
        bindBoolean(conn);
        bindBytes(conn);
        bindString(conn);

        conn.close();
    }

    private static void init(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            stmt.execute("drop database if exists test_parabind");
            stmt.execute("create database if not exists test_parabind");
            stmt.execute("use test_parabind");
            for (int i = 0; i < schemaList.length; i++) {
                stmt.execute(schemaList[i]);
            }
        }
    }

    private static void bindInteger(Connection conn) throws SQLException {
        String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t1_" + i);
                // set tags
                pstmt.setTagByte(0, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
                pstmt.setTagShort(1, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
                pstmt.setTagInt(2, random.nextInt(Integer.MAX_VALUE));
                pstmt.setTagLong(3, random.nextLong());
                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<Byte> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f1List.add(Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
                pstmt.setByte(1, f1List);

                ArrayList<Short> f2List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f2List.add(Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
                pstmt.setShort(2, f2List);

                ArrayList<Integer> f3List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f3List.add(random.nextInt(Integer.MAX_VALUE));
                pstmt.setInt(3, f3List);

                ArrayList<Long> f4List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f4List.add(random.nextLong());
                pstmt.setLong(4, f4List);

                // add the bound columns as one batch per sub-table
                pstmt.columnDataAddBatch();
            }
            // execute all batches
            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindFloat(Connection conn) throws SQLException {
        String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";

        TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class);

        for (int i = 1; i <= numOfSubTable; i++) {
            // set table name
            pstmt.setTableName("t2_" + i);
            // set tags
            pstmt.setTagFloat(0, random.nextFloat());
            pstmt.setTagDouble(1, random.nextDouble());
            // set columns
            ArrayList<Long> tsList = new ArrayList<>();
            long current = System.currentTimeMillis();
            for (int j = 0; j < numOfRow; j++)
                tsList.add(current + j);
            pstmt.setTimestamp(0, tsList);

            ArrayList<Float> f1List = new ArrayList<>();
            for (int j = 0; j < numOfRow; j++)
                f1List.add(random.nextFloat());
            pstmt.setFloat(1, f1List);

            ArrayList<Double> f2List = new ArrayList<>();
            for (int j = 0; j < numOfRow; j++)
                f2List.add(random.nextDouble());
            pstmt.setDouble(2, f2List);

            // add column
            pstmt.columnDataAddBatch();
        }
        // execute
        pstmt.columnDataExecuteBatch();
        // close the statement manually when no try-with-resources block is used
        pstmt.close();
    }

    private static void bindBoolean(Connection conn) throws SQLException {
        String sql = "insert into ? using stable3 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t3_" + i);
                // set tags
                pstmt.setTagBoolean(0, random.nextBoolean());
                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<Boolean> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++)
                    f1List.add(random.nextBoolean());
                pstmt.setBoolean(1, f1List);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindBytes(Connection conn) throws SQLException {
        String sql = "insert into ? using stable4 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t4_" + i);
                // set tags
                pstmt.setTagString(0, new String("abc"));

                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<String> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++) {
                    f1List.add(new String("abc"));
                }
                // binary columns: declare the defined column width in the size parameter
                pstmt.setString(1, f1List, BINARY_COLUMN_SIZE);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }

    private static void bindString(Connection conn) throws SQLException {
        String sql = "insert into ? using stable5 tags(?) values(?,?)";

        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {

            for (int i = 1; i <= numOfSubTable; i++) {
                // set table name
                pstmt.setTableName("t5_" + i);
                // set tags
                pstmt.setTagNString(0, "北京-abc");

                // set columns
                ArrayList<Long> tsList = new ArrayList<>();
                long current = System.currentTimeMillis();
                for (int j = 0; j < numOfRow; j++)
                    tsList.add(current + j);
                pstmt.setTimestamp(0, tsList);

                ArrayList<String> f1List = new ArrayList<>();
                for (int j = 0; j < numOfRow; j++) {
                    f1List.add("北京-abc");
                }
                // nchar columns: use setNString and declare the defined column width
                pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE);

                // add column
                pstmt.columnDataAddBatch();
            }
            // execute
            pstmt.columnDataExecuteBatch();
        }
    }
}
```
The methods used to set tags are:
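The list itself is elided from this hunk; a hedged reconstruction from the setters used in the sample above (plus `setTagNull` and `setTagTimestamp`, which the driver also exposes):

```java
public void setTagNull(int index, int type)
public void setTagBoolean(int index, boolean value)
public void setTagInt(int index, int value)
public void setTagByte(int index, byte value)
public void setTagShort(int index, short value)
public void setTagLong(int index, long value)
public void setTagTimestamp(int index, long value)
public void setTagFloat(int index, float value)
public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
```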
@@ -383,8 +576,6 @@ public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```
### Data Subscription

#### Subscribe
......
@@ -411,38 +411,22 @@ See [video tutorials](https://www.taosdata.com/blog/2020/11/11/1963.html) for th

### Python connector installation

From TDengine 2.4, users can install the Python connector for TDengine with `pip`. Note that the package name is **taospy** (not `taos`, a fully unrelated package). For backward compatibility, we still use `import taos` to import the connector package.

```bash
pip install taospy
```

Use a version-specific `pip` command if needed:

```bash
pip2 install taospy
pip3 install taospy
```

The Python connector requires the `libtaos` library (`libtaos.so` on Linux, `taos.dll` on Windows). For the Windows client, if `import taos` fails after installing the TDengine Windows client, copy `C:\TDengine\driver\taos.dll` to `C:\windows\system32` and try again.

For users in a limited network environment, add the `connector/python` path of the installed directory (commonly `/usr/local/taos/connector/python/` on Linux, `C:\TDengine\connector\python` on Windows) to the `PYTHONPATH` environment variable.
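For example, on Linux (default install path assumed):

```bash
# make the bundled connector importable without pip
export PYTHONPATH=/usr/local/taos/connector/python:$PYTHONPATH
python3 -c "import taos; print(taos.__file__)"   # quick import check
```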
### How to use

@@ -462,63 +446,66 @@ for row in results:
    print(row)
```
##### Code sample

- Import the TDengine client module

```python
import taos
```

- Get the connection and cursor object

```python
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
```

*host* is the IP of the TDengine server, and *config* is the directory where the client configuration file is located.

- Write data

```python
import datetime

# Create a database
c1.execute('create database db')
c1.execute('use db')
# Create a table
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
# Insert data
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
# Insert data in batch
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
    start_time += time_interval
    sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
```

- Query data

```python
c1.execute('select * from tb')
# pull query result
data = c1.fetchall()
# The result is a list, with each row as an element
numOfRows = c1.rowcount
numOfCols = len(c1.description)
for irow in range(numOfRows):
    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1], data[irow][2]))

# Use cursor loop directly to pull query result
c1.execute('select * from tb')
for data in c1:
    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1], data[2]))
```
#### Query API

Since v2.1.0, the Python connector provides a new API for query (the body of this block is elided in the diff; it mirrors the Chinese documentation above):

```python
import taos

conn = taos.connect()
conn.execute("create database if not exists pytest")

result = conn.query("show databases")
num_of_fields = result.field_count
for field in result.fields:
    print(field)
for row in result:
    print(row)

conn.execute("drop database pytest")
```
@@ -556,15 +543,19 @@ Functions:
- `errstr`: error string if failed.
- `close`: close result; you do not need to call it directly, the result will be auto-closed out of scope.
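As a quick sketch of these accessors (reusing the `show databases` query from the sample above):

```python
import taos

conn = taos.connect()
result = conn.query("show databases")
data = result.fetch_all()                        # same shape as cursor.fetchall()
print(len(data), "rows,", result.field_count, "columns")
conn.close()
```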
#### Subscription API

Create a subscription.

```python
# Create a subscription with the topic 'test' and a consumption cycle of 1000 milliseconds
# If the first parameter is True, it means restarting the subscription.
# If it is False and a subscription with the topic 'test' has been created before,
# it means continuing to consume the data of this subscription instead of restarting to consume all the data
sub = conn.subscribe(True, "test", "select * from tb;", 1000)
```

Consume subscription data.

```python
data = sub.consume()
for d in data:
    print(d)
```

Unsubscribe.

```python
sub.close()
```

Close connection.

```python
conn.close()
```
......
@@ -15,7 +15,7 @@ https://grafana.com/grafana/download.

Download grafana plugin from <https://github.com/taosdata/grafanaplugin/releases/latest>.

```bash
-GF_VERSION=3.1.1
+GF_VERSION=3.1.3
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
```
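A typical install step is then to unzip the package into Grafana's plugin directory (assuming the default location; adjust to your deployment):

```bash
sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
```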
@@ -69,15 +69,7 @@ According to the default prompt, query the average system memory usage at the sp
#### Import Dashboard

We provide a TDinsight dashboard (Grafana dashboard ID: [15167](https://grafana.com/grafana/dashboards/15167)) for TDengine cluster monitoring since TDengine 2.3.3.x. Please refer to the [TDinsight User Manual](https://www.taosdata.com/en/documentation/tools/insight) for details.

## <a class="anchor" id="matlab"></a> MATLAB
......
@@ -143,6 +143,7 @@ static bool validateDebugFlag(int32_t v);
static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo);
static tSqlExpr* extractExprForSTable(SSqlCmd* pCmd, tSqlExpr** pExpr, SQueryInfo* pQueryInfo, int32_t tableIndex);
+static void convertWhereStringCharset(tSqlExpr* pRight);

int validateTableName(char *tblName, int len, SStrToken* psTblToken, bool *dbIncluded);
@@ -1092,7 +1093,7 @@ static int32_t addPrimaryTsColumnForTimeWindowQuery(SQueryInfo* pQueryInfo, SSql
  tstrncpy(s.name, aAggs[TSDB_FUNC_TS].name, sizeof(s.name));

  SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
-  tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL, getNewResColId(pCmd));
+  tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL, 0);
  return TSDB_CODE_SUCCESS;
}
@@ -2191,7 +2192,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11);
  }

-  if(pItem->aliasName != NULL && validateColumnName(pItem->aliasName) != TSDB_CODE_SUCCESS){
+  if(pItem->aliasName != NULL && strcasecmp(pItem->aliasName, DEFAULT_PRIMARY_TIMESTAMP_COL_NAME) == 0){
    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11);
  }
@@ -2774,12 +2775,17 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
  }

+  pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+  SSchema* pColumnSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);

  // elapsed only can be applied to primary key
-  if (functionId == TSDB_FUNC_ELAPSED && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
-    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "elapsed only can be applied to primary key");
-  }
+  if (functionId == TSDB_FUNC_ELAPSED) {
+    if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX || pColumnSchema->colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "elapsed only can be applied to primary key");
+    }
+  }

-  pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
  STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta);

  // functions can not be applied to tags
@@ -2809,7 +2815,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
  if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE || functionId == TSDB_FUNC_CSUM) {
    SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0};
    SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP,
-                                     TSDB_KEYSIZE, getNewResColId(pCmd), TSDB_KEYSIZE, false);
+                                     TSDB_KEYSIZE, 0, TSDB_KEYSIZE, false);
    tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS_DUMMY].name, sizeof(pExpr->base.aliasName));

    SColumnList ids = createColumnList(1, 0, 0);
@@ -3130,7 +3136,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
      // set the first column ts for top/bottom query
      int32_t tsFuncId = (functionId == TSDB_FUNC_MAVG) ? TSDB_FUNC_TS_DUMMY : TSDB_FUNC_TS;
      SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
-      pExpr = tscExprAppend(pQueryInfo, tsFuncId, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pCmd),
+      pExpr = tscExprAppend(pQueryInfo, tsFuncId, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, 0,
                            0, false);
      tstrncpy(pExpr->base.aliasName, aAggs[tsFuncId].name, sizeof(pExpr->base.aliasName));
@@ -3156,7 +3162,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
      // todo REFACTOR
      // set the first column ts for top/bottom query
      SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
-      pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pCmd),
+      pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, 0,
                            0, false);
      tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS].name, sizeof(pExpr->base.aliasName));
@@ -4958,25 +4964,23 @@ static int32_t validateJsonTagExpr(tSqlExpr* pExpr, char* msgBuf) {
      return invalidOperationMsg(msgBuf, msg3);
    if (pLeft->pRight && (pLeft->pRight->value.nLen > TSDB_MAX_JSON_KEY_LEN || pLeft->pRight->value.nLen <= 0))
      return invalidOperationMsg(msgBuf, msg2);
+
+    if (pRight->tokenId == TK_NULL && pExpr->tokenId == TK_EQ) {
+      // transform for json->'key'=null
+      pRight->tokenId = TK_STRING;
+      pRight->value.nType = TSDB_DATA_TYPE_BINARY;
+      pRight->value.nLen = INT_BYTES;
+      pRight->value.pz = calloc(INT_BYTES, 1);
+      *(uint32_t*)pRight->value.pz = TSDB_DATA_JSON_null;
+      return TSDB_CODE_SUCCESS;
+    }
  }

  if (pRight->value.nType == TSDB_DATA_TYPE_BINARY){  // json value store by nchar, so need to convert from binary to nchar
+    if(pRight->value.nLen == INT_BYTES && *(uint32_t*)pRight->value.pz == TSDB_DATA_JSON_null){
+      return TSDB_CODE_SUCCESS;
+    }
    if(pRight->value.nLen == 0){
      pRight->value.nType = TSDB_DATA_TYPE_NCHAR;
      return TSDB_CODE_SUCCESS;
    }
-    char newData[TSDB_MAX_JSON_TAGS_LEN] = {0};
-    int len = 0;
-    if(!taosMbsToUcs4(pRight->value.pz, pRight->value.nLen, newData, TSDB_MAX_JSON_TAGS_LEN, &len)){
-      tscError("json where condition mbsToUcs4 error");
-    }
-    pRight->value.pz = realloc(pRight->value.pz, len);
-    memcpy(pRight->value.pz, newData, len);
-    pRight->value.nLen = len;
-    pRight->value.nType = TSDB_DATA_TYPE_NCHAR;
+    convertWhereStringCharset(pRight);
  }
}
@@ -5040,6 +5044,34 @@ int32_t handleNeOptr(tSqlExpr** rexpr, tSqlExpr* expr) {
  return TSDB_CODE_SUCCESS;
}

+void convertWhereStringCharset(tSqlExpr* pRight){
+  if(pRight->value.nType != TSDB_DATA_TYPE_BINARY || pRight->value.nLen == 0){
+    return;
+  }
+  char *newData = calloc(pRight->value.nLen * TSDB_NCHAR_SIZE, 1);
+  if(!newData){
+    tscError("convertWhereStringCharset calloc memory error");
+    return;
+  }
+  int len = 0;
+  if(!taosMbsToUcs4(pRight->value.pz, pRight->value.nLen, newData, pRight->value.nLen * TSDB_NCHAR_SIZE, &len)){
+    tscError("nchar in where condition mbsToUcs4 error");
+    free(newData);
+    return;
+  }
+  char* tmp = realloc(pRight->value.pz, len);
+  if (!tmp){
+    tscError("convertWhereStringCharset realloc memory error");
+    free(newData);
+    return;
+  }
+  pRight->value.pz = tmp;
+  memcpy(pRight->value.pz, newData, len);
+  pRight->value.nLen = len;
+  pRight->value.nType = TSDB_DATA_TYPE_NCHAR;
+  free(newData);
+}
+
static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SCondExpr* pCondExpr,
                                     int32_t* type, int32_t* tbIdx, int32_t parentOptr, tSqlExpr** columnExpr,
@@ -5056,14 +5088,6 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
  SStrToken* colName = NULL;
  if(pLeft->tokenId == TK_ARROW){
    colName = &(pLeft->pLeft->columnName);
-    if (pRight->tokenId == TK_NULL && (*pExpr)->tokenId == TK_EQ) {
-      // transform for json->'key'=null
-      pRight->tokenId = TK_STRING;
-      pRight->value.nType = TSDB_DATA_TYPE_BINARY;
-      pRight->value.nLen = INT_BYTES;
-      pRight->value.pz = calloc(INT_BYTES, 1);
-      *(uint32_t*)pRight->value.pz = TSDB_DATA_JSON_null;
-    }
  }else{
    colName = &(pLeft->columnName);
  }
@@ -5100,6 +5124,11 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
  }

  SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
+
+  if (pSchema->type == TSDB_DATA_TYPE_NCHAR){
+    convertWhereStringCharset(pRight);
+  }
+
  if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP && index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) {  // query on time range
    if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) {
      return TSDB_CODE_TSC_INVALID_OPERATION;
......
@@ -837,7 +837,7 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo,
    return TSDB_CODE_TSC_INVALID_OPERATION;
  }

-  if (pExpr->resColId >= 0) {
+  if (pExpr->resColId > 0) {
    tscError("result column id underflowed: %d", pExpr->resColId);
    return TSDB_CODE_TSC_RES_TOO_MANY;
  }
......
@@ -5511,18 +5511,20 @@ int parseJsontoTagData(char* json, SKVRowBuilder* kvRowBuilder, char* errMsg, in
    if(item->type == cJSON_String){  // add json value  format: type|data
      char *jsonValue = item->valuestring;
      outLen = 0;
-      char tagVal[TSDB_MAX_JSON_TAGS_LEN] = {0};
+      char *tagVal = calloc(strlen(jsonValue) * TSDB_NCHAR_SIZE + TSDB_NCHAR_SIZE, 1);
      *tagVal = jsonType2DbType(0, item->type);  // type
      char* tagData = POINTER_SHIFT(tagVal,CHAR_BYTES);
      if (!taosMbsToUcs4(jsonValue, strlen(jsonValue), varDataVal(tagData),
-                         TSDB_MAX_JSON_TAGS_LEN - CHAR_BYTES - VARSTR_HEADER_SIZE, &outLen)) {
+                         (int32_t)(strlen(jsonValue) * TSDB_NCHAR_SIZE), &outLen)) {
        tscError("json string error:%s|%s", strerror(errno), jsonValue);
        retCode = tscSQLSyntaxErrMsg(errMsg, "serizelize json error", NULL);
+        free(tagVal);
        goto end;
      }

      varDataSetLen(tagData, outLen);
      tdAddColToKVRow(kvRowBuilder, jsonIndex++, TSDB_DATA_TYPE_NCHAR, tagVal, true);
+      free(tagVal);
    }else if(item->type == cJSON_Number){
      if(!isfinite(item->valuedouble)){
        tscError("json value is invalidate");
......
src/TDengineDriver/bin/
src/TDengineDriver/obj/
src/test/Cases/bin/
src/test/Cases/obj/
src/test/XUnitTest/bin/
src/test/XUnitTest/obj/
@@ -35,7 +35,8 @@ namespace TDengineDriver
    TSDB_DATA_TYPE_UTINYINT = 11,  // 1 byte
    TSDB_DATA_TYPE_USMALLINT = 12, // 2 bytes
    TSDB_DATA_TYPE_UINT = 13,      // 4 bytes
-    TSDB_DATA_TYPE_UBIGINT = 14    // 8 bytes
+    TSDB_DATA_TYPE_UBIGINT = 14,   // 8 bytes
+    TSDB_DATA_TYPE_JSONTAG = 15    // 4096 bytes
  }

  public enum TDengineInitOption
@@ -46,7 +47,6 @@ namespace TDengineDriver
    TDDB_OPTION_CONFIGDIR = 3,
    TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4
  }

  enum TaosField
  {
    STRUCT_SIZE = 68,
@@ -92,6 +92,8 @@ namespace TDengineDriver
        return "TIMESTAMP";
      case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
        return "NCHAR";
+      case TDengineDataType.TSDB_DATA_TYPE_JSONTAG:
+        return "JSON";
      default:
        return "undefine";
    }
@@ -204,6 +206,7 @@ namespace TDengineDriver
      metas.Add(meta);
    }
    return metas;
  }
......
using System;
using Test.UtilsTools;
namespace Cases
{
public class JsonTagTest
{
public void Test(IntPtr conn)
{
Console.WriteLine("STEP 1 prepare data & validate json string===== ");
UtilsTools.ExecuteQuery(conn, "create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)");
UtilsTools.ExecuteQuery(conn, "insert into jsons1_1 using jsons1 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 1, false, 'json1', '涛思数据') (1591060608000, 23, true, '涛思数据', 'json')");
UtilsTools.ExecuteQuery(conn, "insert into jsons1_2 using jsons1 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060628000, 2, true, 'json2', 'sss')");
UtilsTools.ExecuteQuery(conn, "insert into jsons1_3 using jsons1 tags('{\"tag1\":false,\"tag2\":\"beijing\"}') values (1591060668000, 3, false, 'json3', 'efwe')");
UtilsTools.ExecuteQuery(conn, "insert into jsons1_4 using jsons1 tags('{\"tag1\":null,\"tag2\":\"shanghai\",\"tag3\":\"hello\"}') values (1591060728000, 4, true, 'json4', '323sd')");
UtilsTools.ExecuteQuery(conn, "insert into jsons1_5 using jsons1 tags('{\"tag1\":1.232, \"tag2\":null}') values(1591060928000, 1, false, '涛思数据', 'ewe')");
UtilsTools.ExecuteQuery(conn, "insert into jsons1_6 using jsons1 tags('{\"tag1\":11,\"tag2\":\"\",\"tag2\":null}') values(1591061628000, 11, false, '涛思数据','')");
UtilsTools.ExecuteQuery(conn, "insert into jsons1_7 using jsons1 tags('{\"tag1\":\"涛思数据\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '涛思数据', 'dws')");
Console.WriteLine("");
Console.WriteLine("test duplicate key using the first one. elimate empty key======== ");
UtilsTools.ExecuteQuery(conn, "CREATE TABLE if not exists jsons1_8 using jsons1 tags('{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90}')");
Console.WriteLine("");
Console.WriteLine("test empty json string, save as jtag is NULL========== ");
UtilsTools.ExecuteQuery(conn, "insert into jsons1_9 using jsons1 tags('\t') values (1591062328000, 24, NULL, '涛思数据', '2sdw')");
UtilsTools.ExecuteQuery(conn, "CREATE TABLE if not exists jsons1_10 using jsons1 tags('')");
UtilsTools.ExecuteQuery(conn, "CREATE TABLE if not exists jsons1_11 using jsons1 tags(' ')");
UtilsTools.ExecuteQuery(conn, "CREATE TABLE if not exists jsons1_12 using jsons1 tags('{}')");
UtilsTools.ExecuteQuery(conn, "CREATE TABLE if not exists jsons1_13 using jsons1 tags('null')");
Console.WriteLine("");
Console.WriteLine("test invalidate json==================== ");
UtilsTools.ExecuteErrorQuery(conn, "CREATE TABLE if not exists jsons1_14 using jsons1 tags('\"efwewf\"')");
UtilsTools.ExecuteErrorQuery(conn, "CREATE TABLE if not exists jsons1_14 using jsons1 tags('3333')");
UtilsTools.ExecuteErrorQuery(conn, "CREATE TABLE if not exists jsons1_14 using jsons1 tags('33.33')");
UtilsTools.ExecuteErrorQuery(conn, "CREATE TABLE if not exists jsons1_14 using jsons1 tags('false')");
UtilsTools.ExecuteErrorQuery(conn, "CREATE TABLE if not exists jsons1_14 using jsons1 tags('[1,true]')");
UtilsTools.ExecuteErrorQuery(conn, "CREATE TABLE if not exists jsons1_14 using jsons1 tags('{222}')");
UtilsTools.ExecuteErrorQuery(conn, "CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"fe\"}')");
Console.WriteLine("");
Console.WriteLine("test invalidate json key, key must can be printed assic char========== ");
UtilsTools.ExecuteErrorQuery(conn, "CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":[1,true]}')");
UtilsTools.ExecuteErrorQuery(conn, "CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":{}}')");
UtilsTools.ExecuteErrorQuery(conn, "CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"。loc\":\"fff\"}')");
UtilsTools.ExecuteErrorQuery(conn, "CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"\":\"fff\"}')");
UtilsTools.ExecuteErrorQuery(conn, "CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"\t\":\"fff\"}')");
UtilsTools.ExecuteErrorQuery(conn, "CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"试试\":\"fff\"}')");
Console.WriteLine("");
Console.WriteLine("STEP 2 alter table json tag============");
UtilsTools.ExecuteErrorQuery(conn, "ALTER STABLE jsons1 add tag tag2 nchar(20)");
UtilsTools.ExecuteErrorQuery(conn, "ALTER STABLE jsons1 drop tag jtag");
UtilsTools.ExecuteErrorQuery(conn, "ALTER TABLE jsons1_1 SET TAG jtag=4");
UtilsTools.ExecuteQuery(conn, "ALTER TABLE jsons1_1 SET TAG jtag='{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}'");
Console.WriteLine("");
Console.WriteLine("STEP 3 query table============");
Console.WriteLine("test error syntax============");
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1 where jtag->tag1='beijing'");
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1 where jtag->'location'");
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1 where jtag->''");
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1 where jtag->''=9");
UtilsTools.ExecuteErrorQuery(conn, "select -> from jsons1");
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1 where contains");
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1 where jtag->");
UtilsTools.ExecuteErrorQuery(conn, "select jtag->location from jsons1");
UtilsTools.ExecuteErrorQuery(conn, "select jtag contains location from jsons1");
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1 where jtag contains location");
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1 where jtag contains''");
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1 where jtag contains 'location'='beijing'");
Console.WriteLine("");
Console.WriteLine("test select normal column===========");
IntPtr res = IntPtr.Zero;
res = UtilsTools.ExecuteQuery(conn, "select dataint from jsons1");
UtilsTools.DisplayRes(res);
Console.WriteLine("test select json tag===========");
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select jtag from jsons1");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select jtag from jsons1 where jtag is null");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select jtag from jsons1 where jtag is not null");
UtilsTools.DisplayRes(res);
Console.WriteLine("test #line 41===========");
res = UtilsTools.ExecuteQuery(conn, "select jtag from jsons1_8");
UtilsTools.DisplayRes(res);
Console.WriteLine("test #line 72===========");
res = UtilsTools.ExecuteQuery(conn, "select jtag from jsons1_1");
UtilsTools.DisplayRes(res);
Console.WriteLine("test jtag is NULL===========");
res = UtilsTools.ExecuteQuery(conn, "select jtag from jsons1_9");
UtilsTools.DisplayRes(res);
Console.WriteLine("test select json tag->'key', value is string ===========");
res = UtilsTools.ExecuteQuery(conn, "select jtag->'tag1' from jsons1_1");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select jtag->'tag2' from jsons1_6");
UtilsTools.DisplayRes(res);
Console.WriteLine("test select json tag->'key', value is int===========");
res = UtilsTools.ExecuteQuery(conn, "select jtag->'tag2' from jsons1_1");
UtilsTools.DisplayRes(res);
Console.WriteLine("test select json tag->'key', value is bool===========");
res = UtilsTools.ExecuteQuery(conn, "select jtag->'tag3' from jsons1_1");
UtilsTools.DisplayRes(res);
Console.WriteLine("test select json tag->'key', value is null===========");
res = UtilsTools.ExecuteQuery(conn, "select jtag->'tag1' from jsons1_4");
UtilsTools.DisplayRes(res);
Console.WriteLine("test select json tag->'key', value is double===========");
res = UtilsTools.ExecuteQuery(conn, "select jtag->'tag1' from jsons1_5");
UtilsTools.DisplayRes(res);
Console.WriteLine("test select json tag->'key', key is not exist===========");
res = UtilsTools.ExecuteQuery(conn, "select jtag->'tag10' from jsons1_4");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select jtag->'tag1' from jsons1");
UtilsTools.DisplayRes(res);
Console.WriteLine("test header name===========");
res = UtilsTools.ExecuteQuery(conn, "select jtag->'tag1' from jsons1");
UtilsTools.DisplayRes(res);
Console.WriteLine("test where with json tag===========");
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1_1 where jtag is not null");
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'");
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1 where jtag->'tag1'={}");
Console.WriteLine("where json value is string===========");
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag2'='beijing'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select dataint,tbname,jtag->'tag1',jtag from jsons1 where jtag->'tag2'='beijing'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'='beijing'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'='涛思数据'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag2'>'beijing'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag2'>='beijing'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag2'<'beijing'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag2'<='beijing'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag2'!='beijing'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag2'=''");
UtilsTools.DisplayRes(res);
Console.WriteLine("where json value is int===========");
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'=5");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'=10");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'<54");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'<=11");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'>4");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'>=5");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'!=5");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'!=55");
UtilsTools.DisplayRes(res);
Console.WriteLine("where json value is double===========");
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'=1.232");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'<1.232");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'<=1.232");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'>1.23");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'>=1.232");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'!=1.232");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'!=3.232");
UtilsTools.DisplayRes(res);
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1 where jtag->'tag1'/0=3");
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1 where jtag->'tag1'/5=1");
Console.WriteLine("where json value is bool===========");
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'=true");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'=false");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'!=false");
UtilsTools.DisplayRes(res);
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1 where jtag->'tag1'>false");
Console.WriteLine("where json value is null===========");
Console.WriteLine("only json suport =null. This synatx will change later.===========");
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'=null");
UtilsTools.DisplayRes(res);
Console.WriteLine("where json is null===========");
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag is null");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag is not null");
UtilsTools.DisplayRes(res);
Console.WriteLine("where json key is null===========");
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag_no_exist'=3");
UtilsTools.DisplayRes(res);
Console.WriteLine("where json value is not exist===========");
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1' is null");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag4' is null");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag3' is not null");
UtilsTools.DisplayRes(res);
Console.WriteLine("test contains===========");
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag contains 'tag1'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag contains 'tag3'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag contains 'tag_no_exist'");
UtilsTools.DisplayRes(res);
Console.WriteLine("test json tag in where condition with and/or===========");
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='beijing'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'=false or jtag->'tag2'='beijing'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1' is not null and jtag contains 'tag3'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1'='femail' and jtag contains 'tag3'");
UtilsTools.DisplayRes(res);
Console.WriteLine("test with tbname/normal column===========");
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where tbname = 'jsons1_1'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=3");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=23");
UtilsTools.DisplayRes(res);
Console.WriteLine("test where condition like===========");
res = UtilsTools.ExecuteQuery(conn, "select *,tbname from jsons1 where jtag->'tag2' like 'bei%'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select *,tbname from jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null");
UtilsTools.DisplayRes(res);
Console.WriteLine("test where condition in no support in===========");
UtilsTools.ExecuteErrorQuery(conn, "select * from jsons1 where jtag->'tag1' in ('beijing')");
Console.WriteLine("test where condition match===========");
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1' match 'ma'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1' match 'ma$'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag2' match 'jing$'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select * from jsons1 where jtag->'tag1' match '收到'");
UtilsTools.DisplayRes(res);
Console.WriteLine("test distinct===========");
UtilsTools.ExecuteQuery(conn, "insert into jsons1_14 using jsons1 tags('{\"tag1\":\"涛思数据\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '涛思数据', 'dws')");
res = UtilsTools.ExecuteQuery(conn, "select distinct jtag->'tag1' from jsons1");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select distinct jtag from jsons1");
UtilsTools.DisplayRes(res);
Console.WriteLine("test dumplicate key with normal colomn===========");
UtilsTools.ExecuteQuery(conn, "INSERT INTO jsons1_15 using jsons1 tags('{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"涛思数据\"}') values(1591060828000, 4, false, 'jjsf', \"你就会\")");
res = UtilsTools.ExecuteQuery(conn, "select *,tbname,jtag from jsons1 where jtag->'datastr' match '涛思数据' and datastr match 'js'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select tbname,jtag->'tbname' from jsons1 where jtag->'tbname'='tt' and tbname='jsons1_14'");
UtilsTools.DisplayRes(res);
Console.WriteLine("test join===========");
UtilsTools.ExecuteQuery(conn, "create table if not exists jsons2(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)");
UtilsTools.ExecuteQuery(conn, "insert into jsons2_1 using jsons2 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 2, false, 'json2', '涛思数据2')");
UtilsTools.ExecuteQuery(conn, "insert into jsons2_2 using jsons2 tags('{\"tag1\":5,\"tag2\":null}') values (1591060628000, 2, true, 'json2', 'sss')");
UtilsTools.ExecuteQuery(conn, "create table if not exists jsons3(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)");
UtilsTools.ExecuteQuery(conn, "insert into jsons3_1 using jsons3 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 3, false, 'json3', '涛思数据3')");
UtilsTools.ExecuteQuery(conn, "insert into jsons3_2 using jsons3 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060638000, 2, true, 'json3', 'sss')");
res = UtilsTools.ExecuteQuery(conn, "select 'sss',33,a.jtag->'tag3' from jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select 'sss',33,a.jtag->'tag3' from jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'");
UtilsTools.DisplayRes(res);
Console.WriteLine("test group by & order by json tag===========");
res = UtilsTools.ExecuteQuery(conn, "select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag1' desc");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag1' asc");
UtilsTools.DisplayRes(res);
Console.WriteLine("test stddev with group by json tag===========");
res = UtilsTools.ExecuteQuery(conn, "select stddev(dataint) from jsons1 group by jtag->'tag1'");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select stddev(dataint) from jsons1 group by jsons1.jtag->'tag1'");
UtilsTools.DisplayRes(res);
Console.WriteLine("test top/bottom with group by json tag===========");
res = UtilsTools.ExecuteQuery(conn, "select top(dataint,100) from jsons1 group by jtag->'tag1'");
UtilsTools.DisplayRes(res);
Console.WriteLine("subquery with json tag===========");
res = UtilsTools.ExecuteQuery(conn, "select * from (select jtag, dataint from jsons1)");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)");
UtilsTools.DisplayRes(res);
res = UtilsTools.ExecuteQuery(conn, "select ts,tbname,jtag->'tag1' from (select jtag->'tag1',tbname,ts from jsons1 order by ts)");
UtilsTools.DisplayRes(res);
Console.WriteLine("");
}
}
}
\ No newline at end of file
@@ -48,12 +48,17 @@ namespace Cases.EntryPoint
Console.WriteLine("====================fetchfeilds===================");
FetchFields fetchFields = new FetchFields();
- fetchFields.Test(conn, "fetchfeilds");
+ fetchFields.Test(conn,"fetchfeilds");
- UtilsTools.ExecuteQuery(conn, "drop database if exists csharp");
+ Console.WriteLine("===================JsonTagTest====================");
+ JsonTagTest jsonTagTest = new JsonTagTest();
+ jsonTagTest.Test(conn);
+ // UtilsTools.ExecuteQuery(conn, "drop database if exists csharp");
UtilsTools.CloseConnection(conn);
UtilsTools.ExitProgram();
}
}
}
@@ -10,8 +10,8 @@ namespace Cases
public void Test(IntPtr conn, string tableName)
{
IntPtr res = IntPtr.Zero;
- String createTb = "create stable " + tableName + " (ts timestamp ,b bool,v1 tinyint,v2 smallint,v4 int,v8 bigint,f4 float,f8 double,u1 tinyint unsigned,u2 smallint unsigned,u4 int unsigned,u8 bigint unsigned,bin binary(200),blob nchar(200))tags(id int);";
+ String createTb = "create stable " + tableName + " (ts timestamp ,b bool,v1 tinyint,v2 smallint,v4 int,v8 bigint,f4 float,f8 double,u1 tinyint unsigned,u2 smallint unsigned,u4 int unsigned,u8 bigint unsigned,bin binary(200),blob nchar(200))tags(jsontag json);";
- String insertSql = "insert into " + tableName + "_t1 using " + tableName + " tags(1) values(1637064040000,true,1,2,3,4,5,6,7,8,9,10,'XI','XII')";
+ String insertSql = "insert into " + tableName + "_t1 using " + tableName + " tags('{\"k1\": \"v1\"}') values(1637064040000,true,1,2,3,4,5,6,7,8,9,10,'XI','XII')";
String selectSql = "select * from " + tableName;
String dropSql = "drop table " + tableName;
UtilsTools.ExecuteQuery(conn, createTb);
@@ -35,3 +35,5 @@ namespace Cases
}
}
}
@@ -39,6 +39,28 @@ namespace Test.UtilsTools
}
return res;
}
+ public static IntPtr ExecuteErrorQuery(IntPtr conn, String sql)
+ {
+ IntPtr res = TDengine.Query(conn, sql);
+ if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
+ {
+ Console.Write(sql.ToString() + " failure, ");
+ if (res != IntPtr.Zero)
+ {
+ Console.Write("reason: " + TDengine.Error(res));
+ }
+ Console.WriteLine("");
+ }
+ else
+ {
+ Console.WriteLine(sql.ToString() + " success");
+ }
+ return res;
+ }
public static void DisplayRes(IntPtr res)
{
long queryRows = 0;
@@ -120,6 +142,10 @@ namespace Test.UtilsTools
string v10 = Marshal.PtrToStringAnsi(data);
builder.Append(v10);
break;
+ case TDengineDataType.TSDB_DATA_TYPE_JSONTAG:
+ string v11 = Marshal.PtrToStringAnsi(data);
+ builder.Append(v11);
+ break;
}
}
builder.Append("---");
...
- Subproject commit dca4059d87c3f5c678a5e946978d40daec204e27
+ Subproject commit beca4813316f254624d8dbecf54d45a5a232c61d
...@@ -36,7 +36,11 @@ extern "C" { ...@@ -36,7 +36,11 @@ extern "C" {
#define atomic_exchange_8(ptr, val) _InterlockedExchange8((char volatile*)(ptr), (char)(val)) #define atomic_exchange_8(ptr, val) _InterlockedExchange8((char volatile*)(ptr), (char)(val))
#define atomic_exchange_16(ptr, val) _InterlockedExchange16((short volatile*)(ptr), (short)(val)) #define atomic_exchange_16(ptr, val) _InterlockedExchange16((short volatile*)(ptr), (short)(val))
#define atomic_exchange_32(ptr, val) _InterlockedExchange((long volatile*)(ptr), (long)(val)) #define atomic_exchange_32(ptr, val) _InterlockedExchange((long volatile*)(ptr), (long)(val))
#if _MSC_VER >= 1930
#define atomic_exchange_64(ptr, val) InterlockedExchange64((__int64 volatile*)(ptr), (__int64)(val))
#else
#define atomic_exchange_64(ptr, val) _InterlockedExchange64((__int64 volatile*)(ptr), (__int64)(val)) #define atomic_exchange_64(ptr, val) _InterlockedExchange64((__int64 volatile*)(ptr), (__int64)(val))
#endif
#ifdef _WIN64 #ifdef _WIN64
#define atomic_exchange_ptr(ptr, val) _InterlockedExchangePointer((void* volatile*)(ptr), (void*)(val)) #define atomic_exchange_ptr(ptr, val) _InterlockedExchangePointer((void* volatile*)(ptr), (void*)(val))
#else #else
...@@ -91,7 +95,12 @@ extern "C" { ...@@ -91,7 +95,12 @@ extern "C" {
#define atomic_fetch_add_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), (char)(val)) #define atomic_fetch_add_8(ptr, val) _InterlockedExchangeAdd8((char volatile*)(ptr), (char)(val))
#define atomic_fetch_add_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), (short)(val)) #define atomic_fetch_add_16(ptr, val) _InterlockedExchangeAdd16((short volatile*)(ptr), (short)(val))
#define atomic_fetch_add_32(ptr, val) _InterlockedExchangeAdd((long volatile*)(ptr), (long)(val)) #define atomic_fetch_add_32(ptr, val) _InterlockedExchangeAdd((long volatile*)(ptr), (long)(val))
#if _MSC_VER >= 1930
#define atomic_fetch_add_64(ptr, val) InterlockedExchangeAdd64((__int64 volatile*)(ptr), (__int64)(val))
#else
#define atomic_fetch_add_64(ptr, val) _InterlockedExchangeAdd64((__int64 volatile*)(ptr), (__int64)(val)) #define atomic_fetch_add_64(ptr, val) _InterlockedExchangeAdd64((__int64 volatile*)(ptr), (__int64)(val))
#endif
#define atomic_sub_fetch_8(ptr, val) interlocked_add_fetch_8((char volatile*)(ptr), -(char)(val)) #define atomic_sub_fetch_8(ptr, val) interlocked_add_fetch_8((char volatile*)(ptr), -(char)(val))
#define atomic_sub_fetch_16(ptr, val) interlocked_add_fetch_16((short volatile*)(ptr), -(short)(val)) #define atomic_sub_fetch_16(ptr, val) interlocked_add_fetch_16((short volatile*)(ptr), -(short)(val))
......
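The 1930 cutoff in the gate above corresponds to the Visual Studio 2022 toolset, where the underscore-prefixed 64-bit intrinsics are replaced by the Win32 Interlocked* functions. A self-contained sketch of the same dispatch; the GCC/Clang fallback is added here only so the snippet compiles everywhere, and is not part of the patch:

```c
#include <stdint.h>
#include <stdio.h>

/* Pick a 64-bit atomic exchange implementation by toolchain, mirroring
 * the #if _MSC_VER >= 1930 gate above. */
#if defined(_MSC_VER) && _MSC_VER >= 1930
#include <windows.h>
#define exchange_64(ptr, val) InterlockedExchange64((__int64 volatile*)(ptr), (__int64)(val))
#elif defined(_MSC_VER)
#define exchange_64(ptr, val) _InterlockedExchange64((__int64 volatile*)(ptr), (__int64)(val))
#else  /* GCC/Clang fallback for portability of this sketch only */
#define exchange_64(ptr, val) __atomic_exchange_n((int64_t*)(ptr), (int64_t)(val), __ATOMIC_SEQ_CST)
#endif

int main(void) {
  int64_t v = 1;
  int64_t old = exchange_64(&v, 42);  /* atomically swap, return old value */
  printf("old=%lld new=%lld\n", (long long)old, (long long)v);
  return 0;
}
```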
@@ -121,7 +121,7 @@ bool checkTzPresent(char *str, int32_t len) {
}
- inline int32_t taos_parse_time(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t day_light) {
+ FORCE_INLINE int32_t taos_parse_time(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t day_light) {
return taosParseTime(timestr, time, len, timePrec, day_light);
}
...
@@ -37,7 +37,7 @@
#define HTTP_BUFFER_SIZE 8388608
#define HTTP_STEP_SIZE 4096 //http message get process step by step
#define HTTP_METHOD_SCANNER_SIZE 7 //http method fp size
- #define HTTP_GC_TARGET_SIZE 512
+ #define HTTP_GC_TARGET_SIZE 16384
#define HTTP_WRITE_RETRY_TIMES 500
#define HTTP_WRITE_WAIT_TIME_MS 5
#define HTTP_PASSWORD_LEN TSDB_UNI_LEN
...
@@ -130,14 +130,34 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
// for group by
if (groupFields != -1) {
char target[HTTP_GC_TARGET_SIZE] = {0};
- int32_t len;
+ int32_t len = 0, cur = 0;
- len = snprintf(target, HTTP_GC_TARGET_SIZE, "%s{", aliasBuffer);
+ cur = snprintf(target, HTTP_GC_TARGET_SIZE, "%s{", aliasBuffer);
+ if (cur < 0 || cur >= HTTP_GC_TARGET_SIZE) {
+ httpError("context:%p, fd:%d, too long alias: %s", pContext, pContext->fd, aliasBuffer);
+ return false;
+ }
+ len += cur;
for (int32_t i = dataFields + 1; i < num_fields; i++) {
+ // -2 means the last '}' and '\0'
+ #define HTTP_GC_CHECK_SIZE(name) if (cur < 0 || cur >= HTTP_GC_TARGET_SIZE - len - 2) { \
+ if (cur < 0) { \
+ httpError("context:%p, fd:%d, failed to snprintf for: %s", pContext, pContext->fd, name); \
+ } else { \
+ httpError("context:%p, fd:%d, snprintf overflow for: %s", pContext, pContext->fd, name); \
+ target[len] = '\0'; \
+ } \
+ break; \
+ } else { \
+ len += cur; \
+ }
if (row[i] == NULL) {
- len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:nil", fields[i].name);
+ cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:nil", fields[i].name);
+ HTTP_GC_CHECK_SIZE(fields[i].name)
if (i < num_fields - 1) {
- len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, ", ");
+ cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, ", ");
+ HTTP_GC_CHECK_SIZE(fields[i].name)
}
continue;
@@ -146,40 +166,49 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
switch (fields[i].type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
- len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%d", fields[i].name, *((int8_t *)row[i]));
+ cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:%d", fields[i].name, *((int8_t *)row[i]));
+ HTTP_GC_CHECK_SIZE(fields[i].name)
break;
case TSDB_DATA_TYPE_SMALLINT:
- len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%d", fields[i].name, *((int16_t *)row[i]));
+ cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:%d", fields[i].name, *((int16_t *)row[i]));
+ HTTP_GC_CHECK_SIZE(fields[i].name)
break;
case TSDB_DATA_TYPE_INT:
- len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%d,", fields[i].name, *((int32_t *)row[i]));
+ cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:%d,", fields[i].name, *((int32_t *)row[i]));
+ HTTP_GC_CHECK_SIZE(fields[i].name)
break;
case TSDB_DATA_TYPE_BIGINT:
- len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%" PRId64, fields[i].name, *((int64_t *)row[i]));
+ cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:%" PRId64, fields[i].name, *((int64_t *)row[i]));
+ HTTP_GC_CHECK_SIZE(fields[i].name)
break;
case TSDB_DATA_TYPE_FLOAT:
- len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%.5f", fields[i].name, GET_FLOAT_VAL(row[i]));
+ cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:%.5f", fields[i].name, GET_FLOAT_VAL(row[i]));
+ HTTP_GC_CHECK_SIZE(fields[i].name)
break;
case TSDB_DATA_TYPE_DOUBLE:
- len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%.9f", fields[i].name, GET_DOUBLE_VAL(row[i]));
+ cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:%.9f", fields[i].name, GET_DOUBLE_VAL(row[i]));
+ HTTP_GC_CHECK_SIZE(fields[i].name)
break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
if (row[i] != NULL) {
- len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:", fields[i].name);
+ cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:", fields[i].name);
- memcpy(target + len, (char *)row[i], length[i]);
+ HTTP_GC_CHECK_SIZE(fields[i].name)
+ memcpy(target + len, (char *)row[i], MIN(length[i], HTTP_GC_TARGET_SIZE - len - 3));
len = (int32_t)strlen(target);
}
break;
default:
- len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%s", fields[i].name, "-");
+ cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:%s", fields[i].name, "-");
+ HTTP_GC_CHECK_SIZE(fields[i].name)
break;
}
if (i < num_fields - 1) {
- len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, ", ");
+ cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, ", ");
+ HTTP_GC_CHECK_SIZE(fields[i].name)
}
}
- len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "}");
+ cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 1, "}");
if (strcmp(target, targetBuffer) != 0) {
// first target not write this section
...
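The rewrite above works because snprintf returns the length it would have written, not the length it actually wrote, so the old `len += snprintf(...)` pattern could push `len` past the end of `target` once a group-by value exceeded the buffer. A minimal sketch of the checked-accumulation pattern, with hypothetical names (append_field, BUF_SIZE):

```c
#include <stdio.h>

#define BUF_SIZE 64

/* Append "name:value, " to buf, tracking the true length in *len.
 * snprintf's return value is the length it WOULD have written, so it
 * must be range-checked before being added to *len.
 * Returns 0 on overflow or encoding error, 1 on success. */
static int append_field(char *buf, int *len, const char *name, int value) {
  int cur = snprintf(buf + *len, BUF_SIZE - *len, "%s:%d, ", name, value);
  if (cur < 0 || cur >= BUF_SIZE - *len) {
    buf[*len] = '\0';  /* keep the buffer a valid C string */
    return 0;          /* overflow: caller stops appending */
  }
  *len += cur;
  return 1;
}

int main(void) {
  char target[BUF_SIZE] = {0};
  int  len = 0;
  for (int i = 0; i < 100 && append_field(target, &len, "v", i); i++) {}
  printf("%s\n", target);  /* truncated safely instead of overflowing */
  return 0;
}
```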
@@ -590,7 +590,8 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
}
// outer query order by support
int32_t orderColId = pQueryAttr->order.orderColId;
- if (pQueryAttr->vgId == 0 && orderColId != PRIMARYKEY_TIMESTAMP_COL_INDEX && orderColId != INT32_MIN) {
+
+ if (pQueryAttr->vgId == 0 && orderColId != INT32_MIN) {
op = OP_Order;
taosArrayPush(plan, &op);
}
@@ -664,7 +665,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
// outer query order by support
int32_t orderColId = pQueryAttr->order.orderColId;
- if (pQueryAttr->vgId == 0 && orderColId != PRIMARYKEY_TIMESTAMP_COL_INDEX && orderColId != INT32_MIN) {
+ if (pQueryAttr->vgId == 0 && orderColId != INT32_MIN) {
op = OP_Order;
taosArrayPush(plan, &op);
}
...
@@ -4231,7 +4231,6 @@ char* parseTagDatatoJson(void *p){
memset(tagJsonKey, 0, sizeof(tagJsonKey));
memcpy(tagJsonKey, varDataVal(val), varDataLen(val));
}else{ // json value
- char tagJsonValue[TSDB_MAX_JSON_TAGS_LEN] = {0};
char* realData = POINTER_SHIFT(val, CHAR_BYTES);
char type = *(char*)val;
if(type == TSDB_DATA_TYPE_BINARY) {
@@ -4244,14 +4243,16 @@ char* parseTagDatatoJson(void *p){
}
cJSON_AddItemToObject(json, tagJsonKey, value);
}else if(type == TSDB_DATA_TYPE_NCHAR) {
+ char *tagJsonValue = calloc(varDataLen(realData), 1);
int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), tagJsonValue);
if (length < 0) {
tsdbError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset,
(char*)val);
+ free(tagJsonValue);
goto end;
}
cJSON* value = cJSON_CreateString(tagJsonValue);
+ free(tagJsonValue);
if (value == NULL)
{
goto end;
...
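The read path applies the same discipline: the decode buffer is allocated per value and freed on the error branch, and again right after cJSON_CreateString(), which copies its input. A sketch under those assumptions; the cjson include path and the decode() helper are illustrative stand-ins, not TDengine's own:

```c
#include <stdlib.h>
#include <string.h>
#include <cjson/cJSON.h>  /* assumes the system cJSON package */

/* Toy stand-in for taosUcs4ToMbs(): copy n bytes, return length or -1. */
static int decode(const char *src, int n, char *dst) {
  memcpy(dst, src, n);
  return n;
}

int add_string_value(cJSON *json, const char *key, const char *raw, int rawLen) {
  char *tmp = calloc(rawLen + 1, 1);      /* sized from the value, not a fixed max */
  if (tmp == NULL) return -1;
  if (decode(raw, rawLen, tmp) < 0) {
    free(tmp);                            /* error path releases the buffer */
    return -1;
  }
  cJSON *value = cJSON_CreateString(tmp); /* cJSON copies the string internally */
  free(tmp);                              /* so the scratch buffer can go now */
  if (value == NULL) return -1;
  cJSON_AddItemToObject(json, key, value);
  return 0;
}
```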
###################################################################
# Copyright (c) 2021 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def caseDescription(self):
'''
case1<shenglian zhou>: [TD-12145]function/clause program inserted column will be use as ts in outerquery
case2<shenglian zhou>: [TD-12164]elapsed function can only take primary timestamp as first parameter
case3<shenglian zhou>: [TD-12165]_c0 can not be alias name
'''
return
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self._conn = conn
def run(self):
print("running {}".format(__file__))
tdSql.execute("drop database if exists td12145")
tdSql.execute("create database if not exists td12145")
tdSql.execute('use td12145')
tdSql.execute('create stable st(ts timestamp , value int ) tags (ind int)')
tdSql.execute('insert into tb1 using st tags(1) values(now ,1)')
tdSql.execute('insert into tb1 using st tags(1) values(now+1s ,2)')
tdSql.execute('insert into tb1 using st tags(1) values(now+2s ,3)')
tdSql.error('select elapsed(ts00 ,1s) from (select elapsed(ts,1s) ts00 from tb1)')
tdSql.error('select elapsed(ts00 ,1s) from (select value ts00 from tb1)')
tdSql.error('select _c0 from (select value as _c0 , _c0 from st)')
tdSql.error('select ts from (select value as _c0 , ts from st)')
tdSql.query('select ts, max(nestvalue) from (select csum(value) nestvalue from tb1)')
tdSql.checkRows(1)
tdSql.checkData(0, 1, 6)
tdSql.execute('drop database td12145')
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
python3 test.py -f 0-management/3-tag/json_tag.py
python3 test.py -f 1-insert/0-sql/batchInsert.py
python3 test.py -f 2-query/7-nest/ts_hidden_column.py
C#checker/bin/
C#checker/obj/
TDengineTest/bin/
TDengineTest/obj/
schemaless/bin/
schemaless/obj/
stmt/TDengineDriver.cs
stmt/TaosBind.cs
stmt/TaosMultiBind.cs
stmt/bin/
stmt/obj/
taosdemo/bin/
taosdemo/obj/
@@ -7,6 +7,8 @@ IF (TD_LINUX)
TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread )
ADD_EXECUTABLE(sml schemaless.c)
TARGET_LINK_LIBRARIES(sml taos_static trpc tutil pthread )
+ ADD_EXECUTABLE(sqlperf sqlperf.c)
+ TARGET_LINK_LIBRARIES(sqlperf taos_static trpc tutil pthread )
ADD_EXECUTABLE(subscribe subscribe.c)
TARGET_LINK_LIBRARIES(subscribe taos_static trpc tutil pthread )
ADD_EXECUTABLE(epoll epoll.c)
...
@@ -105,6 +105,35 @@ int32_t generateLine(char* line, int lineLen, char* lineTemplate, int protocol,
return TSDB_CODE_SUCCESS;
}
+ int32_t setupSuperTables(TAOS* taos, char* lineTemplate, int protocol,
+ int numSuperTables, int numChildTables, int numRowsPerChildTable,
+ int maxBatchesPerThread, int64_t ts) {
+ printf("setup supertables...");
+ {
+ char** linesStb = calloc(numSuperTables, sizeof(char*));
+ for (int i = 0; i < numSuperTables; i++) {
+ char* lineStb = calloc(strlen(lineTemplate)+128, 1);
+ generateLine(lineStb, strlen(lineTemplate)+128, lineTemplate, protocol, i,
+ numSuperTables * numChildTables,
+ ts + numSuperTables * numChildTables * numRowsPerChildTable);
+ linesStb[i] = lineStb;
+ }
+ SThreadInsertArgs args = {0};
+ args.protocol = protocol;
+ args.batches = calloc(maxBatchesPerThread, sizeof(maxBatchesPerThread));
+ args.taos = taos;
+ args.batches[0].lines = linesStb;
+ args.batches[0].numLines = numSuperTables;
+ insertLines(&args);
+ free(args.batches);
+ for (int i = 0; i < numSuperTables; ++i) {
+ free(linesStb[i]);
+ }
+ free(linesStb);
+ }
+ return TSDB_CODE_SUCCESS;
+ }
int main(int argc, char* argv[]) {
int numThreads = 8;
int maxBatchesPerThread = 1024;
@@ -117,9 +146,10 @@ int main(int argc, char* argv[]) {
int maxLinesPerBatch = 16384;
int protocol = TSDB_SML_TELNET_PROTOCOL;
+ int assembleSTables = 0;
int opt;
- while ((opt = getopt(argc, argv, "s:c:r:f:t:b:p:hv")) != -1) {
+ while ((opt = getopt(argc, argv, "s:c:r:f:t:b:p:w:hv")) != -1) {
switch (opt) {
case 's':
numSuperTables = atoi(optarg);
@@ -142,6 +172,9 @@ int main(int argc, char* argv[]) {
case 'v':
verbose = true;
break;
+ case 'a':
+ assembleSTables = atoi(optarg);
+ break;
case 'p':
if (optarg[0] == 't') {
protocol = TSDB_SML_TELNET_PROTOCOL;
@@ -152,11 +185,11 @@ int main(int argc, char* argv[]) {
}
break;
case 'h':
- fprintf(stderr, "Usage: %s -s supertable -c childtable -r rows -f fields -t threads -b maxlines_per_batch -p [t|l|j] -v\n",
+ fprintf(stderr, "Usage: %s -s supertable -c childtable -r rows -f fields -t threads -b maxlines_per_batch -p [t|l|j] -a assemble-stables -v\n",
argv[0]);
exit(0);
default: /* '?' */
- fprintf(stderr, "Usage: %s -s supertable -c childtable -r rows -f fields -t threads -b maxlines_per_batch -p [t|l|j] -v\n",
+ fprintf(stderr, "Usage: %s -s supertable -c childtable -r rows -f fields -t threads -b maxlines_per_batch -p [t|l|j] -a assemble-stables -v\n",
argv[0]);
exit(-1);
}
@@ -200,28 +233,9 @@ int main(int argc, char* argv[]) {
getTelenetTemplate(lineTemplate, 65535);
}
- printf("setup supertables...");
- {
- char** linesStb = calloc(numSuperTables, sizeof(char*));
- for (int i = 0; i < numSuperTables; i++) {
- char* lineStb = calloc(strlen(lineTemplate)+128, 1);
- generateLine(lineStb, strlen(lineTemplate)+128, lineTemplate, protocol, i,
- numSuperTables * numChildTables,
- ts + numSuperTables * numChildTables * numRowsPerChildTable);
- linesStb[i] = lineStb;
- }
- SThreadInsertArgs args = {0};
- args.protocol = protocol;
- args.batches = calloc(maxBatchesPerThread, sizeof(maxBatchesPerThread));
- args.taos = taos;
- args.batches[0].lines = linesStb;
- args.batches[0].numLines = numSuperTables;
- insertLines(&args);
- free(args.batches);
- for (int i = 0; i < numSuperTables; ++i) {
- free(linesStb[i]);
- }
- free(linesStb);
- }
+ if (assembleSTables) {
+ setupSuperTables(taos, lineTemplate, protocol,
+ numSuperTables, numChildTables, numRowsPerChildTable, maxBatchesPerThread, ts);
+ }
printf("generate lines...\n");
...
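One detail worth keeping in mind when extending the option handling above: every letter handled in the switch must also be registered in the getopt option string, with a trailing colon when it consumes an argument, or getopt will route it to the default case. A minimal sketch of that pattern with hypothetical flags:

```c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char *argv[]) {
  int assembleSTables = 0;  /* hypothetical flag, off by default */
  int verbose = 0;
  int opt;
  /* "a:" registers -a as an option taking an argument; a letter handled
   * in the switch but missing from this string is treated as unknown. */
  while ((opt = getopt(argc, argv, "a:vh")) != -1) {
    switch (opt) {
      case 'a':
        assembleSTables = atoi(optarg);
        break;
      case 'v':
        verbose = 1;  /* argument-free flag */
        break;
      case 'h':
      default:
        fprintf(stderr, "Usage: %s -a assemble-stables -v\n", argv[0]);
        exit(opt == 'h' ? 0 : -1);
    }
  }
  if (verbose && assembleSTables) printf("assembling super tables first\n");
  return 0;
}
```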
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "taos.h" // TAOS header file
#include "taoserror.h"
#include "os.h"
bool verbose = false;
bool describeTableFirst = false;
typedef struct{
TAOS* taos;
int numThreads;
int threadId;
int numSTables;
} SThreadArgs;
static int executeSql(TAOS *taos, char *command) {
if (verbose) {
printf("sql: %s\n", command);
}
TAOS_RES *pSql = NULL;
int32_t code = TSDB_CODE_SUCCESS;
pSql = taos_query(taos, command);
code = taos_errno(pSql);
if (code != 0) {
if (verbose) fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(pSql));
taos_free_result(pSql);
return code;
}
taos_free_result(pSql);
return 0;
}
void* threadFunc(void* args) {
char* sqlDescribeSTable = "describe st%d";
char* sqlCreateSTable = "create table st%d (ts timestamp, value double) "
"tags(t0 nchar(20), t1 nchar(20), t2 nchar(20), t3 nchar(20), t4 nchar(20), "
"t5 nchar(20), t6 nchar(20), t7 nchar(20), t8 nchar(20), t9 nchar(20))";
char* sqlInsertData = "insert into t%d using st%d tags('t%d', 't%d', 't%d', 't%d', 't%d', 't%d', 't%d', 't%d', 't%d', 't%d') values(%lld, %d.%d)";
SThreadArgs* param = args;
int interval = param->numSTables/param->numThreads;
if (param->numSTables % param->numThreads != 0) {
++interval;
}
int start = param->threadId*interval;
int end = (param->threadId+1)*interval > param->numSTables ? param->numSTables : (param->threadId+1)*interval;
int r = rand();
for (int i = start; i < end; ++i) {
int tableId = i;
char sql0[1024] = {0};
char sql1[1024] = {0};
char sql2[1024] = {0};
sprintf(sql0, sqlDescribeSTable, tableId);
sprintf(sql1, sqlCreateSTable, tableId);
time_t ct = time(0);
int64_t ts = ct * 1000;
sprintf(sql2, sqlInsertData, tableId, tableId, r, r, r, r, r, r, r, r, r, r, ts + tableId, r, r);
if (describeTableFirst) {
executeSql(param->taos, sql0);
}
executeSql(param->taos, sql1);
executeSql(param->taos, sql2);
}
return NULL;
}
int main(int argc, char *argv[]) {
int numSTables = 20000;
int numThreads = 32;
int opt;
while ((opt = getopt(argc, argv, "s:t:fvh")) != -1) {
switch (opt) {
case 's':
numSTables = atoi(optarg);
break;
case 't':
numThreads = atoi(optarg);
break;
case 'f':
describeTableFirst = true;
break;
case 'v':
verbose = true;
break;
case 'h':
fprintf(stderr, "Usage: %s -s supertable -t thread -FirstDescribeSTable -Verbose\n", argv[0]);
exit(0);
default:
fprintf(stderr, "Usage: %s -s supertable -t thread -FirstDescribeSTable -Verbose\n", argv[0]);
exit(-1);
}
}
// connect to server
TAOS *taos = taos_connect(NULL, "root", "taosdata", NULL, 0);
if (taos == NULL) {
printf("failed to connect to server, reason:%s\n", "null taos" /*taos_errstr(taos)*/);
exit(1);
}
executeSql(taos, "drop database if exists sqlsml");
executeSql(taos, "create database sqlsml");
executeSql(taos, "use sqlsml");
pthread_t* tids = calloc(numThreads, sizeof(pthread_t));
SThreadArgs* threadArgs = calloc(numThreads, sizeof(SThreadArgs));
for (int i = 0; i < numThreads; ++i) {
threadArgs[i].numSTables = numSTables;
threadArgs[i].numThreads = numThreads;
threadArgs[i].threadId = i;
threadArgs[i].taos = taos;
}
int64_t begin = taosGetTimestampUs();
for (int i = 0; i < numThreads; ++i) {
pthread_create(tids+i, NULL, threadFunc, threadArgs+i);
}
for (int i = 0; i < numThreads; ++i) {
pthread_join(tids[i], NULL);
}
int64_t end = taosGetTimestampUs();
printf("TIME: %d(ms)\n", (int)((end-begin)/1000));
printf("THROUGHPUT: %d\n", (int)((numSTables * 1e6) / (end-begin)));
free(threadArgs);
free(tids);
taos_close(taos);
taos_cleanup();
}
@@ -7,4 +7,7 @@ ulimit -c unlimited
# python3 test.py -f restful/restful_bind_db2.py
python3 ./test.py -f client/nettest.py
+ python3 ./test.py -f ../system-test/4-taosAdapter/taosAdapter_query.py
+ python3 ./test.py -f ../system-test/4-taosAdapter/taosAdapter_insert.py
#======================p1-end===============
@@ -167,6 +167,7 @@ python3 ./test.py -f update/merge_commit_data.py
# tools
python3 test.py -f tools/taosdumpTest.py
python3 test.py -f tools/taosdumpTest2.py
+ python3 test.py -f tools/taosdumpTest3.py
python3 test.py -f tools/taosdemoTest.py
python3 test.py -f tools/taosdemoTestWithoutMetric.py
...
@@ -266,7 +266,7 @@ class ElapsedCase:
self.limitCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s) group by tbname", 1)
def fromCheck(self, sqlTemplate, table):
- tdSql.checkEqual(tdSql.getResult(sqlTemplate % table), tdSql.getResult(sqlTemplate % ("(select * from %s)" % table)))
+ #tdSql.checkEqual(tdSql.getResult(sqlTemplate % table), tdSql.getResult(sqlTemplate % ("(select * from %s)" % table)))
tdSql.query(sqlTemplate % ("(select last(ts) from %s interval(10s))" % table))
tdSql.query(sqlTemplate % ("(select elapsed(ts) from %s interval(10s))" % table))
...
@@ -59,6 +59,7 @@ class TDTestCase:
assert subResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0
def run(self):
+ tdSql.prepare()
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
@@ -66,24 +67,38 @@ class TDTestCase:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
- # clear env
+ # clear envs
- os.system("ps -ef |grep 'taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9")
+ os.system("ps -aux |grep 'taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json' |awk '{print $2}'|xargs kill -9 >/dev/null 2>&1")
+ os.system("ps -aux |grep 'tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json' |awk '{print $2}'|xargs kill -9 >/dev/null 2>&1")
os.system("rm -rf ./subscribe_res*")
os.system("rm -rf ./all_subscribe_res*")
# insert data
- os.system("%staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json" % binPath)
+ os.system("nohup %staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json & >/dev/null 2>&1" % binPath)
- os.system("nohup %staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json &" % binPath)
- query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json" |grep -v "grep"|awk \'{print $2}\'')[1])
+ sleep(5)
+ tdSql.query("select count(*) from subnsdb.stb0")
+ if tdSql.checkData(0,0,100):
+     pass
+ else:
+     sleep(5)
+     tdSql.query("select count(*) from subnsdb.stb0") # if records not write done ,sleep and wait records write done!
+ os.system(" nohup %staosBenchmark -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json & >/dev/null 2>&1" % binPath)
+ sleep(5)
+ if os.path.exists("./subscribe_res0.txt") and os.path.exists("./subscribe_res1.txt") and os.path.exists("./subscribe_res2.txt"):
+     pass
+ else:
+     sleep(5) # make sure query is ok
+ print('taosBenchmark query done!')
# merge result files
+ sleep(5)
os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt")
os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt")
os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt")
+ sleep(5)
# correct subscribeTimes testcase
subTimes0 = self.subTimes("all_subscribe_res0.txt")
@@ -103,17 +118,16 @@ class TDTestCase:
os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt")
subTimes0 = self.subTimes("all_subscribe_res0.txt")
- print("pass")
self.assertCheck("all_subscribe_res0.txt",subTimes0 ,202)
- # correct data testcase
- os.system("kill -9 %d" % query_pid)
sleep(3)
os.system("rm -rf ./subscribe_res*")
os.system("rm -rf ./all_subscribe*")
os.system("rm -rf ./*.py.sql")
+ os.system("rm -rf ./nohup*")
+ os.system("ps -aux |grep 'taosdemoAllTest/taosdemoTestSupportNanoSubscribe.json' |awk '{print $2}'|xargs kill -9 >/dev/null 2>&1")
+ os.system("ps -aux |grep 'tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoSubscribe.json' |awk '{print $2}'|xargs kill -9 >/dev/null 2>&1")
+ os.system("ps -aux |grep 'tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json' |awk '{print $2}'|xargs kill -9 >/dev/null 2>&1")
@@ -123,3 +137,4 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -251,7 +251,7 @@ class TDDnode:
"dnode:%d is deployed and configured by %s" %
(self.index, self.cfgPath))
- def getBuildPath(self):
+ def getBuildPath(self, tool="taosd"):
buildPath = ""
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -261,7 +261,7 @@ class TDDnode:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
- if (("taosd") in files):
+ if ((tool) in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
...
@@ -50,6 +50,9 @@ sql insert into t3 values('2017-12-25 21:25:41', 3)
sql insert into t3 values('2017-12-25 21:26:41', 3)
sql insert into t3 values('2017-12-25 21:27:41', 3)
+ sql create table m3 (ts timestamp, col1 int, col2 float, txt binary(500))
+ sql insert into m3 values(now, 1, 2.0, 'HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS')
print =============== step2 - login
system_content curl 127.0.0.1:7111/grafana/
@@ -179,4 +182,10 @@ if $system_content != @[{"refId":"A","target":"{count(v1):3}","datapoints":[[15.
return -1
endi
+ system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"taosd","sql":"select last(col1), last(col2), last(txt) from d1.m3 group by txt"}]' 127.0.0.1:7111/grafana/query
+ print 20-> $system_content
+ if $system_content != @[{"refId":"A","target":"taosd{last(col2):2.00000, last(txt):HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS}","datapoints":[[1,"-"]]}]@ then
+ return -1
+ endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import taos
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def __init__(self):
self.err_case = 0
        self.current_case = 0
def caseDescription(self):
'''
        case1 <cpwu>: [TD-12340] : group by ts should return two columns ;\n
        case2 <cpwu>: [TD-12342] : "group by ts order by first-tag" should return an error
'''
return
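    # Illustrative expectation for case1 (a sketch, not asserted literally below):
    # "select count(*) from stb1 group by ts" should return the aggregate plus
    # the grouping key, i.e. two columns such as (count(*), ts).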
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def create_stb(self):
basetime = int(round(time.time() * 1000))
tdSql.prepare()
tdSql.execute(f"create stable stb1(ts timestamp, c1 int) tags (tag1 int)")
for i in range(10):
tdSql.execute(f"create table t{i} using stb1 tags({i})")
tdSql.execute(f"insert into t{i} values ({basetime}, {i})")
pass
def check_td12340(self):
        # this case expects two columns to be returned when using "group by ts"
tdSql.query("select count(*) from stb1 group by ts")
try:
tdSql.checkCols(2)
            self.current_case += 1
tdLog.printNoPrefix("the case1: td-12340 run passed")
except:
self.err_case += 1
tdLog.printNoPrefix("the case1: td-12340 run failed")
pass
def check_td12342(self):
        # this case expects an error when using "group by ts order by first-tag"
try:
tdSql.error("select count(*) from stb1 group by ts order by tag1")
            self.current_case += 1
tdLog.printNoPrefix("the case2: td-12342 run passed")
except:
self.err_case += 1
tdLog.printNoPrefix("the case2: td-12342 run failed")
pass
def run(self):
self.create_stb()
self.check_td12340()
self.check_td12342()
        if self.err_case > 0:
            tdLog.exit(f"{self.err_case} case(s) failed")
        else:
            tdLog.success("all 2 cases passed")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
        self.ts = 1420041600000  # 2015-01-01 00:00:00, start time of the first record
self.num = 10
def caseDescription(self):
'''
        case1 <wenzhouwww>: [TD-11389] :
            this test case covers a client cache error: it makes the cached data held by a client
            already connected to taosd become incorrect.
            root cause: the table schema is changed when the hostname tag size is increased
            through schema-less insertion, but the schema cache of the taos client is not refreshed.
'''
return
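    # A minimal sketch of the trigger (an assumption for illustration; this case
    # drives the query via the "taos -s" shell below): a schema-less
    # line-protocol insert whose hostname tag is wider than binary(10), e.g.
    #   st,hostname=host_with_much_longer_name,ind=99 value=1.0 1420041600000
    # widens the tag column on the server, while an already-connected client
    # keeps serving the stale cached schema.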
    def getBuildPath(self):
        buildPath = ""
        selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def getcfgPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
print(selfPath)
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
        cfgPath = projPath + "/sim/dnode1/cfg"
return cfgPath
def run(self):
tdSql.prepare()
tdSql.execute("create database if not exists testdb keep 36500;")
tdSql.execute("use testdb;")
tdSql.execute("create stable st (ts timestamp , id int , value double) tags(hostname binary(10) ,ind int);")
for i in range(self.num):
tdSql.execute("insert into sub_%s using st tags('host_%s' , %d) values (%d , %d , %f );"%(str(i),str(i),i*10,self.ts+10000*i,i*2,i+10.00))
tdSql.query('select elapsed(ts,10s) from sub_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1d) ;')
cfg_path = self.getcfgPath()
print(cfg_path)
# tdSql.execute('select elapsed(ts,10s) from st where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1d) group by tbname;') # session not support super table
os.system("taos -c %s -s 'select elapsed(ts,10s) from testdb.st where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" session(ts,1d) group by tbname;' " % (cfg_path))
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
{"base_url": "127.0.0.1", "precision": "ms", "clear_data": true, "database_name": "db", "tbnum": 10, "data_row": 100, "case_file": "data_insert.csv", "basetime": 1639969683873, "all_case": false, "all_err": false, "all_current": true, "err_case": {"port_err": true, "api_err": true, "header_err": true, "db_tb_err": true, "data_err": true}, "current_case": {"port_current": true, "api_current": true, "header_current": true, "db_tb_current": true, "data_current": true}}
\ No newline at end of file
{"base_url": "127.0.0.1", "precision": "ms", "clear_data": true, "database_name": "db", "tbnum": 10, "data_row": 100, "basetime": 1639969706198, "all_case": false, "all_err": false, "all_current": true, "err_case": {"port_err": true, "api_err": true, "header_err": true, "sql_err": true}, "current_case": {"port_current": true, "api_current": true, "header_current": true, "sql_current": true}}
\ No newline at end of file
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import time
import subprocess
import random
import inspect
import taos
import requests
import json
import traceback
import simplejson.errors
import csv
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class RestMsgInfo:
def __init__(self, base_url,
port=6041,
api_url="/rest/sql",
header={'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}
):
self.base_url = base_url
self.port = port
self.api_url = api_url
self.header = header
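        # Note: the default 'cm9vdDp0YW9zZGF0YQ==' is the base64 encoding of
        # "root:taosdata", TDengine's default credentials for HTTP Basic auth.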
self.full_url = f"http://{base_url}:{port}{api_url}"
class TDTestCase:
def __init__(self):
self.base_url = "127.0.0.1"
self.dbname = "db"
self.precision = "ms"
self.tbnum = 0
self.data_row = 0
self.basetime = 0
self.file = ""
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def caseDescription(self):
'''
case1 <cpwu>: create/alter/drop database/normal_table/child_table/stable \n
        case2 <cpwu>: insert multiple records into a table \n
case3 <cpwu>: insert multiple records into a given column \n
case4 <cpwu>: insert multiple records into multiple tables \n
case5 <cpwu>: automatically create a table when inserting, and specify a given tags column \n
case6 <cpwu>: insert with files \n
case7 <cpwu>: api_url test \n
case8 <cpwu>: base_url test \n
case9 <cpwu>: header test
'''
return
    def rest_test_table(self, dbname: str, tbnum: int) -> None:
tdSql.execute(f"drop database if exists {dbname}")
tdSql.execute(f"create database if not exists {dbname} keep 3650 precision '{self.precision}' ")
tdSql.execute(f"use {dbname}")
tdSql.execute(
f'''
create stable {dbname}.stb1 (
ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool,
c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
)
tags(
tag1 int, tag2 float, tag3 timestamp, tag4 binary(16), tag5 double, tag6 bool,
tag7 bigint, tag8 smallint, tag9 tinyint, tag10 nchar(16)
)
'''
)
tdSql.execute(
f"create stable {dbname}.stb2 (ts timestamp, c1 int) tags(ttag1 int)"
)
for i in range(tbnum):
tdSql.execute(
f'''
create table {dbname}.t{i} using {dbname}.stb1
tags(
{i}, {i}, {1639032680000+i*10}, 'binary_{i}',{i},{random.choice([0, 1])}, {i},{i%32767},{i%127},'nchar_{i}'
)'''
)
tdSql.execute(f"create table {dbname}.tt{i} using {dbname}.stb2 tags({i})")
tdSql.execute(
f"create table {dbname}.nt1 (ts timestamp, c1 int, c2 float)"
)
tdSql.execute(
f"create table {dbname}.nt2 (ts timestamp, c1 int, c2 float)"
)
pass
    def rest_test_data(self, tbnum: int, data_row: int, basetime: int) -> None:
for i in range(tbnum):
for j in range(data_row):
tdSql.execute(
f"insert into t{i} values ("
f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
)
tdSql.execute(
f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
)
def check_err_case(self,query_msg: RestMsgInfo, data):
url, header = query_msg.full_url, query_msg.header
try:
conn = requests.post(url=url, data=data, headers=header)
resp_code = conn.status_code
resp = conn.json()
            if resp_code != 200:
                tdLog.success(f"expected error occurred, url: {url}, sql: {data}, error code is: {resp_code}")
                return
            status = resp["status"]
            desc = resp["desc"]
            if resp_code == 200 and status == "error":
                tdLog.success(f"expected error occurred, url: {url}, sql: {data}, error is: {desc}")
                return
            else:
                tdLog.exit("expected error did not occur")
        except requests.exceptions.InvalidHeader as e:
            print(f"expected error occurred, request header error, header: {header}, error: {e}")
        except requests.exceptions.InvalidURL as e:
            print(f"expected error occurred, request url error, url: {url}, error: {e}")
        except requests.exceptions.ConnectionError as e:
            print(f"expected error occurred, request connection error, url: {url}, error: {e}")
        except simplejson.errors.JSONDecodeError as e:
            print(f"expected error occurred, request json error, url: {url}, header: {header}, error: {e}")
        except Exception as e:
            print(f"expected error occurred, url: {url}, header: {header}, error: {traceback.format_exc()}")
# finally:
# conn.close()
pass
def check_err_sql_case(self,query_msg: RestMsgInfo, data):
url, header = query_msg.full_url, query_msg.header
conn = requests.post(url=url, data=data, headers=header)
resp_code = conn.status_code
resp = conn.json()
try:
status = resp["status"]
desc = resp["desc"]
            if resp_code == 200 and status == "error":
                tdLog.success(f"expected error occurred, url: {url}, error is: {desc}")
                return
            else:
                tdLog.exit("expected error did not occur")
except Exception as e:
tdLog.debug(f"url: {url}, resp: {resp} ")
traceback.print_exc()
raise e
def check_current_case(self,query_msg: RestMsgInfo, data):
url, header = query_msg.full_url, query_msg.header
conn = requests.post(url=url, data=data, headers=header)
resp_code = conn.status_code
resp = conn.json()
try:
status = resp["status"]
            if resp_code == 200 and status == "succ":
                tdLog.success(f"restful run success! url: {url}")
            else:
                tdLog.exit(f"restful api test failed, url: {url}, sql: {data}, resp: {resp}")
except:
tdLog.debug(f"resp_code: {resp_code}, url: {url}, resp:{resp}")
traceback.print_exc()
raise
pass
def check_case_res_data(self, query_msg: RestMsgInfo, data):
url, header, api = query_msg.full_url, query_msg.header, query_msg.api_url
try:
ts_col = []
stb_list = [f"describe {self.dbname}.stb1", f"describe {self.dbname}.stb2"]
for stb in stb_list:
conn = requests.post(url=url, data=stb, headers=header)
resp = conn.json()
for col in resp["data"]:
if "TIMESTAMP" == col[1]:
ts_col.append(col[0])
check_column = []
conn = requests.post(url=url, data=data, headers=header)
resp = conn.json()
if len(resp["data"]) < 1:
return
for meta in resp["column_meta"]:
if meta[0] in ts_col:
check_column.append(meta[0])
if len(check_column) < 1:
return
if self.precision == "ms" and (api == "/rest/sql" or api == f"/rest/sql/{self.dbname}"):
return
except:
raise
pass
def db_tb_case_current(self):
# when version > 2.6, add the follow case:
# f"alter table {self.dbname}.tb1 add column c2 float",
# f"alter table {self.dbname}.tb1 drop column c2 ",
# f"alter table {self.dbname}.tb1 add column c2 float ; alter table {self.dbname}.tb1 drop column c2 ",
case_list = [
"create database if not exists db",
"create database if not exists db",
"create database if not exists db1",
"alter database db1 comp 2",
"alter database db1 keep 36500",
"drop database if exists db1",
"drop database if exists db1",
"drop database if exists db",
f"create database if not exists {self.dbname}",
f"create table if not exists {self.dbname}.tb1 (ts timestamp , c1 int)",
f"create table if not exists {self.dbname}.tb1 (ts timestamp , c1 float)",
f"create table if not exists {self.dbname}.stb1 (ts timestamp , c1 int) tags(tag1 int )",
f"create table if not exists {self.dbname}.stb1 (ts timestamp , c1 float) tags(tag2 int )",
f"create table if not exists {self.dbname}.stb2 (ts timestamp , c1 int) tags(tag1 int )",
f"create table if not exists {self.dbname}.stb3 (ts timestamp , c1 int) tags(tag1 int )",
f"create table if not exists {self.dbname}.tb2 using {self.dbname}.stb2 tags(2)",
f"create table if not exists {self.dbname}.tb3 using {self.dbname}.stb2 tags(2)",
f"drop table if exists {self.dbname}.tb2",
f"drop table if exists {self.dbname}.tb2",
f"drop table if exists {self.dbname}.stb2",
f"drop table if exists {self.dbname}.stb2",
f"drop table if exists {self.dbname}.t3",
f"drop table if exists {self.dbname}.stb3",
]
return case_list
def db_tb_case_err(self):
case_list = [
"create database if exists db",
f"drop database if not exists db",
f"drop database db3",
f"create table if exists {self.dbname}.t1 ",
f"create table if exists {self.dbname}.stb1 ",
f"drop table if not exists {self.dbname}.stb1 ",
f"drop table {self.dbname}.stb4 ",
f"create table if not exists {self.dbname}.stb2 (c1 int, c2 timestamp ) tags(tag1 int)",
f"create table if exists {self.dbname}.stb3 (ts timestamp ,c1 int) ",
f"create table if exists {self.dbname}.t2 (c1 int) "
]
return case_list
def data_case_current(self, tbnum:int, data_row:int, basetime: int, file:str):
case_list = []
body_list = []
row_times = data_row // 100
row_alone = data_row % 100
for i in range(row_times):
body = ""
for j in range(100):
body += f"(\
{basetime + (j+1)*10+ i*1000}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)},\
'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, \
{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' \
)"
body_list.append(body)
if row_alone != 0:
body = ""
for j in range(row_alone):
body += f"( \
{basetime + (j+1)*10+ row_times*1000}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, \
{basetime + random.randint(-200, -1)},'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, \
{random.randint(-200,-1)},{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' \
)"
body_list.append(body)
for i in range(tbnum):
pre_insert = f"insert into {self.dbname}.t{i} values "
for value_body in body_list:
insert_sql = pre_insert + value_body
case_list.append(insert_sql)
case_list.append(f'insert into {self.dbname}.nt1 values (now, 1, 1.0)')
case_list.append(f'insert into {self.dbname}.nt1 values ({basetime + 10}, 2, 2.0)')
case_list.append(f'insert into {self.dbname}.nt1 values ({basetime + 20}, 3, 3.0) {self.dbname}.nt2 values (now, 1, 1.0)')
case_list.append(f'insert into {self.dbname}.nt1 (ts, c2, c1) values ({basetime + 20}, 4.0, 4) ')
# exchange column order
case_list.append(f'insert into {self.dbname}.ct1 using {self.dbname}.stb1 (tag1) tags(1) (ts, c1) values (now, 1)')
# insert with file
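        # Assumption: "insert into ... file" consumes a plain CSV in table-column
        # order, with binary/nchar values double-quoted, hence the literal quotes
        # wrapped around the string fields below.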
        if not os.path.isfile(file):
            with open(file=file, mode="w", encoding="utf-8", newline="") as f:
                writer = csv.writer(f)
                for j in range(data_row):
                    data_line = [
                        basetime - (j + 1) * 10, random.randint(-200, -1), random.uniform(200, -1),
                        basetime + random.randint(-200, -1), f'"binary_{j}"', random.uniform(-200, -1),
                        random.choice([0, 1]), random.randint(-200, -1), random.randint(-200, -1),
                        random.randint(-127, -1), f'"nchar_{j}"'
                    ]
                    writer.writerow(data_line)
case_list.append(f"insert into {self.dbname}.ct1 file {file}")
return case_list
pass
def data_case_err(self):
case_list = []
nowtime = int(round(time.time()*1000))
bigger_insert_sql = f"insert into {self.dbname}.nt1 values"
for i in range(40000):
bigger_insert_sql += f"({nowtime-i*10}, {i}, {i*1.0})"
case_list.append(bigger_insert_sql)
nodata_sql = f"insert into {self.dbname}.nt1 values()"
case_list.append(nodata_sql)
less_data_sql = f"insert into {self.dbname}.nt1 values(now)"
case_list.append(less_data_sql)
errtype_data_sql = f"insert into {self.dbname}.nt1 values(now+2, 1.0, 'binary_2')"
case_list.append(errtype_data_sql)
# insert into super table directly
insert_super_data_sql = f"insert into {self.dbname}.stb1 values(now+3, 1, 1.0)"
case_list.append(insert_super_data_sql)
return case_list
def port_case_current(self):
case_list = [6041]
return case_list
def port_case_err(self):
case_list = [
6030,
6051,
666666666,
None,
"abcd"
]
return case_list
def api_case_current(self):
case_List = [
"/rest/sql",
f"/rest/sql/{self.dbname}",
"/rest/sqlt",
f"/rest/sqlt/{self.dbname}",
"/rest/sqlutc",
f"/rest/sqlutc/{self.dbname}"
]
return case_List
def api_case_err(self):
case_list = [
"",
"/rest1/sql",
"/rest/sqlsqltsqlutc",
1,
["/rest", "/sql"],
"/influxdb/v1/write",
"/opentsdb/v1/put/json/db",
"/opentsdb/v1/put/telnet/db",
"/rest*",
"*"
]
return case_list
def header_case_current(self):
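        # Two auth schemes the REST service accepts: HTTP Basic with
        # base64("user:password"), and "Taosd <token>", where the token is
        # returned by a prior /rest/login/<user>/<password> call.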
case_list = [
{'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='},
{'Authorization': 'Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04'}
]
return case_list
def header_case_err(self):
case_list = [
{'Authorization': 'Basic '},
{'Authorization': 'Taosd /root/taosdata'},
{'Authorization': True}
]
return case_list
def run_case_api_err(self):
err_cases = self.api_case_err()
count = 0
data = "create database if not exists db"
for case in err_cases:
print(f"err api case{count}: ", end="")
query_msg = RestMsgInfo(base_url=self.base_url, api_url=case)
self.check_err_case(query_msg=query_msg, data=data)
count += 1
pass
def run_case_port_err(self):
err_cases = self.port_case_err()
count = 0
data = "create database if not exists db"
for case in err_cases:
print(f"err port case{count}: ", end="")
query_msg = RestMsgInfo(base_url=self.base_url, port=case)
self.check_err_case(query_msg=query_msg, data=data)
count += 1
pass
def run_case_header_err(self):
err_cases = self.header_case_err()
count = 0
data = "create database if not exists db"
for case in err_cases:
print(f"err header case{count}: ", end="")
query_msg = RestMsgInfo(base_url=self.base_url, header=case)
self.check_err_case(query_msg=query_msg, data=data)
count += 1
pass
def run_case_db_tb_err(self):
err_cases = self.db_tb_case_err()
count = 0
query_msg = RestMsgInfo(base_url=self.base_url)
for case in err_cases:
print(f"err create db/tb case{count}: ", end="")
self.check_err_sql_case(query_msg=query_msg, data=case)
count += 1
pass
def run_case_data_err(self):
err_cases = self.data_case_err()
count = 0
tdSql.execute(f"drop database if exists {self.dbname}")
tdSql.execute(f"create database if not exists {self.dbname} keep 3650 precision '{self.precision}' ")
tdSql.execute(f"use {self.dbname}")
tdSql.execute(
f'''
create stable {self.dbname}.stb1 (
ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool,
c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
)
tags(
tag1 int, tag2 float, tag3 timestamp, tag4 binary(16), tag5 double, tag6 bool,
tag7 bigint, tag8 smallint, tag9 tinyint, tag10 nchar(16)
)
'''
)
query_msg = RestMsgInfo(base_url=self.base_url)
for case in err_cases:
print(f"err insert data case{count}: ", end="")
self.check_err_sql_case(query_msg=query_msg, data=case)
count += 1
tdSql.execute(f"drop database if exists {self.dbname}")
pass
def run_case_port_current(self):
current_cases = self.port_case_current()
count = 0
data = "create database if not exists db"
for case in current_cases:
print(f"current port case{count}: ", end="")
query_msg = RestMsgInfo(base_url=self.base_url, port=case)
self.check_current_case(query_msg=query_msg, data=data)
count += 1
pass
def run_case_api_current(self):
current_cases = self.api_case_current()
count = 0
data = "create database if not exists db"
for case in current_cases:
print(f"current api case{count}: ", end="")
query_msg = RestMsgInfo(base_url=self.base_url, api_url=case)
self.check_current_case(query_msg=query_msg, data=data)
count += 1
pass
def run_case_header_current(self):
current_cases = self.header_case_current()
count = 0
data = "create database if not exists db"
for case in current_cases:
print(f"current header case{count}: ", end="")
query_msg = RestMsgInfo(base_url=self.base_url, header=case)
self.check_current_case(query_msg=query_msg, data=data)
count += 1
pass
def run_case_db_tb_current(self):
current_cases = self.db_tb_case_current()
count = 0
for case in current_cases:
print(f"current insert db/tb case{count}: ", end="")
for api in ["/rest/sql", "/rest/sqlt", "/rest/sqlutc"]:
query_msg = RestMsgInfo(base_url=self.base_url, api_url=api)
self.check_current_case(query_msg=query_msg, data=case)
count += 1
pass
def run_case_data_current(self):
self.rest_test_table(dbname=self.dbname, tbnum=self.tbnum)
current_cases = self.data_case_current(tbnum=self.tbnum, data_row=self.data_row, basetime=self.basetime, file=self.file)
count = 0
api_cases = self.api_case_current()
for case in current_cases:
print(f"current insert data case{count}: ", end="")
for api in api_cases:
query_msg = RestMsgInfo(base_url=self.base_url, api_url=api)
self.check_current_case(query_msg=query_msg, data=case)
count += 1
pass
def run_case_err(self):
self.run_case_api_err()
self.run_case_port_err()
self.run_case_header_err()
self.run_case_db_tb_err()
self.run_case_data_err()
pass
def run_case_current(self):
self.run_case_api_current()
self.run_case_port_current()
self.run_case_header_current()
self.run_case_db_tb_current()
self.run_case_data_current()
pass
def run_all_case(self):
self.run_case_err()
self.run_case_current()
pass
def set_default_args(self):
nowtime = int(round(time.time() * 1000))
url = "127.0.0.1"
per_table_rows = 100
tbnum = 10
database_name = "db"
precision ="ms"
clear_data = True
insert_case_filename = "data_insert.csv"
config_default = {
"base_url" : url,
"precision" : precision,
"clear_data" : clear_data,
"database_name" : database_name,
"tbnum" : tbnum,
"data_row" : per_table_rows,
"case_file" : insert_case_filename,
"basetime" : nowtime,
"all_case" : False,
"all_err" : False,
"all_current" : True,
"err_case" : {
"port_err" : True,
"api_err" : True,
"header_err" : True,
"db_tb_err" : True,
"data_err" : True,
},
"current_case" : {
"port_current" : True,
"api_current" : True,
"header_current" : True,
"db_tb_current" : True,
"data_current" : True,
}
}
config_file_name = f"{os.path.dirname(os.path.abspath(__file__))}/rest_insert_config.json"
with open(config_file_name, "w") as f:
json.dump(config_default, f)
return config_file_name
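    # The generated rest_insert_config.json can be hand-edited to toggle the
    # err/current case groups before re-running this file; run() below reads it.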
def run(self):
config_file = f"{os.path.dirname(os.path.abspath(__file__))}/rest_insert_config.json"
if not os.path.isfile(config_file):
config_file = self.set_default_args()
with open(config_file, "r", encoding="utf-8") as f:
cfg = json.load(f)
self.tbnum = cfg["tbnum"]
self.data_row = cfg["data_row"]
self.basetime = cfg["basetime"]
self.dbname = cfg["database_name"]
self.base_url = cfg["base_url"]
self.precision = cfg["precision"]
self.file = cfg["case_file"]
        clear_data = bool(cfg["clear_data"])
        if clear_data:
            self.rest_test_table(dbname=self.dbname, tbnum=self.tbnum)
        run_all_case = bool(cfg["all_case"])
        run_all_err_case = bool(cfg["all_err"])
        run_all_current_case = bool(cfg["all_current"])
        run_port_err_case = bool(cfg["err_case"]["port_err"])
        run_api_err_case = bool(cfg["err_case"]["api_err"])
        run_header_err_case = bool(cfg["err_case"]["header_err"])
        run_db_tb_err_case = bool(cfg["err_case"]["db_tb_err"])
        run_data_err_case = bool(cfg["err_case"]["data_err"])
        run_port_current_case = bool(cfg["current_case"]["port_current"])
        run_api_current_case = bool(cfg["current_case"]["api_current"])
        run_header_current_case = bool(cfg["current_case"]["header_current"])
        run_db_tb_current_case = bool(cfg["current_case"]["db_tb_current"])
        run_data_current_case = bool(cfg["current_case"]["data_current"])
print("run_all_case:" ,run_all_case)
print("run_all_err_case:" ,run_all_err_case)
print("run_all_current_case:" ,run_all_current_case)
print("run_port_err_case:" ,run_port_err_case)
print("run_api_err_case:" ,run_api_err_case)
print("run_header_err_case:" ,run_header_err_case)
print("run_db_tb_err_case:" ,run_db_tb_err_case)
print("run_data_err_case:" ,run_data_err_case)
print("run_port_current_case:" ,run_port_current_case)
print("run_api_current_case:" ,run_api_current_case)
print("run_header_current_case:" ,run_header_current_case)
print("run_db_tb_current_case:" ,run_db_tb_current_case)
print("run_data_current_case:" ,run_data_current_case)
if not (run_all_err_case | run_all_current_case | run_port_err_case | run_api_err_case | run_header_err_case |
run_db_tb_err_case | run_data_err_case | run_port_current_case | run_api_current_case |
run_header_current_case | run_db_tb_current_case | run_data_current_case ):
run_all_case = True
if run_all_err_case & run_all_current_case:
run_all_case = True
if run_all_case:
self.run_all_case()
return
if run_all_err_case :
self.run_case_err()
return
if run_all_current_case:
self.run_case_current()
return
if run_port_err_case:
self.run_case_port_err()
if run_api_err_case:
self.run_case_api_err()
if run_header_err_case:
self.run_case_header_err()
if run_db_tb_err_case:
self.run_case_db_tb_err()
if run_data_err_case:
self.run_case_data_err()
if run_port_current_case:
self.run_case_port_current()
if run_api_current_case:
self.run_case_api_current()
if run_header_current_case:
self.run_case_header_current()
if run_db_tb_current_case:
self.run_case_db_tb_current()
if run_data_current_case:
self.run_case_data_current()
pass
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import time
import subprocess
import random
import inspect
import taos
import requests
import json
import traceback
import simplejson.errors
import math
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
from collections import defaultdict
class RestMsgInfo:
def __init__(self, base_url,
port=6041,
api_url="/rest/sql",
header={'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}
):
self.base_url = base_url
self.port = port
self.api_url = api_url
self.header = header
self.full_url = f"http://{base_url}:{port}{api_url}"
class TDTestCase:
def __init__(self):
self.base_url = "127.0.0.1"
self.dbname = "db"
self.precision = "ms"
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def caseDescription(self):
'''
case1 <cpwu>: specified SQL
        case2 <cpwu>: select sql, covering stable, child table and normal table, with both correct and invalid SQL \n
case3 <cpwu>: port test \n
case4 <cpwu>: api_url test \n
case5 <cpwu>: base_url test \n
case6 <cpwu>: header test \n
case7 <cpwu>: big data test
'''
return
    def rest_test_table(self, dbname: str, tbnum: int) -> None:
tdSql.execute(f"drop database if exists {dbname}")
tdSql.execute(f"create database if not exists {dbname} keep 3650 precision '{self.precision}' ")
tdSql.execute(f"use {dbname}")
tdSql.execute(
f'''
create stable {dbname}.stb1 (
ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool,
c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
)
tags(
tag1 int, tag2 float, tag3 timestamp, tag4 binary(16), tag5 double, tag6 bool,
tag7 bigint, tag8 smallint, tag9 tinyint, tag10 nchar(16)
)
'''
)
tdSql.execute(
f"create stable {dbname}.stb2 (ts timestamp, c1 int) tags(ttag1 int)"
)
for i in range(tbnum):
tdSql.execute(
f'''
create table {dbname}.t{i} using {dbname}.stb1
tags({i}, {i}, {1639032680000+i*10}, 'binary_{i}',{i},{random.choice([0, 1])}, {i},{i%32767},{i%127},'nchar_{i}')
'''
)
tdSql.execute(f"create table {dbname}.tt{i} using {dbname}.stb2 tags({i})")
pass
    def rest_test_data(self, tbnum: int, data_row: int, basetime: int) -> None:
for i in range(tbnum):
for j in range(data_row):
tdSql.execute(
f"insert into t{i} values ("
f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
)
tdSql.execute(
f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
)
def check_err_case(self,query_msg: RestMsgInfo, data):
url, header = query_msg.full_url, query_msg.header
try:
conn = requests.post(url=url, data=data, headers=header)
resp_code = conn.status_code
resp = conn.json()
            if resp_code != 200:
                print(f"expected error occurred, url: {url}, sql: {data}, error code is: {resp_code}")
                return
            status = resp["status"]
            desc = resp["desc"]
            if resp_code == 200 and status == "error":
                print(f"expected error occurred, url: {url}, sql: {data}, error is: {desc}")
                return
            else:
                tdLog.exit("expected error did not occur")
        except requests.exceptions.InvalidHeader as e:
            tdLog.success(f"expected error occurred, request header error, header: {header}, error: {e}")
        except requests.exceptions.InvalidURL as e:
            tdLog.success(f"expected error occurred, request url error, url: {url}, error: {e}")
        except requests.exceptions.ConnectionError as e:
            tdLog.success(f"expected error occurred, request connection error, url: {url}, error: {e}")
        except simplejson.errors.JSONDecodeError as e:
            tdLog.success(f"expected error occurred, request json error, url: {url}, header: {header}, error: {e}")
        except Exception as e:
            tdLog.success(f"expected error occurred, url: {url}, header: {header}, error: {traceback.format_exc()}")
# finally:
# conn.close()
pass
def check_err_sql_case(self,query_msg: RestMsgInfo, data):
url, header = query_msg.full_url, query_msg.header
try:
conn = requests.post(url=url, data=data, headers=header)
resp_code = conn.status_code
resp = conn.json()
status = resp["status"]
desc = resp["desc"]
            if resp_code == 200 and status == "error":
                tdLog.success(f"expected error occurred, url: {url}, sql: {data}, error is: {desc}")
                return
            else:
                tdLog.exit("expected error did not occur")
except Exception as e:
traceback.print_exc()
raise e
def check_current_case(self,query_msg: RestMsgInfo, data):
url, header = query_msg.full_url, query_msg.header
conn = requests.post(url=url, data=data, headers=header)
try:
resp_code = conn.status_code
resp = conn.json()
status = resp["status"]
            if resp_code == 200 and status == "succ":
                tdLog.printNoPrefix(f"restful run success! url: {url}, sql: {data}")
            else:
                tdLog.exit(f"restful api test failed, url: {url}, sql: {data}")
except:
tdLog.debug(f"resp_code: {resp_code}, url: {url}")
traceback.print_exc()
raise
pass
def check_case_res_data(self, query_msg: RestMsgInfo, data):
url, header, api = query_msg.full_url, query_msg.header, query_msg.api_url
try:
ts_col = []
stb_list = [f"describe {self.dbname}.stb1", f"describe {self.dbname}.stb2"]
for stb in stb_list:
conn = requests.post(url=url, data=stb, headers=header)
resp = conn.json()
for col in resp["data"]:
if "TIMESTAMP" == col[1]:
ts_col.append(col[0])
index_dict = defaultdict(int)
conn = requests.post(url=url, data=data, headers=header)
resp = conn.json()
if resp["data"] is None:
return
for index, meta in enumerate(resp["column_meta"]):
if meta[0] in ts_col:
index_dict[meta[0]] = index
if len(index_dict) < 1:
return
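            # Expected result shapes checked below, per precision and endpoint:
            #   /rest/sql    local datetime strings, e.g. "2021-12-05 18:25:41.136"
            #                (23 chars for ms, 26 for us, 29 for ns)
            #   /rest/sqlt   raw epoch integers, magnitude ~10^12 (ms) / 10^15 (us) / 10^18 (ns)
            #   /rest/sqlutc ISO8601 strings with a timezone offset, whose length
            #                varies with how the offset is rendered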
if self.precision == "ms" and (api == "/rest/sql" or api == f"/rest/sql/{self.dbname}"):
for col_name, col_index in index_dict.items():
for res_data in resp["data"]:
                        if len(res_data[col_index]) != 23:
print(res_data)
tdLog.exit(f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}")
return
if self.precision == "ms" and (api == "/rest/sqlt" or api == f"/rest/sqlt/{self.dbname}"):
for col_name, col_index in index_dict.items():
for res_data in resp["data"]:
if not isinstance(res_data[col_index], int) or round(math.log10(res_data[col_index])) != 12:
print(res_data)
tdLog.exit(
f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}"
)
return
if self.precision == "ms" and (api == "/rest/sqlutc" or api == f"/rest/sqlutc/{self.dbname}"):
for col_name, col_index in index_dict.items():
for res_data in resp["data"]:
                        if len(res_data[col_index]) not in (29, 28, 27, 25):
print(res_data)
tdLog.exit(
f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}, length is: {len(res_data[col_index])}"
)
return
if self.precision == "us" and (api == "/rest/sql" or api == f"/rest/sql/{self.dbname}"):
for col_name, col_index in index_dict.items():
for res_data in resp["data"]:
                        if len(res_data[col_index]) != 26:
print(res_data)
tdLog.exit(f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}")
return
if self.precision == "us" and (api == "/rest/sqlt" or api == f"/rest/sqlt/{self.dbname}"):
for col_name, col_index in index_dict.items():
for res_data in resp["data"]:
if not isinstance(res_data[col_index], int) or round(math.log10(res_data[col_index])) != 15:
print(res_data)
tdLog.exit(
f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}"
)
return
if self.precision == "us" and (api == "/rest/sqlutc" or api == f"/rest/sqlutc/{self.dbname}"):
for col_name, col_index in index_dict.items():
for res_data in resp["data"]:
                        if len(res_data[col_index]) not in (32, 31, 30, 28):
print(res_data)
tdLog.exit(
f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}"
)
return
if self.precision == "ns" and (api == "/rest/sql" or api == f"/rest/sql/{self.dbname}"):
for col_name, col_index in index_dict.items():
for res_data in resp["data"]:
                        if len(res_data[col_index]) != 29:
print(res_data)
tdLog.exit(f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}")
return
if self.precision == "ns" and (api == "/rest/sqlt" or api == f"/rest/sqlt/{self.dbname}"):
for col_name, col_index in index_dict.items():
for res_data in resp["data"]:
if not isinstance(res_data[col_index], int) or round(math.log10(res_data[col_index])) != 18:
print(res_data)
tdLog.exit(
f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}"
)
return
if self.precision == "ns" and (api == "/rest/sqlutc" or api == f"/rest/sqlutc/{self.dbname}"):
for col_name, col_index in index_dict.items():
for res_data in resp["data"]:
                        if len(res_data[col_index]) not in (35, 34, 33, 31):
print(res_data)
tdLog.exit(
f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}"
)
return
except:
traceback.print_exc()
raise
pass
def sql_case_current(self):
case_list = [
"show databases",
f"show {self.dbname}.stables",
f"show {self.dbname}.tables",
"select server_status()",
"select client_version()",
"select server_version()",
"select database()",
f"show create database {self.dbname}",
f"show create stable {self.dbname}.stb1",
f"select * from {self.dbname}.stb1",
f"select ts from {self.dbname}.stb1",
f"select _c0 from {self.dbname}.stb1",
f"select c1 from {self.dbname}.stb1",
f"select c2 from {self.dbname}.stb1",
f"select c3 from {self.dbname}.stb1",
f"select c4 from {self.dbname}.stb1",
f"select c5 from {self.dbname}.stb1",
f"select c6 from {self.dbname}.stb1",
f"select c7 from {self.dbname}.stb1",
f"select c8 from {self.dbname}.stb1",
f"select c9 from {self.dbname}.stb1",
f"select c10 from {self.dbname}.stb1",
f"select tbname from {self.dbname}.stb1",
f"select tag1 from {self.dbname}.stb1",
f"select tag2 from {self.dbname}.stb1",
f"select tag3 from {self.dbname}.stb1",
f"select tag4 from {self.dbname}.stb1",
f"select tag5 from {self.dbname}.stb1",
f"select tag6 from {self.dbname}.stb1",
f"select tag7 from {self.dbname}.stb1",
f"select tag8 from {self.dbname}.stb1",
f"select tag9 from {self.dbname}.stb1",
f"select tag10 from {self.dbname}.stb1",
f"select count(*) from {self.dbname}.stb1",
f"select count(c1) from {self.dbname}.stb1",
f"select avg(c1) from {self.dbname}.stb1",
f"select twa(c1) from {self.dbname}.stb1 group by tbname",
f"select sum(c1) from {self.dbname}.stb1",
f"select stddev(c1) from {self.dbname}.stb1",
f"select min(c1) from {self.dbname}.stb1",
f"select max(c1) from {self.dbname}.stb1",
f"select first(c1) from {self.dbname}.stb1",
f"select first(*) from {self.dbname}.stb1",
f"select last(c1) from {self.dbname}.stb1",
f"select last(*) from {self.dbname}.stb1",
f"select top(c1, 3) from {self.dbname}.stb1",
f"select bottom(c1, 3) from {self.dbname}.stb1",
f"select apercentile(c1, 50, 't-digest') from {self.dbname}.stb1",
f"select last_row(c1) from {self.dbname}.stb1",
f"select last_row(*) from {self.dbname}.stb1",
f"select interp(c1) from {self.dbname}.stb1 where ts=0 group by tbname",
f"select interp(c1) from {self.dbname}.stb1 where ts=0 fill(next) group by tbname",
f"select interp(c1) from {self.dbname}.stb1 where ts>0 and ts <100000000 every(5s) group by tbname",
f"select diff(c1) from {self.dbname}.stb1 group by tbname",
f"select derivative(c1, 10m, 0) from {self.dbname}.stb1 group by tbname",
f"select derivative(c1, 10m, 1) from {self.dbname}.stb1 group by tbname",
f"select spread(c1) from {self.dbname}.stb1",
f"select ceil(c1) from {self.dbname}.stb1",
f"select floor(c1) from {self.dbname}.stb1",
f"select round(c1) from {self.dbname}.stb1",
f"select c1*2+2%c2-c2/2 from {self.dbname}.stb1",
f"select max(c1) from {self.dbname}.stb1 where ts>'2021-12-05 18:25:41.136' and ts<'2021-12-05 18:25:44.13' interval(1s) sliding(500a) fill(NULL) group by tbname",
f"select max(c1) from {self.dbname}.stb1 where (c1 >=0 and c1 <> 0 and c2 is not null or c1 < -1 or (c2 between 1 and 10) ) and tbname like 't_' ",
f"select max(c1) from {self.dbname}.stb1 group by tbname order by ts desc slimit 2 soffset 2 limit 1 offset 0",
f"select max(c1) from {self.dbname}.stb1 group by c6 order by ts desc slimit 1 soffset 1 limit 1 offset 0 ",
f"select * from {self.dbname}.t1",
f"select ts from {self.dbname}.t1",
f"select _c0 from {self.dbname}.t1",
f"select c1 from {self.dbname}.t1",
f"select c2 from {self.dbname}.t1",
f"select c3 from {self.dbname}.t1",
f"select c4 from {self.dbname}.t1",
f"select c5 from {self.dbname}.t1",
f"select c6 from {self.dbname}.t1",
f"select c7 from {self.dbname}.t1",
f"select c8 from {self.dbname}.t1",
f"select c9 from {self.dbname}.t1",
f"select c10 from {self.dbname}.t1",
f"select tbname from {self.dbname}.t1",
f"select tag1 from {self.dbname}.t1",
f"select tag2 from {self.dbname}.t1",
f"select tag3 from {self.dbname}.t1",
f"select tag4 from {self.dbname}.t1",
f"select tag5 from {self.dbname}.t1",
f"select tag6 from {self.dbname}.t1",
f"select tag7 from {self.dbname}.t1",
f"select tag8 from {self.dbname}.t1",
f"select tag9 from {self.dbname}.t1",
f"select tag10 from {self.dbname}.t1",
f"select count(*) from {self.dbname}.t1",
f"select count(c1) from {self.dbname}.t1",
f"select avg(c1) from {self.dbname}.t1",
f"select twa(c1) from {self.dbname}.t1",
f"select sum(c1) from {self.dbname}.t1",
f"select stddev(c1) from {self.dbname}.t1",
f"select leastsquares(c1, 1, 1) from {self.dbname}.t1",
f"select min(c1) from {self.dbname}.t1",
f"select max(c1) from {self.dbname}.t1",
f"select first(c1) from {self.dbname}.t1",
f"select first(*) from {self.dbname}.t1",
f"select last(c1) from {self.dbname}.t1",
f"select last(*) from {self.dbname}.t1",
f"select top(c1, 3) from {self.dbname}.t1",
f"select bottom(c1, 3) from {self.dbname}.t1",
f"select percentile(c1, 50) from {self.dbname}.t1",
f"select apercentile(c1, 50, 't-digest') from {self.dbname}.t1",
f"select last_row(c1) from {self.dbname}.t1",
f"select last_row(*) from {self.dbname}.t1",
f"select interp(c1) from {self.dbname}.t1 where ts=0 ",
f"select interp(c1) from {self.dbname}.t1 where ts=0 fill(next)",
f"select interp(c1) from {self.dbname}.t1 where ts>0 and ts <100000000 every(5s)",
f"select diff(c1) from {self.dbname}.t1",
f"select derivative(c1, 10m, 0) from {self.dbname}.t1",
f"select derivative(c1, 10m, 1) from {self.dbname}.t1",
f"select spread(c1) from {self.dbname}.t1",
f"select ceil(c1) from {self.dbname}.t1",
f"select floor(c1) from {self.dbname}.t1",
f"select round(c1) from {self.dbname}.t1",
f"select c1*2+2%c2-c2/2 from {self.dbname}.t1",
f"select max(c1) from {self.dbname}.t1 where ts>'2021-12-05 18:25:41.136' and ts<'2021-12-05 18:25:44.13' interval(1s) sliding(500a) fill(NULL)",
f"select max(c1) from {self.dbname}.t1 where (c1 >=0 and c1 <> 0 and c2 is not null or c1 < -1 or (c2 between 1 and 10) ) and c10 like 'nchar___1' ",
f"select max(c1) from {self.dbname}.t1 group by c6 order by ts desc ",
f"select stb1.c1, stb2.c1 from {self.dbname}.stb1 stb1, {self.dbname}.stb2 stb2 where stb1.ts=stb2.ts and stb1.tag1=stb2.ttag1",
f"select t1.c1, t2.c1 from {self.dbname}.t1 t1, {self.dbname}.t2 t2 where t1.ts=t2.ts",
f"select c1 from (select c2 c1 from {self.dbname}.stb1) ",
f"select c1 from {self.dbname}.t1 union all select c1 from {self.dbname}.t2"
]
return case_list
def sql_case_err(self):
case_list = [
"show database",
f"select percentile(c1, 50) from {self.dbname}.stb1 group by tbname",
f"select leastsquares(c1, 1, 1) from {self.dbname}.stb1",
]
return case_list
def port_case_current(self):
case_list = [6041]
return case_list
def port_case_err(self):
case_list = [
6030,
6051,
666666666,
None,
"abcd"
]
return case_list
def api_case_current(self):
case_List = [
"/rest/sql",
f"/rest/sql/{self.dbname}",
"/rest/sqlt",
f"/rest/sqlt/{self.dbname}",
"/rest/sqlutc",
f"/rest/sqlutc/{self.dbname}"
]
return case_List
def api_case_err(self):
case_list = [
"",
"/rest1/sql",
"/rest/sqlsqltsqlutc",
1,
["/rest", "/sql"],
"/influxdb/v1/write",
"/opentsdb/v1/put/json/db",
"/opentsdb/v1/put/telnet/db",
"/rest*",
"*"
]
return case_list
def header_case_current(self):
case_list = [
{'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='},
{'Authorization': 'Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04'}
]
return case_list
def header_case_err(self):
case_list = [
{'Authorization': 'Basic '},
{'Authorization': 'Taosd /root/taosdata'},
{'Authorization': True}
]
return case_list
def run_case_api_err(self):
err_cases = self.api_case_err()
count = 0
data = "show databases"
for case in err_cases:
print(f"err api case{count}: ", end="")
query_msg = RestMsgInfo(base_url=self.base_url, api_url=case)
self.check_err_case(query_msg=query_msg, data=data)
count += 1
pass
def run_case_port_err(self):
err_cases = self.port_case_err()
count = 0
data = "show databases"
for case in err_cases:
print(f"err port case{count}: ", end="")
query_msg = RestMsgInfo(base_url=self.base_url, port=case)
self.check_err_case(query_msg=query_msg, data=data)
count += 1
pass
def run_case_header_err(self):
err_cases = self.header_case_err()
count = 0
data = "show databases"
for case in err_cases:
print(f"err header case{count}: ", end="")
query_msg = RestMsgInfo(base_url=self.base_url, header=case)
self.check_err_case(query_msg=query_msg, data=data)
count += 1
pass
def run_case_sql_err(self):
err_cases = self.sql_case_err()
count = 0
for case in err_cases:
print(f"err sql case{count}: ", end="")
query_msg = RestMsgInfo(base_url=self.base_url)
self.check_err_sql_case(query_msg=query_msg, data=case)
count += 1
pass
def run_case_port_current(self):
current_cases = self.port_case_current()
count = 0
data = "show databases"
for case in current_cases:
print(f"current port case{count}: ", end="")
query_msg = RestMsgInfo(base_url=self.base_url, port=case)
self.check_current_case(query_msg=query_msg, data=data)
count += 1
pass
def run_case_api_current(self):
current_cases = self.api_case_current()
count = 0
data = "show databases"
for case in current_cases:
print(f"current api case{count}: ", end="")
query_msg = RestMsgInfo(base_url=self.base_url, api_url=case)
self.check_current_case(query_msg=query_msg, data=data)
count += 1
pass
def run_case_header_current(self):
current_cases = self.header_case_current()
count = 0
data = "show databases"
for case in current_cases:
print(f"current header case{count}: ", end="")
query_msg = RestMsgInfo(base_url=self.base_url, header=case)
self.check_current_case(query_msg=query_msg, data=data)
count += 1
pass
def run_case_sql_current(self):
current_cases = self.sql_case_current()
count = 0
api_cases = self.api_case_current()
for case in current_cases:
print(f"current sql case{count}: ", end="")
for api in api_cases:
query_msg = RestMsgInfo(base_url=self.base_url, api_url=api)
self.check_current_case(query_msg=query_msg, data=case)
self.check_case_res_data(query_msg=query_msg, data=case)
count += 1
pass
def run_case_err(self):
self.run_case_api_err()
self.run_case_port_err()
self.run_case_header_err()
self.run_case_sql_err()
pass
def run_case_current(self):
self.run_case_api_current()
self.run_case_port_current()
self.run_case_header_current()
self.run_case_sql_current()
pass
def run_all_case(self):
self.run_case_err()
self.run_case_current()
pass
def set_default_args(self):
nowtime = int(round(time.time() * 1000))
url = "127.0.0.1"
per_table_rows = 100
tbnum = 10
database_name = "db"
precision ="ms"
clear_data = True
config_default = {
"base_url" : url,
"precision" : precision,
"clear_data" : clear_data,
"database_name": database_name,
"tbnum" : tbnum,
"data_row" : per_table_rows,
"basetime" : nowtime,
"all_case" : False,
"all_err" : False,
"all_current" : True,
"err_case" : {
"port_err" : True,
"api_err" : True,
"header_err" : True,
"sql_err" : True,
},
"current_case" : {
"port_current" : True,
"api_current" : True,
"header_current" : True,
"sql_current" : True,
}
}
config_file_name = f"{os.path.dirname(os.path.abspath(__file__))}/rest_query_config.json"
with open(config_file_name, "w") as f:
json.dump(config_default, f)
return config_file_name
def run(self):
config_file = f"{os.path.dirname(os.path.abspath(__file__))}/rest_query_config.json"
if not os.path.isfile(config_file):
config_file = self.set_default_args()
with open(config_file, "r", encoding="utf-8") as f:
cfg = json.load(f)
tbnum = cfg["tbnum"]
data_row = cfg["data_row"]
basetime = cfg["basetime"]
self.dbname = cfg["database_name"]
self.base_url = cfg["base_url"]
self.precision = cfg["precision"]
        clear_data = bool(cfg["clear_data"])
        if clear_data:
            self.rest_test_table(dbname=self.dbname, tbnum=tbnum)
            self.rest_test_data(tbnum=tbnum, data_row=data_row, basetime=basetime)
        run_all_case = bool(cfg["all_case"])
        run_all_err_case = bool(cfg["all_err"])
        run_all_current_case = bool(cfg["all_current"])
        run_port_err_case = bool(cfg["err_case"]["port_err"])
        run_api_err_case = bool(cfg["err_case"]["api_err"])
        run_header_err_case = bool(cfg["err_case"]["header_err"])
        run_sql_err_case = bool(cfg["err_case"]["sql_err"])
        run_port_current_case = bool(cfg["current_case"]["port_current"])
        run_api_current_case = bool(cfg["current_case"]["api_current"])
        run_header_current_case = bool(cfg["current_case"]["header_current"])
        run_sql_current_case = bool(cfg["current_case"]["sql_current"])
if not (run_all_err_case | run_all_current_case | run_port_err_case | run_api_err_case |
run_header_err_case | run_sql_err_case | run_port_current_case | run_api_current_case
| run_header_current_case | run_sql_current_case):
run_all_case = True
if run_all_err_case & run_all_current_case:
run_all_case = True
if run_all_case:
self.run_all_case()
return
if run_all_err_case :
self.run_case_err()
return
if run_all_current_case:
self.run_case_current()
return
if run_port_err_case:
self.run_case_port_err()
if run_api_err_case:
self.run_case_api_err()
if run_header_err_case:
self.run_case_header_err()
if run_sql_err_case:
self.run_case_sql_err()
if run_port_current_case:
self.run_case_port_current()
if run_api_current_case:
self.run_case_api_current()
if run_header_current_case:
self.run_case_header_current()
if run_sql_current_case:
self.run_case_sql_current()
pass
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
import subprocess
class TDTestCase:
def caseDescription(self):
'''
        case1 <pxiao>: [TD-11977] start taosdump without taosd
        case2 <pxiao>: [TD-11977] start taosBenchmark without taosd
        case3 <pxiao>: [TD-11977] start taosAdaptor without taosd
'''
return
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
tools = ["taosdump", "taosBenchmark", "taosAdaptor"]
tdDnodes.stop(1)
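        # With taosd stopped, each tool is launched straight from the build
        # directory; per TD-11977 they are expected to handle the missing
        # server gracefully rather than hang or crash.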
for tool in tools:
path = tdDnodes.dnodes[1].getBuildPath(tool)
try:
path += "/build/bin/"
print(f"{path}{tool}")
if tool == "taosBenchmark":
os.system(f"{path}{tool} -y")
else:
os.system(f"{path}{tool}")
except:
pass
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
python3 test.py -f 0-management/1-stable/create_col_tag.py python3 test.py -f 0-management/1-stable/create_col_tag.py
python3 test.py -f 4-taosAdapter/taosAdapter_query.py
python3 test.py -f 4-taosAdapter/taosAdapter_insert.py
#python3 test.py -f 2-query/9-others/TD-11389.py # this case will run when this bug fix TD-11389 #python3 test.py -f 2-query/9-others/TD-11389.py # this case will run when this bug fix TD-11389
python3 test.py -f 5-taos-tools/taosdump/basic.py
\ No newline at end of file