Commit 2d45658f authored by Ganlin Zhao

Merge branch 'develop' into feature/TD-13414

......@@ -96,6 +96,7 @@ func (alert *Alert) doRefresh(firing bool, rule *Rule) bool {
case (!firing) && (alert.State == AlertStateFiring):
alert.State = AlertStateWaiting
alert.EndsAt = time.Now()
return false
case firing && (alert.State == AlertStateWaiting):
alert.StartsAt = time.Now()
......
......@@ -121,7 +121,7 @@ For details on the TDengine RESTful interface, please refer to the [official documentation](https://www.taosdata.com/cn
### Running the TDengine server and taosAdapter in Docker containers
In Docker containers for TDegnine 2.4.0.0 and later, a separately running component, taosAdapter, is provided to replace the http server that was built into the taosd process in earlier versions of TDengine. taosAdapter supports writing data to and querying data from the TDengine server over the RESTful interface, and provides InfluxDB/OpenTSDB-compatible data ingestion interfaces that allow InfluxDB/OpenTSDB applications to be ported to TDengine seamlessly. The new Docker images enable taosAdapter by default; it can be disabled by setting TAOS_DISABLE_ADAPTER=true in the docker run command, and taosAdapter can also be run on its own in a docker run command without running taosd.
In Docker containers for TDengine 2.4.0.0 and later, a separately running component, taosAdapter, is provided to replace the http server that was built into the taosd process in earlier versions of TDengine. taosAdapter supports writing data to and querying data from the TDengine server over the RESTful interface, and provides InfluxDB/OpenTSDB-compatible data ingestion interfaces that allow InfluxDB/OpenTSDB applications to be ported to TDengine seamlessly. The new Docker images enable taosAdapter by default; it can be disabled by setting TAOS_DISABLE_ADAPTER=true in the docker run command, and taosAdapter can also be run on its own in a docker run command without running taosd.
Note: If taosAdapter runs inside the container, you need to map additional ports as required; for the default port configuration and how to modify it, please refer to the [taosAdapter documentation](https://github.com/taosdata/taosadapter/blob/develop/README-CN.md).
......@@ -227,7 +227,7 @@ taos>
- **View the database.**
```bash
$ taos> show databases;
$ taos> SHOW DATABASES;
name | created_time | ntables | vgroups | ···
test | 2021-08-18 06:01:11.021 | 10000 | 6 | ···
log | 2021-08-18 05:51:51.065 | 4 | 1 | ···
......@@ -240,7 +240,7 @@ $ taos> show databases;
$ taos> use test;
Database changed.
$ taos> show stables;
$ taos> SHOW STABLES;
name | created_time | columns | tags | tables |
============================================================================================
meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 |
......@@ -251,10 +251,7 @@ Query OK, 1 row(s) in set (0.003259s)
- **View the table and limit the output to 10 entries.**
```bash
$ taos> select * from test.t0 limit 10;
DB error: Table does not exist (0.002857s)
taos> select * from test.d0 limit 10;
taos> SELECT * FROM test.d0 LIMIT 10;
ts | current | voltage | phase |
======================================================================================
2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 |
......@@ -274,7 +271,7 @@ Query OK, 10 row(s) in set (0.016791s)
- **View the tag values for the d0 table.**
```bash
$ taos> select groupid, location from test.d0;
$ taos> SELECT groupid, location FROM test.d0;
groupid | location |
=================================
0 | shanghai |
......@@ -292,7 +289,7 @@ echo "foo:1|c" | nc -u -w0 127.0.0.1 6044
Then you can use the taos shell to query the contents of the database statsd and the super table foo that taosAdapter created automatically:
```
taos> show databases;
taos> SHOW DATABASES;
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
====================================================================================================================================================================================================================================================================================
log | 2021-12-28 09:18:55.765 | 12 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
......@@ -302,13 +299,13 @@ Query OK, 2 row(s) in set (0.002112s)
taos> use statsd;
Database changed.
taos> show stables;
taos> SHOW STABLES;
name | created_time | columns | tags | tables |
============================================================================================
foo | 2021-12-28 09:21:48.894 | 2 | 1 | 1 |
Query OK, 1 row(s) in set (0.001160s)
taos> select * from foo;
taos> SELECT * FROM foo;
ts | value | metric_type |
=======================================================================================
2021-12-28 09:21:48.840820836 | 1 | counter |
......
......@@ -118,8 +118,8 @@ TDengine currently only supports Grafana for dashboard visualization, so if your
| No. | metric | value name | type | tag1 | tag2 | tag3 | tag4 | tag5 |
| ---- | -------------- | ------ | ------ | ---- | ----------- | -------------------- | --------- | ------ |
| 1 | memory | value | double | host | memory_type | memory_type_instance | source | n/a |
| 2 | swap | value | double | host | swap_type | swap_type_instance | source | n/a |
| 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source |
TDengine requires that stored data have a schema, i.e. a super table must be created and its schema specified before data is written. To establish the data schema, you have two options: 1) make full use of TDengine's native support for writing OpenTSDB data, call the API provided by TDengine to write the data (in text-line or JSON format), and have the single-value model created automatically. This approach requires no major changes to the data-writing application and no conversion of the written data format.
......@@ -198,7 +198,7 @@ For the specific usage of DataX and how to use DataX to write data into TDengine, please refer
2) With the system running at full load, if there are enough spare compute and IO resources, a multi-threaded import mechanism can be built to maximize the efficiency of data migration. Given the heavy CPU load that data parsing imposes, the maximum number of parallel tasks needs to be controlled to avoid overloading the whole system while importing historical data.
Because TDegnine itself is easy to operate, there is no need to maintain indexes or convert data formats during the whole process; the process only needs to be executed sequentially.
Because TDengine itself is easy to operate, there is no need to maintain indexes or convert data formats during the whole process; the process only needs to be executed sequentially.
Once the historical data has been fully imported into TDengine, the two systems are running at the same time; query requests can then be switched to TDengine, completing a seamless application switchover.
......
......@@ -197,7 +197,7 @@ column[0]:FLOAT column[1]:INT column[2]:FLOAT
Press enter key to continue or Ctrl-C to stop
```
After you press Enter, this command automatically creates a super table `meters` under the database test, with 10,000 tables under it named "d0" to "d9999". Each table has 10,000 records, and each record has four fields (ts, current, voltage, phase) with timestamps from "2017-07-14 10:40:00.000" to "2017-07-14 10:40:09.999". Each table also has the tags location and groupId; groupId is set from 1 to 10 and location is set to "beijing" or "shanghai".
After you press Enter, this command automatically creates a super table `meters` under the database test, with 10,000 tables under it named "d0" to "d9999". Each table has 10,000 records, and each record has four fields (ts, current, voltage, phase) with timestamps from "2017-07-14 10:40:00.000" to "2017-07-14 10:40:09.999". Each table also has the tags location and groupid; groupid is set from 1 to 10 and location is set to "beijing" or "shanghai".
This command takes a few minutes to execute and ends up inserting a total of 100 million records.
......@@ -217,7 +217,7 @@ taos>
- **View the database.**
```bash
$ taos> show databases;
$ taos> SHOW DATABASES;
name | created_time | ntables | vgroups | ···
test | 2021-08-18 06:01:11.021 | 10000 | 6 | ···
log | 2021-08-18 05:51:51.065 | 4 | 1 | ···
......@@ -227,10 +227,10 @@ $ taos> show databases;
- **View Super Tables.**
```bash
$ taos> use test;
$ taos> USE test;
Database changed.
$ taos> show stables;
$ taos> SHOW STABLES;
name | created_time | columns | tags | tables |
============================================================================================
meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 |
......@@ -241,10 +241,7 @@ Query OK, 1 row(s) in set (0.003259s)
- **View the table and limit the output to 10 entries.**
```bash
$ taos> select * from test.t0 limit 10;
DB error: Table does not exist (0.002857s)
taos> select * from test.d0 limit 10;
taos> SELECT * FROM test.d0 LIMIT 10;
ts | current | voltage | phase |
======================================================================================
2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 |
......@@ -264,7 +261,7 @@ Query OK, 10 row(s) in set (0.016791s)
- **View the tag values for the d0 table.**
```bash
$ taos> select groupid, location from test.d0;
$ taos> SELECT groupid, location FROM test.d0;
groupid | location |
=================================
0 | shanghai |
......@@ -283,23 +280,23 @@ echo "foo:1|c" | nc -u -w0 127.0.0.1 6044
Then you can use the taos shell to query the contents of the database statsd and the super table foo that taosAdapter created automatically.
```
taos> show databases;
taos> SHOW DATABASES;
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
====================================================================================================================================================================================================================================================================================
log | 2021-12-28 09:18:55.765 | 12 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
statsd | 2021-12-28 09:21:48.841 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
Query OK, 2 row(s) in set (0.002112s)
taos> use statsd;
taos> USE statsd;
Database changed.
taos> show stables;
taos> SHOW STABLES;
name | created_time | columns | tags | tables |
============================================================================================
foo | 2021-12-28 09:21:48.894 | 2 | 1 | 1 |
Query OK, 1 row(s) in set (0.001160s)
taos> select * from foo;
taos> SELECT * FROM foo;
ts | value | metric_type |
=======================================================================================
2021-12-28 09:21:48.840820836 | 1 | counter |
......
......@@ -570,13 +570,15 @@ for row in results:
- Write data
```python
import taos
import datetime
conn = taos.connect()
c1 = conn.cursor()
# Create a database
c1.execute('create database db')
c1.execute('use db')
c1.execute('create database if not exists db1')
c1.execute('use db1')
# Create a table
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
c1.execute('create table if not exists tb (ts timestamp, temperature int, humidity float)')
# Insert data
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
......@@ -584,9 +586,11 @@ for row in results:
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
    start_time += time_interval
    sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
affected_rows += c1.execute(' '.join(sqlcmd))
print("inserted %s records" % affected_rows)
```
- Query data
......@@ -599,12 +603,12 @@ for row in results:
numOfRows = c1.rowcount
numOfCols = len(c1.description)
for irow in range(numOfRows):
    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2]))
# Use cursor loop directly to pull query result
c1.execute('select * from tb')
for data in c1:
    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2]))
```
#### Query API
......
......@@ -863,7 +863,9 @@ TDengine supports aggregations over data, they are listed below:
Applicable Fields: All types except timestamp.
Supported version: versions after 2.6.0.
Note: Since the amount of returned data is unknown, and considering memory constraints, it is recommended that the amount of non-repeated data not exceed 100,000 rows; otherwise an error will be reported.
Example:
```mysql
taos> select voltage from d002;
......@@ -886,7 +888,9 @@ TDengine supports aggregations over data, they are listed below:
```mysql
SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
Function: The hyperloglog algorithm is used to return the cardinality of a column. For large amounts of data, the algorithm can significantly reduce memory usage, but the cardinality is an estimate with a standard error of 0.81%.
Function:
- The hyperloglog algorithm is used to return the cardinality of a column. For large amounts of data, the algorithm can significantly reduce memory usage, but the cardinality is an estimate, and the standard error (the standard deviation of the mean over repeated experiments, not the deviation from the true result) is 0.81%.
- When the amount of data is small, the algorithm is not very accurate. In that case you can use a query of the form: select count(data) from (select unique(col) as data from table).
Return Data Type: Integer.
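Example (a hedged sketch of the estimate and of the exact-count workaround mentioned above, reusing the example table d002 and its voltage column from earlier in this section):
```mysql
-- estimated cardinality: memory-efficient, standard error about 0.81%
taos> select hyperloglog(voltage) from d002;
-- exact cardinality for small data sets, via unique() in a subquery
taos> select count(data) from (select unique(voltage) as data from d002);
```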
......@@ -1215,8 +1219,10 @@ TDengine supports aggregations over data, they are listed below:
Supported version: versions after 2.6.0.
Note: This function can be applied to ordinary tables and super tables. It cannot be used with window operations, such as interval/state_window/session_window.
Note:
- This function can be applied to ordinary tables and super tables. It cannot be used with window operations, such as interval/state_window/session_window.
- Since the amount of returned data is unknown, and considering memory constraints, it is recommended that the amount of non-repeated data not exceed 100,000 rows; otherwise an error will be reported.
Example:
```mysql
taos> select ts,voltage from unique1;
......@@ -1296,6 +1302,412 @@ TDengine supports aggregations over data, they are listed below:
Query OK, 1 row(s) in set (0.000836s)
```
- **ASIN**
```mysql
SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the arc-sine of the input value.
Output Data Type: DOUBLE.
Input: applies to values of all types except timestamp, binary, nchar, and bool. Cannot be applied to tag columns of super tables.
Embedded Query Support: Both Outer Query and Inner Query
Notes:
If the input value is NULL, the output value is NULL.
It is a scalar function and cannot be used together with aggregate functions.
Applies to columns of normal tables, child tables, and super tables.
Supported after version 2.6.0.x
- **ACOS**
```mysql
SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the arc-cosine of the input value.
Output Data Type: DOUBLE.
Input: applies to values of all types except timestamp, binary, nchar, and bool. Cannot be applied to tag columns of super tables.
Embedded Query Support: Both Outer Query and Inner Query
Notes:
If the input value is NULL, the output value is NULL.
It is a scalar function and cannot be used together with aggregate functions.
Applies to columns of normal tables, child tables, and super tables.
Supported after version 2.6.0.x
- **ATAN**
```mysql
SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the arc-tangent of the input value.
Output Data Type: DOUBLE.
Input: applies to values of all types except timestamp, binary, nchar, and bool. Cannot be applied to tag columns of super tables.
Embedded Query Support: Both Outer Query and Inner Query
Notes:
If the input value is NULL, the output value is NULL.
It is a scalar function and cannot be used together with aggregate functions.
Applies to columns of normal tables, child tables, and super tables.
Supported after version 2.6.0.x
- **SIN**
```mysql
SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the sine of the input value.
Output Data Type: DOUBLE.
Input: applies to values of all types except timestamp, binary, nchar, and bool. Cannot be applied to tag columns of super tables.
Embedded Query Support: Both Outer Query and Inner Query
Notes:
If the input value is NULL, the output value is NULL.
It is a scalar function and cannot be used together with aggregate functions.
Applies to columns of normal tables, child tables, and super tables.
Supported after version 2.6.0.x
- **COS**
```mysql
SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the cosine of the input value.
Output Data Type: DOUBLE.
Input: applies to values of all types except timestamp, binary, nchar, and bool. Cannot be applied to tag columns of super tables.
Embedded Query Support: Both Outer Query and Inner Query
Notes:
If the input value is NULL, the output value is NULL.
It is a scalar function and cannot be used together with aggregate functions.
Applies to columns of normal tables, child tables, and super tables.
Supported after version 2.6.0.x
- **TAN**
```mysql
SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the tangent of the input value.
Output Data Type: DOUBLE.
Input: applies to values of all types except timestamp, binary, nchar, and bool. Cannot be applied to tag columns of super tables.
Embedded Query Support: Both Outer Query and Inner Query
Notes:
If the input value is NULL, the output value is NULL.
It is a scalar function and cannot be used together with aggregate functions.
Applies to columns of normal tables, child tables, and super tables.
Supported after version 2.6.0.x
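Example (a minimal hedged sketch covering the six trigonometric functions above, since ASIN, ACOS, ATAN, SIN, COS, and TAN share the same calling pattern; the table t1 and its DOUBLE column val are hypothetical):
```mysql
taos> select asin(val), acos(val), atan(val) from t1;
taos> select sin(val), cos(val), tan(val) from t1;
```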
- **POW**
```mysql
SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the input value raised to the power given by the second argument.
Output Data Type: DOUBLE.
Input: applies to values of all types except timestamp, binary, nchar, and bool. Cannot be applied to tag columns of super tables.
Embedded Query Support: Both Outer Query and Inner Query
Notes:
If the input value is NULL, the output value is NULL.
It is a scalar function and cannot be used together with aggregate functions.
Applies to columns of normal tables, child tables, and super tables.
Supported after version 2.6.0.x
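Example (a hedged sketch; the table t1 and column val are hypothetical):
```mysql
taos> select pow(val, 2) from t1;   -- val squared
taos> select pow(val, 0.5) from t1; -- same as sqrt(val)
```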
- **LOG**
```mysql
SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the logarithm of the input value with the specified base.
Output Data Type: DOUBLE.
Input: applies to values of all types except timestamp, binary, nchar, and bool. Cannot be applied to tag columns of super tables.
Embedded Query Support: Both Outer Query and Inner Query
Notes:
If the input value is NULL, the output value is NULL.
It is a scalar function and cannot be used together with aggregate functions.
Applies to columns of normal tables, child tables, and super tables.
Supported after version 2.6.0.x
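Example (a hedged sketch; the table t1 and column val are hypothetical):
```mysql
taos> select log(val, 2) from t1;  -- base-2 logarithm
taos> select log(val, 10) from t1; -- base-10 logarithm
```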
- **ABS**
```mysql
SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the absolute value of the input value.
Output Data Type: If the input is an integer value, the output data type is UBIGINT. If the input is a FLOAT or DOUBLE value, the output data type is DOUBLE.
Input: applies to values of all types except timestamp, binary, nchar, and bool. Cannot be applied to tag columns of super tables.
Embedded Query Support: Both Outer Query and Inner Query
Notes:
If the input value is NULL, the output value is NULL.
It is a scalar function and cannot be used together with aggregate functions.
Applies to columns of normal tables, child tables, and super tables.
Supported after version 2.6.0.x
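Example (a hedged sketch illustrating the type promotion described above; the table t1, INT column i32, and DOUBLE column val are hypothetical):
```mysql
taos> select abs(i32) from t1; -- integer input, UBIGINT output
taos> select abs(val) from t1; -- double input, DOUBLE output
```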
- **SQRT**
```mysql
SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the square root of the input value.
Output Data Type: DOUBLE.
Input: applies to values of all types except timestamp, binary, nchar, and bool. Cannot be applied to tag columns of super tables.
Embedded Query Support: Both Outer Query and Inner Query
Notes:
If the input value is NULL, the output value is NULL.
It is a scalar function and cannot be used together with aggregate functions.
Applies to columns of normal tables, child tables, and super tables.
Supported after version 2.6.0.x
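Example (a hedged sketch; the table t1 and column val are hypothetical):
```mysql
taos> select sqrt(val) from t1;
```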
- **CAST**
```mysql
SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Converts a value into the specified data type type_name.
Output Data Type: the specified type_name. Supported types include BIGINT, BINARY(N), TIMESTAMP, NCHAR(N), and BIGINT UNSIGNED.
Input: normal columns, constants, scalar functions, and arithmetic computations (+, -, *, /, %) among them. Input data types include BOOL, TINYINT, SMALLINT, INT, BIGINT, FLOAT, DOUBLE, BINARY(M), TIMESTAMP, NCHAR(M), TINYINT UNSIGNED, SMALLINT UNSIGNED, INT UNSIGNED, and BIGINT UNSIGNED.
Notes:
Reports an error for unsupported casts.
It is a scalar function and its output is NULL for NULL input.
Supported after version 2.6.0.x
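Example (a hedged sketch against two of the supported target types; the table t1 and its INT column i32 are hypothetical, and ts is the table's timestamp column):
```mysql
taos> select cast(i32 as bigint) from t1;
taos> select cast(ts as binary(32)) from t1;
```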
- **CONCAT**
```mysql
SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the string obtained by concatenating the arguments.
Output Data Type: With binary inputs, the output data type is binary. With nchar inputs, the output data type is nchar.
Input: all inputs must be of data type binary or nchar. Cannot be applied to tag columns.
Notes:
If one of the string inputs is NULL, the resulting output is NULL.
The function takes 2 to 8 string values as input. All inputs must be of the same data type.
This function applies to normal tables, child tables, and super tables.
This function applies to both outer query and inner query.
Supported after version 2.6.0.x
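Example (a hedged sketch; the table t1 and BINARY columns first_name and last_name are hypothetical, and the string literal counts as one of the 2 to 8 inputs):
```mysql
taos> select concat(first_name, ' ', last_name) from t1;
```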
- **CONCAT_WS**
```
SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the string obtained by concatenating the arguments with the separator.
Output Data Type: With binary inputs, the output data type is binary. With nchar inputs, the output data type is nchar.
Input: all inputs must be of data type binary or nchar. Cannot be applied to tag columns.
Notes:
Returns NULL when the separator is NULL. If the separator is not NULL and all the other string values are NULL, the result is an empty string.
The function takes 3 to 9 string values as input. All inputs must be of the same data type.
This function applies to normal tables, child tables, and super tables.
This function applies to both outer query and inner query.
Supported after version 2.6.0.x
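Example (a hedged sketch; the table t1 and BINARY columns country, city, and street are hypothetical):
```mysql
taos> select concat_ws('-', country, city, street) from t1;
```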
- **LENGTH**
```
SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the length of the string, measured in bytes.
Output Data Type: INT.
Input: BINARY or NCHAR values. Cannot be applied to tag columns.
Notes:
Returns NULL when the input is NULL.
This function applies to normal tables, child tables, and super tables.
This function applies to both outer query and inner query.
Supported after version 2.6.0.x
- **CHAR_LENGTH**
```
SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the length of the string, measured in characters.
Output Data Type: INT.
Input: BINARY or NCHAR values. Cannot be applied to tag columns.
Notes:
Returns NULL when the input is NULL.
This function applies to normal tables, child tables, and super tables.
This function applies to both outer query and inner query.
Supported after version 2.6.0.x
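Example (a hedged sketch contrasting LENGTH and CHAR_LENGTH above; the table t1 and NCHAR column name are hypothetical. For multi-byte characters the two results differ, since LENGTH counts bytes and CHAR_LENGTH counts characters):
```mysql
taos> select length(name), char_length(name) from t1;
```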
- **LOWER**
```
SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the input value in lower case.
Output Data Type: BINARY or NCHAR. Same data type as the input.
Input: BINARY or NCHAR values. Cannot be applied to tag columns.
Notes:
Returns NULL when the input is NULL.
This function applies to normal tables, child tables, and super tables.
This function applies to both outer query and inner query.
Supported after version 2.6.0.x
- **UPPER**
```
SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Returns the input value in upper case.
Output Data Type: BINARY or NCHAR. Same data type as the input.
Input: BINARY or NCHAR values. Cannot be applied to tag columns.
Notes:
Returns NULL when the input is NULL.
This function applies to normal tables, child tables, and super tables.
This function applies to both outer query and inner query.
Supported after version 2.6.0.x
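Example (a hedged sketch covering LOWER and UPPER above; the table t1 and BINARY column name are hypothetical):
```mysql
taos> select lower(name), upper(name) from t1;
```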
- **LTRIM**
```
SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Removes leading spaces from a string.
Output Data Type: BINARY or NCHAR. Same data type as the input.
Input: BINARY or NCHAR values. Cannot be applied to tag columns.
Notes:
Returns NULL when the input is NULL.
This function applies to normal tables, child tables, and super tables.
This function applies to both outer query and inner query.
Supported after version 2.6.0.x
- **RTRIM**
```
SELECT RTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Removes trailing spaces from a string.
Output Data Type: BINARY or NCHAR. Same data type as the input.
Input: BINARY or NCHAR values. Cannot be applied to tag columns.
Notes:
Returns NULL when the input is NULL.
This function applies to normal tables, child tables, and super tables.
This function applies to both outer query and inner query.
Supported after version 2.6.0.x
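Example (a hedged sketch covering LTRIM and RTRIM above; the table t1 and BINARY column name are hypothetical):
```mysql
taos> select ltrim(name), rtrim(name) from t1;
```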
- **SUBSTR**
```
SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
```
Function: Extracts a substring from string str, starting at pos and extracting len characters.
Output Data Type: BINARY or NCHAR. Same data type as the input.
Input: BINARY or NCHAR values. Cannot be applied to tag columns.
Notes:
Returns NULL when the input is NULL.
The input pos can be positive or negative: if positive, the function extracts from the beginning of the string; if negative, it extracts from the end of the string.
If len is omitted, the output is the whole substring starting from pos.
This function applies to normal tables, child tables, and super tables.
This function applies to both outer query and inner query.
Supported after version 2.6.0.x
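Example (a hedged sketch showing positive and negative pos and the optional len; the table t1 and BINARY column name are hypothetical):
```mysql
taos> select substr(name, 1, 3) from t1; -- first three characters
taos> select substr(name, -3) from t1;   -- last three characters
```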
- **Arithmetic Operations**
```mysql
......
......@@ -63,7 +63,7 @@ This allows collectd to push the data to taosAdapter using the push to OpenTSDB
After the data has been written to TDengine properly, you can adapt Grafana to visualize the data written to TDengine. There is a connection plugin for Grafana in the TDengine installation directory connector/grafanaplugin. The way to use this plugin is simple.
First copy the entire dist directory under the grafanaplugin directory to Grafana's plugins directory (the default address is /var/lib/grafana/plugins/), and then restart Grafana to see the TDengine data source under the Add Data Source menu.
First copy the entire `dist` directory under the grafanaplugin directory to Grafana's plugins directory (the default address is /var/lib/grafana/plugins/), and then restart Grafana to see the TDengine data source under the Add Data Source menu.
```shell
sudo cp -r . /var/lib/grafana/plugins/tdengine
......@@ -144,15 +144,15 @@ The steps are as follows: the name of the metrics is used as the name of the TDe
Create 3 super tables in TDengine.
```sql
create stable memory(ts timestamp, val float) tags(host binary(12), memory_type binary(20), memory_type_instance binary(20), source binary(20));
create stable swap(ts timestamp, val double) tags(host binary(12), swap_type binary(20), swap_type_instance binary(20), source binary(20));
create stable disk(ts timestamp, val double) tags(host binary(12), disk_point binary(20), disk_instance binary(20), disk_type binary(20), source binary(20));
CREATE STABLE memory(ts timestamp, val float) tags(host binary(12), memory_type binary(20), memory_type_instance binary(20), source binary(20));
CREATE STABLE swap(ts timestamp, val double) tags(host binary(12), swap_type binary(20), swap_type_instance binary(20), source binary(20));
CREATE STABLE disk(ts timestamp, val double) tags(host binary(12), disk_point binary(20), disk_instance binary(20), disk_type binary(20), source binary(20));
```
For sub-tables use dynamic table creation as shown below:
```sql
insert into memory_vm130_memory_bufferred_collectd using memory tags(vm130, memory, 'buffer', 'collectd') values(1632979445, 3.0656);
INSERT INTO memory_vm130_memory_buffered_collectd USING memory TAGS(vm130, memory, 'buffer', 'collectd') VALUES(1632979445, 3.0656);
```
Eventually about 340 sub-tables and 3 super-tables will be created in the system. Note that if the use of concatenated tagged values causes the sub-table names to exceed the system limit (191 bytes), then some encoding (e.g. MD5) needs to be used to convert them to an acceptable length.
......@@ -168,7 +168,7 @@ Data is subscribed from the message queue and an adapted writer is started to wr
After the data starts to be written for a sustained period, SQL statements can be used to check whether the amount of data written meets the expected write requirements. The following SQL statement is used to count the amount of data.
```sql
select count(*) from memory
SELECT COUNT(*) FROM memory
```
After completing the query, if the written data does not differ from the expected one, and there are no abnormal error messages from the writing program itself, then you can confirm that the data writing is complete and valid.
......@@ -213,7 +213,7 @@ Notes.
1. The value within the Interval needs to be the same as the interval value of the outer query.
As the interpolation of values in OpenTSDB uses linear interpolation, use fill(linear) to declare the interpolation type in the interpolation clause. The following functions with the same interpolation requirements are all handled by this method.
As the interpolation of values in OpenTSDB uses linear interpolation, use FILL(linear) to declare the interpolation type in the interpolation clause. The following functions with the same interpolation requirements are all handled by this method.
2. The 20s parameter in Interval means that the inner query will generate results in a 20-second window. In a real query, it needs to be adjusted to the time interval between different records. This ensures that the interpolation results are generated equivalently to the original data.
......@@ -226,7 +226,7 @@ Equivalent function: count
Example.
select count(*) from super_table_name;
SELECT COUNT(*) FROM super_table_name;
**Dev**
......@@ -234,7 +234,7 @@ Equivalent function: stddev
Example.
Select stddev(val) from table_name
SELECT STDDEV(val) FROM table_name
**Estimated percentiles**
......@@ -242,7 +242,7 @@ Equivalent function: apercentile
Example.
Select apercentile(col1, 50, "t-digest") from table_name
SELECT APERCENTILE(col1, 50, "t-digest") FROM table_name
Remark.
......@@ -254,7 +254,7 @@ Equivalent function: first
Example.
Select first(col1) from table_name
SELECT FIRST(col1) FROM table_name
**Last**
......@@ -262,7 +262,7 @@ Equivalent function: last
Example.
Select last(col1) from table_name
SELECT LAST(col1) FROM table_name
**Max**
......@@ -270,7 +270,7 @@ Equivalent function: max
Example.
Select max(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s)
SELECT MAX(value) FROM (SELECT FIRST(val) value FROM table_name INTERVAL(10s) FILL(linear)) INTERVAL(10s)
Note: The Max function requires interpolation, for the reasons given above.
......@@ -280,13 +280,13 @@ Equivalent function: min
Example.
Select min(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s);
SELECT MIN(value) FROM (SELECT FIRST(val) value FROM table_name INTERVAL(10s) FILL(linear)) INTERVAL(10s);
**MinMax**
Equivalent function: max
Select max(val) from table_name
SELECT MAX(val) FROM table_name
Note: This function does not require interpolation, so it can be calculated directly.
......@@ -294,7 +294,7 @@ Note: This function does not require interpolation, so it can be calculated dire
Equivalent function: min
Select min(val) from table_name
SELECT MIN(val) FROM table_name
Note: This function does not require interpolation, so it can be calculated directly.
......@@ -308,7 +308,7 @@ Note:
Equivalent function: sum
Select max(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s)
SELECT MAX(value) FROM (SELECT FIRST(val) value FROM table_name INTERVAL(10s) FILL(linear)) INTERVAL(10s)
Note: This function does not require interpolation, so it can be calculated directly.
......@@ -316,7 +316,7 @@ Note: This function does not require interpolation, so it can be calculated dire
Equivalent function: sum
Select sum(val) from table_name
SELECT SUM(val) FROM table_name
Note: This function does not require interpolation, so it can be calculated directly.
......@@ -356,7 +356,7 @@ Combining the above formula and bringing the parameters into the calculation for
### Storage device selection considerations
The hard disks used should have good random read performance; if SSDs are available, use them as much as possible. Good random read performance on the disks is extremely helpful in improving the system's query performance and can improve the overall query responsiveness of the system. To obtain good query performance, the single-threaded random read IOPS of the hard disk device should not be lower than 1000, and 5000 IOPS or more is preferable. To evaluate the random read IO performance of the current device, it is recommended to use fio software to evaluate its operational performance (see Appendix 1 for details on how to use it) and confirm whether it can meet the random read performance requirements for large files.
The hard disks used should have good random read performance; if SSDs are available, use them as much as possible. Good random read performance on the disks is extremely helpful in improving the system's query performance and can improve the overall query responsiveness of the system. To obtain good query performance, the single-threaded random read IOPS of the hard disk device should not be lower than 1000, and 5000 IOPS or more is preferable. To evaluate the random read IO performance of the current device, it is recommended to use `fio` software to evaluate its operational performance (see Appendix 1 for details on how to use it) and confirm whether it can meet the random read performance requirements for large files.
Hard disk write performance has little impact on TDengine. TDengine writes in append mode, so as long as the device has good sequential write performance, both SAS hard disks and SSDs can, in general, meet TDengine's requirements for disk write performance well.
......@@ -390,7 +390,7 @@ FQDN, firstEp, secondEP, dataDir, logDir, tmpDir, serverPort. The specific meani
Follow the same steps to set the parameters on the node that needs to run and start the taosd service, then add the Dnode to the cluster.
Finally, start taos and execute the command show dnodes; if you can see all the nodes that have joined the cluster, the cluster has been built successfully. For the specific procedure and notes, please refer to the document [TDengine Cluster Installation, Management](https://www.taosdata.com/cn/documentation/cluster).
Finally, start taos and execute the command `SHOW DNODES`; if you can see all the nodes that have joined the cluster, the cluster has been built successfully. For the specific procedure and notes, please refer to the document [TDengine Cluster Installation, Management](https://www.taosdata.com/cn/documentation/cluster).
## Appendix 4: Super table names
......
......@@ -3359,11 +3359,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
} else {
tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true);
int64_t numRowsSelected = GET_INT64_VAL(val);
if (functionId != TSDB_FUNC_UNIQUE && (numRowsSelected <= 0 || numRowsSelected > 100)) { // todo use macro
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg12);
}
if(functionId == TSDB_FUNC_UNIQUE){ // consider of memory size
if(pSchema->bytes < 10){
GET_INT64_VAL(val) = MAX_UNIQUE_RESULT_ROWS * 100;
......@@ -3373,6 +3368,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
GET_INT64_VAL(val) = MAX_UNIQUE_RESULT_ROWS;
}
}
int64_t numRowsSelected = GET_INT64_VAL(val);
if (functionId != TSDB_FUNC_UNIQUE && (numRowsSelected <= 0 || numRowsSelected > 100)) { // todo use macro
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg12);
}
// todo REFACTOR
// set the first column ts for top/bottom query
SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
......@@ -3385,9 +3385,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
aAggs[TSDB_FUNC_TS].name, pExpr);
colIndex += 1; // the first column is ts
getResultDataInfo(pSchema->type, pSchema->bytes, functionId, (int32_t)numRowsSelected, &resultType,
&resultSize, &interResult, 0, false, pUdfInfo);
pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd),
resultSize, false);
interResult, false);
if (functionId == TSDB_FUNC_TAIL){
int64_t offset = 0;
if (taosArrayGetSize(pItem->pNode->Expr.paramList) == 3){
......@@ -7054,7 +7055,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
return invalidOperationMsg(pMsgBuf, msg1);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomUniqueQuery(pQueryInfo)) {
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomUniqueQuery(pQueryInfo)){
bool validOrder = false;
SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
......@@ -7065,15 +7066,8 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
if (!validOrder) {
return invalidOperationMsg(pMsgBuf, msg7);
}
if (udf) {
return invalidOperationMsg(pMsgBuf, msg11);
}
pQueryInfo->groupbyExpr.orderType = pItem->sortOrder;
}
if (isTopBottomUniqueQuery(pQueryInfo)) {
}else if (isTopBottomUniqueQuery(pQueryInfo)) {
SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
......@@ -7093,14 +7087,16 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
return invalidOperationMsg(pMsgBuf, msg5);
}
}
pQueryInfo->order.order = pItem->sortOrder;
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
}else{
pQueryInfo->order.order = pItem->sortOrder;
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
}
if (udf) {
return invalidOperationMsg(pMsgBuf, msg11);
}
pQueryInfo->order.order = pItem->sortOrder;
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
} else {
// handle the temp table order by clause. You can order by any single column in case of the temp table, created by
// inner subquery.
......
......@@ -1491,7 +1491,7 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
pex->base.param[2].nType = TSDB_DATA_TYPE_INT;
pex->base.param[2].i64 = pInputQI->order.order;
}
}
}
}
tscDebug("0x%"PRIx64" create QInfo 0x%"PRIx64" to execute the main query while all nest queries are ready", pSql->self, pSql->self);
......
using TDengineDriver;
using Test.UtilsTools;
using System;
using System.Collections.Generic;
using Xunit;
using Test.UtilsTools.DataSource;
using System.Threading;
using Xunit.Abstractions;
using Test.Fixture;
using Test.Case.Attributes;
namespace Cases
{
[TestCaseOrderer("XUnit.Case.Orderers.TestExeOrderer", "Cases.ExeOrder")]
[Collection("Database collection")]
public class SubscribeCases
{
DatabaseFixture database;
private readonly ITestOutputHelper output;
public SubscribeCases(DatabaseFixture fixture, ITestOutputHelper output)
{
this.database = fixture;
this.output = output;
}
/// <author>xiaolei</author>
/// <Name>SubscribeCases.ConsumeFromBegin</Name>
/// <describe>Subscribe a table and consume from beginning.</describe>
/// <filename>Subscribe.cs</filename>
/// <result>pass or failed </result>
[Fact(DisplayName = "SubscribeCases.ConsumeFromBegin()"), TestExeOrder(1), Trait("Category", "Without callback")]
public void ConsumeFromBegin()
{
IntPtr conn = database.conn;
IntPtr _res = IntPtr.Zero;
var tableName = "subscribe_from_begin";
var createSql = $"create table if not exists {tableName}(ts timestamp,bl bool,i8 tinyint,i16 smallint,i32 int,i64 bigint,bnr binary(50),nchr nchar(50))tags(t_i32 int,t_bnr binary(50),t_nchr nchar(50))";
var dropSql = $"drop table if exists {tableName}";
var colData = new List<Object>{1646150410100,true,1,11,1111,11111111,"value one","值壹",
1646150410200,true,2,22,2222,22222222,"value two","值贰",
1646150410300,false,3,33,3333,33333333,"value three","值三",
};
var colData2 = new List<Object>{1646150410400,false,4,44,4444,44444444,"value three","值肆",
1646150410500,true,5,55,5555,55555555,"value one","值伍",
1646150410600,true,6,66,6666,66666666,"value two","值陆",
};
var tagData = new List<Object> { 1, "tag_one", "标签壹" };
var tagData2 = new List<Object> { 2, "tag_two", "标签贰" };
String insertSql = UtilsTools.ConstructInsertSql(tableName + "_s01", tableName, colData, tagData, 3);
String insertSql2 = UtilsTools.ConstructInsertSql(tableName + "_s02", tableName, colData2, tagData2, 3);
// Then
List<TDengineMeta> expectResMeta = DataSource.GetMetaFromDDL(createSql);
List<Object> expectResData = UtilsTools.CombineColAndTagData(colData, tagData, 3);
List<Object> expectResData2 = UtilsTools.CombineColAndTagData(colData2, tagData2, 3);
expectResData.AddRange(expectResData2);
var querySql = $"select * from {tableName}";
UtilsTools.ExecuteUpdate(conn, dropSql);
UtilsTools.ExecuteUpdate(conn, createSql);
UtilsTools.ExecuteUpdate(conn, insertSql);
IntPtr subscribe = TDengine.Subscribe(conn, true, tableName, querySql, null, IntPtr.Zero, 0);
_res = TDengine.Consume(subscribe);
// need to fetch the TAOS_RES
UtilsTools.GetResDataWithoutFree(_res);
TDengine.Unsubscribe(subscribe, true);
UtilsTools.ExecuteUpdate(conn, insertSql2);
Thread.Sleep(100);
subscribe = TDengine.Subscribe(conn, true, tableName, querySql, null, IntPtr.Zero, 0);
_res = TDengine.Consume(subscribe);
List<TDengineMeta> actualMeta = UtilsTools.GetResField(_res);
List<String> actualResData = UtilsTools.GetResDataWithoutFree(_res);
TDengine.Unsubscribe(subscribe, false);
Assert.Equal(expectResData.Count, actualResData.Count);
output.WriteLine("Assert Meta data");
//Assert Meta data
for (int i = 0; i < actualMeta.Count; i++)
{
Assert.Equal(expectResMeta[i].name, actualMeta[i].name);
Assert.Equal(expectResMeta[i].type, actualMeta[i].type);
Assert.Equal(expectResMeta[i].size, actualMeta[i].size);
}
output.WriteLine("Assert retrieve data");
// Assert retrieve data
for (int i = 0; i < actualResData.Count; i++)
{
// output.WriteLine("{0},{1},{2}", i, expectResData[i], actualResData[i]);
Assert.Equal(expectResData[i].ToString(), actualResData[i]);
}
}
/// <author>xiaolei</author>
/// <Name>SubscribeCases.ConsumeFromLastProgress</Name>
/// <describe>Subscribe table from the last progress.</describe>
/// <filename>Subscribe.cs</filename>
/// <result>pass or failed </result>
[Fact(DisplayName = "SubscribeCases.ConsumeFromLastProgress()"), TestExeOrder(2), Trait("Category", "Without callback")]
public void ConsumeFromLastProgress()
{
IntPtr conn = database.conn;
IntPtr _res = IntPtr.Zero;
var tableName = "subscribe_from_progress";
var createSql = $"create table if not exists {tableName}(ts timestamp,bl bool,i8 tinyint,i16 smallint,i32 int,i64 bigint,bnr binary(50),nchr nchar(50))tags(t_i32 int,t_bnr binary(50),t_nchr nchar(50))";
var dropSql = $"drop table if exists {tableName}";
var colData = new List<Object>{1646150410100,true,1,11,1111,11111111,"value one","值壹",
1646150410200,true,2,22,2222,22222222,"value two","值贰",
1646150410300,false,3,33,3333,33333333,"value three","值叁",
};
var colData2 = new List<Object>{1646150410400,false,4,44,4444,44444444,"value three","值肆",
1646150410500,true,5,55,5555,55555555,"value one","值伍",
1646150410600,true,6,66,6666,66666666,"value two","值陆",
};
var tagData = new List<Object> { 1, "tag_one", "标签壹" };
var tagData2 = new List<Object> { 2, "tag_two", "标签贰" };
String insertSql = UtilsTools.ConstructInsertSql(tableName + "_s01", tableName, colData, tagData, 3);
String insertSql2 = UtilsTools.ConstructInsertSql(tableName + "_s02", tableName, colData2, tagData2, 3);
// Then
List<TDengineMeta> expectResMeta = DataSource.GetMetaFromDDL(createSql);
List<Object> expectResData = UtilsTools.CombineColAndTagData(colData, tagData, 3);
List<Object> expectResData2 = UtilsTools.CombineColAndTagData(colData2, tagData2, 3);
var querySql = $"select * from {tableName}";
UtilsTools.ExecuteUpdate(conn, dropSql);
UtilsTools.ExecuteUpdate(conn, createSql);
UtilsTools.ExecuteUpdate(conn, insertSql);
// First time subscribe
IntPtr subscribe = TDengine.Subscribe(conn, true, tableName, querySql, null, IntPtr.Zero, 20);
_res = TDengine.Consume(subscribe);
// need to fetch the TAOS_RES
UtilsTools.GetResDataWithoutFree(_res);
// Close subscribe and save progress.
TDengine.Unsubscribe(subscribe, true);
// Insert new data.
UtilsTools.ExecuteUpdate(conn, insertSql2);
Thread.Sleep(1000);
subscribe = TDengine.Subscribe(conn, false, tableName, querySql, null, IntPtr.Zero, 20);
_res = TDengine.Consume(subscribe);
List<TDengineMeta> actualMeta = UtilsTools.GetResField(_res);
List<String> actualResData = UtilsTools.GetResDataWithoutFree(_res);
TDengine.Unsubscribe(subscribe, true);
output.WriteLine("Assert Meta data");
//Assert Meta data
for (int i = 0; i < actualMeta.Count; i++)
{
Assert.Equal(expectResMeta[i].name, actualMeta[i].name);
Assert.Equal(expectResMeta[i].type, actualMeta[i].type);
Assert.Equal(expectResMeta[i].size, actualMeta[i].size);
}
output.WriteLine("Assert retrieve data");
// Assert retrieve data
for (int i = 0; i < actualResData.Count; i++)
{
// output.WriteLine("{0},{1},{2}", i, expectResData[i], actualResData[i]);
Assert.Equal(expectResData2[i].ToString(), actualResData[i]);
}
}
}
}
\ No newline at end of file
using TDengineDriver;
using Test.UtilsTools;
using System;
using System.Collections.Generic;
using Xunit;
using Test.UtilsTools.DataSource;
using System.Threading;
using Xunit.Abstractions;
using Test.Fixture;
using Test.Case.Attributes;
namespace Cases
{
[TestCaseOrderer("XUnit.Case.Orderers.TestExeOrderer", "Cases.ExeOrder")]
[Collection("Database collection")]
public class SubscribeAsyncCases
{
DatabaseFixture database;
private readonly ITestOutputHelper output;
public SubscribeAsyncCases(DatabaseFixture fixture, ITestOutputHelper output)
{
this.database = fixture;
this.output = output;
}
/// <author>xiaolei</author>
/// <Name>SubscribeAsyncCases.ConsumeFromBegin</Name>
/// <describe>Subscribe a table and consume through callback and the beginning record of the table</describe>
/// <filename>Subscribe.cs</filename>
/// <result>pass or failed </result>
[Fact(DisplayName = "SubscribeAsyncCases.ConsumeFromBegin()"), TestExeOrder(1), Trait("Category", "With callback")]
public void ConsumeFromBegin()
{
IntPtr conn = database.conn;
IntPtr _res = IntPtr.Zero;
var tableName = "subscribe_async_from_begin";
var createSql = $"create table if not exists {tableName}(ts timestamp,bl bool,i8 tinyint,i16 smallint,i32 int,i64 bigint,bnr binary(50),nchr nchar(50))tags(t_i32 int,t_bnr binary(50),t_nchr nchar(50))";
var dropSql = $"drop table if exists {tableName}";
var colData = new List<Object>{1646150410100,true,1,11,1111,11111111,"value one","值壹",
1646150410200,true,2,22,2222,22222222,"value two","值贰",
1646150410300,false,3,33,3333,33333333,"value three","值三",
};
var colData2 = new List<Object>{1646150410400,false,4,44,4444,44444444,"value three","值肆",
1646150410500,true,5,55,5555,55555555,"value one","值伍",
1646150410600,true,6,66,6666,66666666,"value two","值陆",
};
var tagData = new List<Object> { 1, "tag_one", "标签壹" };
var tagData2 = new List<Object> { 2, "tag_two", "标签贰" };
String insertSql = UtilsTools.ConstructInsertSql(tableName + "_s01", tableName, colData, tagData, 3);
String insertSql2 = UtilsTools.ConstructInsertSql(tableName + "_s02", tableName, colData2, tagData2, 3);
List<TDengineMeta> expectResMeta = DataSource.GetMetaFromDDL(createSql);
List<Object> expectResData = UtilsTools.CombineColAndTagData(colData, tagData, 3);
List<Object> expectResData2 = UtilsTools.CombineColAndTagData(colData2, tagData2, 3);
expectResData.AddRange(expectResData2);
var querySql = $"select * from {tableName}";
UtilsTools.ExecuteUpdate(conn, dropSql);
UtilsTools.ExecuteUpdate(conn, createSql);
UtilsTools.ExecuteUpdate(conn, insertSql);
SubscribeCallback subscribeCallback1 = new SubscribeCallback(SubCallback1);
SubscribeCallback subscribeCallback2 = new SubscribeCallback(SubCallback2);
IntPtr subscribe = TDengine.Subscribe(conn, true, tableName, querySql, subscribeCallback1, IntPtr.Zero, 200);
UtilsTools.ExecuteUpdate(conn, insertSql2);
Thread.Sleep(1000);
TDengine.Unsubscribe(subscribe, true);
subscribe = TDengine.Subscribe(conn, true, tableName, querySql, subscribeCallback2, IntPtr.Zero, 200);
Thread.Sleep(1000);
TDengine.Unsubscribe(subscribe, false);
void SubCallback1(IntPtr subscribe, IntPtr taosRes, IntPtr param, int code)
{
if (code == 0 && taosRes != IntPtr.Zero)
{
// Cannot free taosRes here, otherwise it will cause a crash.
UtilsTools.GetResDataWithoutFree(taosRes);
}
else
{
output.WriteLine($"async query data failed, failed code:{code}, reason:{TDengine.Error(taosRes)}");
}
}
void SubCallback2(IntPtr subscribe, IntPtr taosRes, IntPtr param, int code)
{
if (code == 0 && taosRes != IntPtr.Zero)
{
List<TDengineMeta> actualMeta = UtilsTools.GetResField(taosRes);
List<String> actualResData = UtilsTools.GetResDataWithoutFree(taosRes);
// UtilsTools.DisplayRes(taosRes);
if (actualResData.Count == 0)
{
output.WriteLine($"consume in subscribe callback without data");
}
else
{
output.WriteLine($"consume in subscribe callback with data");
Assert.Equal(expectResData.Count, actualResData.Count);
output.WriteLine("Assert Meta data");
//Assert Meta data
for (int i = 0; i < actualMeta.Count; i++)
{
Assert.Equal(expectResMeta[i].name, actualMeta[i].name);
Assert.Equal(expectResMeta[i].type, actualMeta[i].type);
Assert.Equal(expectResMeta[i].size, actualMeta[i].size);
}
output.WriteLine("Assert retrieve data");
// Assert retrieve data
for (int i = 0; i < actualResData.Count; i++)
{
// output.WriteLine("index:{0},expectResData:{1},actualResData:{2}", i, expectResData[i], actualResData[i]);
Assert.Equal(expectResData[i].ToString(), actualResData[i]);
}
}
}
else
{
output.WriteLine($"async query data failed, failed code:{code}, reason:{TDengine.Error(taosRes)}");
}
}
}
/// <author>xiaolei</author>
/// <Name>SubscribeAsyncCases.ConsumeFromLastProgress</Name>
/// <describe>Subscribe a table and consume through callback and from last consume progress.</describe>
/// <filename>Subscribe.cs</filename>
/// <result>pass or failed </result>
[Fact(DisplayName = "SubscribeAsyncCases.ConsumeFromLastProgress()"), TestExeOrder(2), Trait("Category", "With callback")]
public void ConsumeFromLastProgress()
{
IntPtr conn = database.conn;
IntPtr _res = IntPtr.Zero;
var tableName = "subscribe_async_from_begin";
var createSql = $"create table if not exists {tableName}(ts timestamp,bl bool,i8 tinyint,i16 smallint,i32 int,i64 bigint,bnr binary(50),nchr nchar(50))tags(t_i32 int,t_bnr binary(50),t_nchr nchar(50))";
var dropSql = $"drop table if exists {tableName}";
var colData = new List<Object>{1646150410100,true,1,11,1111,11111111,"value one","值壹",
1646150410200,true,2,22,2222,22222222,"value two","值贰",
1646150410300,false,3,33,3333,33333333,"value three","值三",
};
var colData2 = new List<Object>{1646150410400,false,4,44,4444,44444444,"value three","值肆",
1646150410500,true,5,55,5555,55555555,"value one","值伍",
1646150410600,true,6,66,6666,66666666,"value two","值陆",
};
var tagData = new List<Object> { 1, "tag_one", "标签壹" };
var tagData2 = new List<Object> { 2, "tag_two", "标签贰" };
String insertSql = UtilsTools.ConstructInsertSql(tableName + "_s01", tableName, colData, tagData, 3);
String insertSql2 = UtilsTools.ConstructInsertSql(tableName + "_s02", tableName, colData2, tagData2, 3);
List<TDengineMeta> expectResMeta = DataSource.GetMetaFromDDL(createSql);
List<Object> expectResData = UtilsTools.CombineColAndTagData(colData, tagData, 3);
List<Object> expectResData2 = UtilsTools.CombineColAndTagData(colData2, tagData2, 3);
var querySql = $"select * from {tableName}";
UtilsTools.ExecuteUpdate(conn, dropSql);
UtilsTools.ExecuteUpdate(conn, createSql);
UtilsTools.ExecuteUpdate(conn, insertSql);
SubscribeCallback subscribeCallback1 = new SubscribeCallback(SubCallback1);
SubscribeCallback subscribeCallback2 = new SubscribeCallback(SubCallback2);
IntPtr subscribe = TDengine.Subscribe(conn, true, tableName, querySql, subscribeCallback1, IntPtr.Zero, 200);
Thread.Sleep(1000);
TDengine.Unsubscribe(subscribe, true);
UtilsTools.ExecuteUpdate(conn, insertSql2);
subscribe = TDengine.Subscribe(conn, false, tableName, querySql, subscribeCallback2, IntPtr.Zero, 200);
Thread.Sleep(1000);
TDengine.Unsubscribe(subscribe, false);
void SubCallback1(IntPtr subscribe, IntPtr taosRes, IntPtr param, int code)
{
if (code == 0 && taosRes != IntPtr.Zero)
{
// Cannot free taosRes here, otherwise it will cause a crash.
UtilsTools.GetResDataWithoutFree(taosRes);
}
else if (taosRes != IntPtr.Zero)
{
output.WriteLine($"async query data failed, failed code:{code}, reason:{TDengine.Error(taosRes)}");
}
}
void SubCallback2(IntPtr subscribe, IntPtr taosRes, IntPtr param, int code)
{
if (code == 0 && taosRes != IntPtr.Zero)
{
List<TDengineMeta> actualMeta = UtilsTools.GetResField(taosRes);
List<String> actualResData = UtilsTools.GetResDataWithoutFree(taosRes);
UtilsTools.DisplayRes(taosRes);
if (actualResData.Count == 0)
{
output.WriteLine($"consume in subscribe callback without data");
}
else
{
output.WriteLine($"consume in subscribe callback with data");
Assert.Equal(expectResData2.Count, actualResData.Count);
output.WriteLine("Assert Meta data");
//Assert Meta data
for (int i = 0; i < actualMeta.Count; i++)
{
Assert.Equal(expectResMeta[i].name, actualMeta[i].name);
Assert.Equal(expectResMeta[i].type, actualMeta[i].type);
Assert.Equal(expectResMeta[i].size, actualMeta[i].size);
}
output.WriteLine("Assert retrieve data");
// Assert retrieve data
for (int i = 0; i < actualResData.Count; i++)
{
// output.WriteLine("index:{0},expectResData:{1},actualResData:{2}", i, expectResData[i], actualResData[i]);
Assert.Equal(expectResData2[i].ToString(), actualResData[i]);
}
}
}
else
{
output.WriteLine($"async query data failed, failed code:{code}, reason:{TDengine.Error(taosRes)}");
}
}
}
}
}
\ No newline at end of file
......@@ -48,30 +48,30 @@ namespace Test.Fixture
public void Dispose()
{
// IntPtr res;
// if (conn != IntPtr.Zero)
// {
// if ((res = TDengine.Query(conn, $"drop database if exists {db}")) != IntPtr.Zero)
// {
// if (TDengine.Close(conn) == 0)
// {
// Console.WriteLine("close connection success");
// }
// else
// {
// throw new Exception("close connection failed");
// }
IntPtr res;
if (conn != IntPtr.Zero)
{
if ((res = TDengine.Query(conn, $"drop database if exists {db}")) != IntPtr.Zero)
{
if (TDengine.Close(conn) == 0)
{
Console.WriteLine("close connection success");
}
else
{
throw new Exception("close connection failed");
}
// }
// else
// {
// throw new Exception(TDengine.Error(res));
// }
// }
// else
// {
// throw new Exception("connection if already null");
// }
}
else
{
throw new Exception(TDengine.Error(res));
}
}
else
{
throw new Exception("connection if already null");
}
}
......
......@@ -104,11 +104,12 @@ namespace Test.UtilsTools
IntPtr rowdata;
List<string> dataList = QueryRes(res, metaList);
for (int i = 0; i < metaList.Count; i++)
for (int i = 0; i < dataList.Count; i += metaList.Count)
{
for (int j = 0; j < dataList.Count; j++)
for (int j = 0; j < metaList.Count; j++)
{
Console.Write(" {0} \t|", dataList[j]);
Console.Write(" {0} \t|", dataList[i + j]);
}
Console.WriteLine("");
}
......
......@@ -385,6 +385,11 @@ int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t
case TSDB_DATA_TYPE_TINYINT: DEFAULT_COMP(GET_INT8_VAL(f1), GET_INT8_VAL(f2));
case TSDB_DATA_TYPE_BINARY: {
bool leftIsNull = isNull(f1, TSDB_DATA_TYPE_BINARY);
bool rightIsNull = isNull(f2, TSDB_DATA_TYPE_BINARY);
if(leftIsNull && rightIsNull) return 0;
else if(leftIsNull) return -1;
else if(rightIsNull) return 1;
int32_t len1 = varDataLen(f1);
int32_t len2 = varDataLen(f2);
......@@ -408,6 +413,12 @@ int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t
return (ret < 0) ? -1 : 1;
};
case TSDB_DATA_TYPE_NCHAR: { // todo handle the var string compare
bool leftIsNull = isNull(f1, TSDB_DATA_TYPE_NCHAR);
bool rightIsNull = isNull(f2, TSDB_DATA_TYPE_NCHAR);
if(leftIsNull && rightIsNull) return 0;
else if(leftIsNull) return -1;
else if(rightIsNull) return 1;
int32_t len1 = varDataLen(f1);
int32_t len2 = varDataLen(f2);
......
......@@ -180,6 +180,12 @@ int32_t compareDoubleValDesc(const void* pLeft, const void* pRight) {
}
int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) {
bool leftIsNull = isNull(pLeft, TSDB_DATA_TYPE_BINARY);
bool rightIsNull = isNull(pRight, TSDB_DATA_TYPE_BINARY);
if(leftIsNull && rightIsNull) return 0;
else if(leftIsNull) return -1;
else if(rightIsNull) return 1;
int32_t len1 = varDataLen(pLeft);
int32_t len2 = varDataLen(pRight);
......@@ -200,6 +206,12 @@ int32_t compareLenPrefixedStrDesc(const void* pLeft, const void* pRight) {
}
int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) {
bool leftIsNull = isNull(pLeft, TSDB_DATA_TYPE_NCHAR);
bool rightIsNull = isNull(pRight, TSDB_DATA_TYPE_NCHAR);
if(leftIsNull && rightIsNull) return 0;
else if(leftIsNull) return -1;
else if(rightIsNull) return 1;
int32_t len1 = varDataLen(pLeft);
int32_t len2 = varDataLen(pRight);
......
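The three comparator hunks above all add the same guard: before touching the variable-length BINARY/NCHAR payloads, check both sides for NULL, treat two NULLs as equal, and order any NULL before any non-NULL value. Without the guard, varDataLen would read a length out of the NULL sentinel bytes and compare garbage. The ordering contract, sketched in Python with None standing in for the NULL sentinel:

```python
# NULL-first ascending comparator, mirroring the guards added to
# columnValueAscendingComparator and compareLenPrefixedStr(/WStr):
# two NULLs are equal, and a NULL sorts before every real value.
def compare_nullable(left, right):
    if left is None and right is None:
        return 0
    if left is None:
        return -1
    if right is None:
        return 1
    return (left > right) - (left < right)  # ordinary three-way compare

assert compare_nullable(None, None) == 0
assert compare_nullable(None, "abc") == -1
assert compare_nullable("abc", None) == 1
```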
This diff is collapsed.
This diff is collapsed.
......@@ -560,7 +560,6 @@
10,,script,./test.sh -f unique/stable/replica2_vnode3.sim
10,,pytest,python3 testCompress.py
10,,pytest,python3 test.py -f client/client.py
10,,script,eval sh -c \"if [ `uname -m` != aarch64 ]; then ./test.sh -f general/parser/scalar_expression.sim; fi\"
10,,script,./test.sh -f general/compute/scalar_pow.sim
9,,script,./test.sh -f general/parser/alter1.sim
9,,script,./test.sh -f general/db/delete.sim
......@@ -715,6 +714,8 @@
5,,develop-test,python3 ./test.py -f 2-query/time_window_keywords.py
5,,develop-test,python3 ./test.py -f 2-query/TD-13946.py
5,,develop-test,python3 ./test.py -f 2-query/query_window_keywords.py
5,,develop-test,python3 ./test.py -f 2-query/scalar_triangle.py
5,,develop-test,python3 ./test.py -f 2-query/scalar_expression.py
4,,system-test,python3 test.py -f 4-taosAdapter/TD-12163.py
4,,system-test,python3 ./test.py -f 3-connectors/restful/restful_binddbname.py
4,,system-test,python3 ./test.py -f 2-query/TD-12614.py
......@@ -800,3 +801,5 @@
2,,develop-test,python3 ./test.py -f 2-query/function_hll.py
1,,develop-test,python3 ./test.py -f 2-query/function_state.py
1,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/demo.py
8,,pytest,python3 test.py -f update/update2.py
4,,pytest,python3 test.py -f insert/line_insert.py
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time  # used by insertData's randomized sleep
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
import random
import datetime
import threading
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.lock = threading.RLock()
self.ts = []
def restartTaosd(self):
tdDnodes.stop(1)
tdDnodes.startWithoutSleep(1)
tdSql.execute("use db")
def insertData(self):
self.lock.acquire()
try:
sql = "insert into test2.warning_1 values "
for i in range(10):
ct = datetime.datetime.now()
t = int(ct.timestamp() * 1000)
self.ts.append(t)
wait = random.randint(1, 9)
time.sleep(0.0001 * wait)
sql += "(%d, %d, 0, 0, 0, %d, 0, %f, %f, 0, 0, %d, %d, False, 0, '', '', 0, False, %d)" % (t, random.randint(0, 20), random.randint(1, 10000), random.uniform(0, 1), random.uniform(0, 1), random.randint(1, 10000), random.randint(1, 10000), t)
tdSql.execute(sql)
finally:
self.lock.release()
def updateData(self):
self.lock.acquire()
try:
sql = "insert into test2.warning_1(ts,endtime,maxspeed,endlongitude,endlatitude,drivercard_id,status,endmileage) values "
for t in self.ts:
sql += "(%d, %d, 0, %f, %f, 0, False, %d)" % (t, t, random.uniform(0, 1), random.uniform(0, 1), random.randint(1, 10000))
tdSql.execute(sql)
self.ts.clear()
finally:
self.lock.release()
def run(self):
tdSql.execute("CREATE DATABASE test2 CACHE 1 BLOCKS 3 UPDATE 2")
tdSql.execute("use test2")
tdSql.execute('''CREATE TABLE test2.fx_car_warning (ts TIMESTAMP, type TINYINT, level TINYINT, origin TINYINT, endtime BIGINT, mileage INT, maxspeed DOUBLE,
longitude DOUBLE, latitude DOUBLE, endlongitude DOUBLE, endlatitude DOUBLE, drivercard_id BIGINT, infoid INT, status BOOL, endmileage INT,
duty_officer NCHAR(10), content NCHAR(100), cltime BIGINT, clstatus BOOL, starttime BIGINT) TAGS (catid BIGINT, car_id BIGINT, mytype TINYINT)''')
tdSql.execute("create table test2.warning_1 using test2.fx_car_warning tags(1, 1, 0)")
tdLog.sleep(1)
for i in range(1000):
t1 = threading.Thread(target=self.insertData, args=( ))
t2 = threading.Thread(target=self.updateData, args=( ))
t1.start()
t2.start()
t1.join()
t2.join()
tdSql.query("select * from test2.fx_car_warning where type is null")
tdSql.checkRows(0)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
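This regression test (presumably the update/update2.py added to the task list above) hinges on UPDATE 2, the partial-update mode of TDengine 2.x databases: when a row is rewritten at an existing timestamp, only the columns supplied by the new write are overwritten, and the omitted columns keep their previous values. insertData and updateData race over the same timestamps from two threads, and the final `type is null` query passes only if the partial updates left the originally inserted `type` column intact. A standalone sketch of the semantics the test relies on; the database and table names here are hypothetical:

```python
import taos

# Hypothetical demo of UPDATE 2 (partial update): rewriting an existing
# timestamp with a column subset must not wipe the omitted columns.
conn = taos.connect()
cur = conn.cursor()
cur.execute("create database if not exists upd_demo update 2")
cur.execute("create table if not exists upd_demo.t (ts timestamp, a int, b int)")
cur.execute("insert into upd_demo.t values (1601481600000, 1, 2)")
cur.execute("insert into upd_demo.t (ts, b) values (1601481600000, 99)")
cur.execute("select a, b from upd_demo.t")
print(cur.fetchall())  # expected under UPDATE 2: a is still 1, b is now 99
cur.close()
conn.close()
```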
......@@ -1159,10 +1159,14 @@ if $data40 != 4 then
return -1
endi
if $data50 != NULL then
return -1
if $data50 != 9223372036854775807 then #for arm64
return -1
endi
endi
if $data60 != NULL then
return -1
if $data60 != 9223372036854775807 then #for arm64
return -1
endi
endi
sql select cast(c6 as binary(60)) from tb1;
if $rows != 7 then
......@@ -1234,10 +1238,14 @@ if $data40 != @70-01-01 08:00:00.004@ then
return -1
endi
if $data50 != NULL then
return -1
if $data50 != @94-08-17 15:12:55.807@ then #for arm64
return -1
endi
endi
if $data60 != NULL then
return -1
if $data60 != @94-08-17 15:12:55.807@ then #for arm64
return -1
endi
endi
sql select cast(c6 as nchar(50)) from tb1;
if $rows != 7 then
......@@ -1332,10 +1340,14 @@ if $data40 != 4 then
return -1
endi
if $data50 != NULL then
return -1
if $data50 != 9223372036854775807 then #for arm64
return -1
endi
endi
if $data60 != NULL then
return -1
if $data60 != 9223372036854775807 then #for arm64
return -1
endi
endi
sql select cast(c7 as binary(400)) from tb1;
if $rows != 7 then
......@@ -1407,10 +1419,14 @@ if $data40 != @70-01-01 08:00:00.004@ then
return -1
endi
if $data50 != NULL then
return -1
if $data50 != @94-08-17 15:12:55.807@ then #for arm64
return -1
endi
endi
if $data60 != NULL then
return -1
if $data60 != @94-08-17 15:12:55.807@ then #for arm64
return -1
endi
endi
sql select cast(c7 as nchar(500)) from tb1;
if $rows != 7 then
......
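The cast-test hunks above relax the NULL assertions for arm64: there, casting a NULL column to BIGINT can surface the sentinel as 9223372036854775807 (INT64_MAX) instead of NULL, and casting to TIMESTAMP renders that same value as @94-08-17 15:12:55.807@. The odd-looking date is just INT64_MAX interpreted as epoch milliseconds: it lands in year 292278994, which the two-digit year format truncates to 94, and the .807 fraction is INT64_MAX mod 1000. A quick sanity check of that arithmetic:

```python
INT64_MAX = 2**63 - 1  # 9223372036854775807

# Interpret INT64_MAX as milliseconds since the Unix epoch.
seconds = INT64_MAX / 1000
years_since_1970 = seconds / (365.2425 * 86400)
print(int(1970 + years_since_1970))  # ≈ 292278994 -> displayed as "94"
print(INT64_MAX % 1000)              # 807 -> the .807 in 15:12:55.807
```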
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/exec.sh -n dnode1 -s start
sleep 500
sql connect
$dbPrefix = db
$tbPrefix = ct
$mtPrefix = st
$tbNum = 2
$rowNum = 50
print =============== step1 create stable/table
$i = 0
$db = $dbPrefix . $i
$mt = $mtPrefix . $i
sql drop database $db -x step1
step1:
sql create database $db
sql use $db
sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 nchar(5), c9 binary(10)) TAGS (tgcol int)
$i = 0
while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( $i )
$x = 0
$y = 0.25
while $x < $rowNum
$cc = $x * 60000
$ms = 1601481600000 + $cc
sql insert into $tb values ($ms , $x , $y , $x , $x , $x , $y , $x , $x , $x )
$x = $x + 1
$y = $y + 1
endw
$i = $i + 1
endw
print ================= step2
print execute sql select floor(3.0)+ceil(4.0) from ct0
sql select floor(3.0)+ceil(4.0) from ct0
if $data00 != @7.000000000@ then
return -1
endi
if $data10 != @7.000000000@ then
return -1
endi
if $data20 != @7.000000000@ then
return -1
endi
if $data30 != @7.000000000@ then
return -1
endi
if $data40 != @7.000000000@ then
return -1
endi
if $data50 != @7.000000000@ then
return -1
endi
if $data60 != @7.000000000@ then
return -1
endi
if $data70 != @7.000000000@ then
return -1
endi
if $data80 != @7.000000000@ then
return -1
endi
if $data90 != @7.000000000@ then
return -1
endi
print execute sql select sum(c1)+3.0+4.0 from st0
sql select sum(c1)+3.0+4.0 from st0
if $data00 != @2457.000000000@ then
return -1
endi
print execute sql select sin(log(avg(c1),sum(c2))+3)%4 from st0
sql select sin(log(avg(c1),sum(c2))+3)%4 from st0
if $data00 != @-0.265074286@ then
return -1
endi
print execute sql select log(pow(length(concat('3','4')),2),c2) from st0
sql select log(pow(length(concat('3','4')),2),c2) from st0
if $data00 != @-1.000000000@ then
return -1
endi
if $data10 != @inf@ then
return -1
endi
if $data20 != @2.000000000@ then
return -1
endi
if $data30 != @1.261859507@ then
return -1
endi
if $data40 != @1.000000000@ then
return -1
endi
if $data50 != @0.861353116@ then
return -1
endi
if $data60 != @0.773705614@ then
return -1
endi
if $data70 != @0.712414374@ then
return -1
endi
if $data80 != @0.666666667@ then
return -1
endi
if $data90 != @0.630929754@ then
return -1
endi
print execute sql select round(log(pow(length(concat('3','4')),2),c2)+floor(c3))+2 from st0
sql select round(log(pow(length(concat('3','4')),2),c2)+floor(c3))+2 from st0
if $data00 != @1.000000000@ then
return -1
endi
if $data10 != @inf@ then
return -1
endi
if $data20 != @6.000000000@ then
return -1
endi
if $data30 != @6.000000000@ then
return -1
endi
if $data40 != @7.000000000@ then
return -1
endi
if $data50 != @8.000000000@ then
return -1
endi
if $data60 != @9.000000000@ then
return -1
endi
if $data70 != @10.000000000@ then
return -1
endi
if $data80 != @11.000000000@ then
return -1
endi
if $data90 != @12.000000000@ then
return -1
endi
print execute sql select sin(pow(c1,log(c2,2))+pow(c2,2)) as val from ct0 union all select pow(c4,2)+tan(sin(c5)/cos(c6)) as val from ct1
sql select sin(pow(c1,log(c2,2))+pow(c2,2)) as val from ct0 union all select pow(c4,2)+tan(sin(c5)/cos(c6)) as val from ct1
if $data00 != @-nan@ then
return -1
endi
if $data10 != @0.909297427@ then
return -1
endi
if $data20 != @-0.279415498@ then
return -1
endi
if $data30 != @0.843325058@ then
return -1
endi
if $data40 != @0.551426681@ then
return -1
endi
if $data50 != @-0.840606612@ then
return -1
endi
if $data60 != @0.436161076@ then
return -1
endi
if $data70 != @0.897498185@ then
return -1
endi
if $data80 != @-0.885952778@ then
return -1
endi
if $data90 != @0.429470715@ then
return -1
endi
print execute sql select asin(c1) from st0 limit 1
sql select asin(c1) from st0 limit 1
if $data00 != @0.000000000@ then
return -1
endi
print execute sql select pow(c1,2) from st0 limit 1 offset 2;
sql select pow(c1,2) from st0 limit 1 offset 2;
if $data00 != @4.000000000@ then
return -1
endi
print execute sql select cos(c1) from db0.ct0, db0.ct1 where ct0.ts==ct1.ts
sql select cos(c1) from db0.ct0, db0.ct1 where ct0.ts==ct1.ts
if $data00 != @1.000000000@ then
return -1
endi
if $data10 != @0.540302306@ then
return -1
endi
if $data20 != @-0.416146837@ then
return -1
endi
if $data30 != @-0.989992497@ then
return -1
endi
if $data40 != @-0.653643621@ then
return -1
endi
if $data50 != @0.283662185@ then
return -1
endi
if $data60 != @0.960170287@ then
return -1
endi
if $data70 != @0.753902254@ then
return -1
endi
if $data80 != @-0.145500034@ then
return -1
endi
if $data90 != @-0.911130262@ then
return -1
endi
print ============== invalid expressions
$stb = st0
sql_error select agg(c1)+c2 from $stb
sql_error select agg(c1+2) from $stb
sql_error select agg(ceil(c1))+c2 from $stb
sql_error select agg(ceil(c1)) from $stb
sql_error select asin(c1) from $stb group by tbname
sql_error select asin(c2) from $stb group by tgcol
sql_error select asin(c1) from $stb session_window(ts, 1s)
sql_error select cos(c1) from $stb state_window(f1)
sql_error select pow(c2,2) from $stb interval(1s) sliding(500a)
sql_error select pow(c2,2) from $stb state_window(f1)
sql_error select tan(c1) from $stb from d.st slimit 1
sql_error select c1+2, tbname from $stb group by tbname
sql select sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(c1)))))))))))))))) from $stb
sql_error select sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(sin(c1))))))))))))))))) from $stb
print =============== clear
sql drop database $db
sql show databases
if $rows != 0 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
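Besides the invalid-expression checks, the script's last two statements probe the scalar-function nesting limit: sixteen nested sin() calls are accepted, while one more level triggers sql_error. A small, hypothetical helper for generating such depth probes:

```python
# Build "select sin(sin(...(c1)...)) from st0" at a given nesting depth;
# useful for probing the parser's scalar-expression depth limit
# (16 levels pass, 17 fail in the script above).
def nested_call(func, column, depth, table="st0"):
    expr = column
    for _ in range(depth):
        expr = f"{func}({expr})"
    return f"select {expr} from {table}"

print(nested_call("sin", "c1", 16))  # accepted above
print(nested_call("sin", "c1", 17))  # rejected above (sql_error)
```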