diff --git a/Jenkinsfile b/Jenkinsfile index f2e3c1c4f6c3754f33f56575c4f6b89170e36948..35a2bf82606313fe015457cda3a6a57c23e2ef4d 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -4,8 +4,10 @@ import jenkins.model.CauseOfInterruption node { } -def skipbuild=0 -def win_stop=0 +def skipbuild = 0 +def win_stop = 0 +def scope = [] +def mod = [0,1,2,3,4] def abortPreviousBuilds() { def currentJobName = env.JOB_NAME @@ -349,7 +351,7 @@ pipeline { } stages { stage('pre_build'){ - agent{label 'master'} + agent{label 'catalina'} options { skipDefaultCheckout() } when { changeRequest() @@ -358,44 +360,32 @@ pipeline { script{ abort_previous() abortPreviousBuilds() - } - // sh''' - // rm -rf ${WORKSPACE}.tes - // cp -r ${WORKSPACE} ${WORKSPACE}.tes - // cd ${WORKSPACE}.tes - // git fetch - // ''' - // script { - // if (env.CHANGE_TARGET == 'master') { - // sh ''' - // git checkout master - // ''' - // } - // else if(env.CHANGE_TARGET == '2.0'){ - // sh ''' - // git checkout 2.0 - // ''' - // } - // else{ - // sh ''' - // git checkout develop - // ''' - // } - // } - // sh''' - // git fetch origin +refs/pull/${CHANGE_ID}/merge - // git checkout -qf FETCH_HEAD - // ''' + println env.CHANGE_BRANCH + if(env.CHANGE_FORK){ + scope = ['connector','query','insert','other','tools','taosAdapter'] + } + else{ + sh''' + cd ${WKC} + git fetch + git checkout ${CHANGE_BRANCH} + git pull + ''' + dir('/var/lib/jenkins/workspace/TDinternal/community'){ + gitlog = sh(script: "git log -1 --pretty=%B ", returnStdout:true) + println gitlog + if (!(gitlog =~ /\((.*?)\)/)){ + autoCancelled = true + error('Aborting the build.') + } + temp = (gitlog =~ /\((.*?)\)/) + temp = temp[0].remove(1) + scope = temp.split(",") + Collections.shuffle mod + } - // script{ - // skipbuild='2' - // skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true) - // println skipbuild - // } - // sh''' - // rm -rf ${WORKSPACE}.tes - // ''' - // } + } + } } } stage('Parallel test stage') { @@ -408,239 +398,90 @@ pipeline { } } parallel { - stage('python_1_s1') { - agent{label " slave1 || slave11 "} + stage('python_1') { + agent{label " slave1 || slave6 || slave11 || slave16 "} steps { pre_test() timeout(time: 55, unit: 'MINUTES'){ - sh ''' - date - cd ${WKC}/tests - ./test-all.sh p1 - date''' - } + script{ + scope.each { + sh """ + date + cd ${WKC}/tests + ./test-CI.sh ${it} 5 ${mod[0]} + date""" + } + } + } } } - stage('python_2_s5') { - agent{label " slave5 || slave15 "} + stage('python_2') { + agent{label " slave2 || slave7 || slave12 || slave17 "} steps { pre_test() timeout(time: 55, unit: 'MINUTES'){ - sh ''' - date - cd ${WKC}/tests - ./test-all.sh p2 - date''' - } - } - } - stage('python_3_s6') { - agent{label " slave6 || slave16 "} - steps { - timeout(time: 55, unit: 'MINUTES'){ - pre_test() - sh ''' - date - cd ${WKC}/tests - ./test-all.sh p3 - date''' + script{ + scope.each { + sh """ + date + cd ${WKC}/tests + ./test-CI.sh ${it} 5 ${mod[1]} + date""" + } + } } } } - stage('test_b1_s2') { - agent{label " slave2 || slave12 "} + stage('python_3') { + agent{label " slave3 || slave8 || slave13 ||slave18 "} steps { timeout(time: 105, unit: 'MINUTES'){ pre_test() - sh ''' - rm -rf /var/lib/taos/* - rm -rf /var/log/taos/* - nohup taosd >/dev/null & - sleep 10 - ''' - - sh ''' - cd ${WKC}/src/connector/python - export PYTHONPATH=$PWD/ - export LD_LIBRARY_PATH=${WKC}/debug/build/lib - pip3 install pytest - pytest tests/ - - python3 examples/bind-multi.py - python3 examples/bind-row.py - 
python3 examples/demo.py - python3 examples/insert-lines.py - python3 examples/pep-249.py - python3 examples/query-async.py - python3 examples/query-objectively.py - python3 examples/subscribe-sync.py - python3 examples/subscribe-async.py - ''' - - sh ''' - cd ${WKC}/src/connector/nodejs - npm install - npm run test - cd ${WKC}/tests/examples/nodejs - npm install td2.0-connector > /dev/null 2>&1 - node nodejsChecker.js host=localhost - node test1970.js - cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport - npm install td2.0-connector > /dev/null 2>&1 - node nanosecondTest.js - ''' - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${WKC}/src/connector/C# - dotnet test - dotnet run --project src/test/Cases/Cases.csproj - - cd ${WKC}/tests/examples/C# - dotnet run --project C#checker/C#checker.csproj - dotnet run --project TDengineTest/TDengineTest.csproj - dotnet run --project schemaless/schemaless.csproj - - cd ${WKC}/tests/examples/C#/taosdemo - dotnet build -c Release - tree | true - ./bin/Release/net5.0/taosdemo -c /etc/taos -y - ''' + script{ + scope.each { + sh """ + date + cd ${WKC}/tests + ./test-CI.sh ${it} 5 ${mod[2]} + date""" + } } - sh ''' - cd ${WKC}/tests/gotest - bash batchtest.sh - ''' - sh ''' - cd ${WKC}/tests - ./test-all.sh b1fq - date''' } } } - stage('test_crash_gen_s3') { - agent{label " slave3 || slave13 "} - + stage('python_4') { + agent{label " slave4 || slave9 || slave14 || slave19 "} steps { - pre_test() - timeout(time: 60, unit: 'MINUTES'){ - sh ''' - cd ${WKC}/tests/pytest - ./crash_gen.sh -a -p -t 4 -s 2000 - ''' - } - timeout(time: 60, unit: 'MINUTES'){ - sh ''' - cd ${WKC}/tests/pytest - rm -rf /var/lib/taos/* - rm -rf /var/log/taos/* - ./handle_crash_gen_val_log.sh - ''' - sh ''' - cd ${WKC}/tests/pytest - rm -rf /var/lib/taos/* - rm -rf /var/log/taos/* - ./handle_taosd_val_log.sh - ''' - } timeout(time: 55, unit: 'MINUTES'){ - sh ''' - date - cd ${WKC}/tests - ./test-all.sh b2fq - date - ''' - } - } - } - stage('test_valgrind_s4') { - agent{label " slave4 || slave14 "} - - steps { - pre_test() - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${WKC}/tests/pytest - ./valgrind-test.sh 2>&1 > mem-error-out.log - ./handle_val_log.sh - ''' - } - timeout(time: 55, unit: 'MINUTES'){ - sh ''' - date - cd ${WKC}/tests - ./test-all.sh b3fq - date''' - sh ''' - date - cd ${WKC}/tests - ./test-all.sh full example - date''' - } - } - } - stage('test_b4_s7') { - agent{label " slave7 || slave17 "} - steps { - timeout(time: 105, unit: 'MINUTES'){ pre_test() - sh ''' - date - cd ${WKC}/tests - ./test-all.sh b4fq - cd ${WKC}/tests - ./test-all.sh p4 - ''' - // cd ${WKC}/tests - // ./test-all.sh full jdbc - // cd ${WKC}/tests - // ./test-all.sh full unit - } - } - } - stage('test_b5_s8') { - agent{label " slave8 || slave18 "} - steps { - timeout(time: 55, unit: 'MINUTES'){ - pre_test() - sh ''' - date - cd ${WKC}/tests - ./test-all.sh b5fq - date''' - } - } - } - stage('test_b6_s9') { - agent{label " slave9 || slave19 "} - steps { - timeout(time: 55, unit: 'MINUTES'){ - pre_test() - sh ''' - cd ${WKC}/tests - ./test-all.sh develop-test - ''' - sh ''' - date - cd ${WKC}/tests - ./test-all.sh b6fq - date''' + script{ + scope.each { + sh """ + date + cd ${WKC}/tests + ./test-CI.sh ${it} 5 ${mod[3]} + date""" + } + } + } } } - stage('test_b7_s10') { - agent{label " slave10 || slave20 "} + stage('python_5') { + agent{label " slave5 || slave10 || slave15 || slave20 "} steps { timeout(time: 55, unit: 'MINUTES'){ pre_test() - 
sh ''' - cd ${WKC}/tests - ./test-all.sh system-test - ''' - sh ''' - date - cd ${WKC}/tests - ./test-all.sh b7fq - date''' + script{ + scope.each { + sh """ + date + cd ${WKC}/tests + ./test-CI.sh ${it} 5 ${mod[4]} + date""" + } + } + } } } @@ -813,3 +654,4 @@ pipeline { } } } + diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index fe1024ae7f5af41f0925f4636616a75a6a6f894b..dd3de6b0171212509c730364651af023dc50681d 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -594,6 +594,65 @@ public void setString(int columnIndex, ArrayList list, int size) throws public void setNString(int columnIndex, ArrayList list, int size) throws SQLException ``` +### 设置客户端参数 +从TDengine-2.3.5.0版本开始,jdbc driver支持在应用的第一次连接中,设置TDengine的客户端参数。Driver支持JDBC-JNI方式中,通过jdbcUrl和properties两种方式设置client parameter。 +注意: +* JDBC-RESTful不支持设置client parameter的功能。 +* 应用中设置的client parameter为进程级别的,即如果要更新client的参数,需要重启应用。这是因为client parameter是全局参数,仅在应用程序的第一次设置生效。 +* 以下示例代码基于taos-jdbcdriver-2.0.36。 + +示例代码: +```java +public class ClientParameterSetting { + private static final String host = "127.0.0.1"; + + public static void main(String[] args) throws SQLException { + setParameterInJdbcUrl(); + + setParameterInProperties(); + } + + private static void setParameterInJdbcUrl() throws SQLException { + String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?debugFlag=135&asyncLog=0"; + + Connection connection = DriverManager.getConnection(jdbcUrl, "root", "taosdata"); + + printDatabase(connection); + + connection.close(); + } + + private static void setParameterInProperties() throws SQLException { + String jdbcUrl = "jdbc:TAOS://" + host + ":6030/"; + Properties properties = new Properties(); + properties.setProperty("user", "root"); + properties.setProperty("password", "taosdata"); + properties.setProperty("debugFlag", "135"); + properties.setProperty("asyncLog", "0"); + properties.setProperty("maxSQLLength", "1048576"); + + try (Connection conn = DriverManager.getConnection(jdbcUrl, properties)) { + printDatabase(conn); + } + } + + private static void printDatabase(Connection connection) throws SQLException { + try (Statement stmt = connection.createStatement()) { + ResultSet rs = stmt.executeQuery("show databases"); + + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); + } + System.out.println(); + } + } + } +} +``` + + ## 订阅 ### 创建 diff --git a/documentation20/cn/09.connections/docs.md b/documentation20/cn/09.connections/docs.md index e2f921cc973c28f16f491705800012e1f6a6f074..d79d07661b95f8a807bff226185d3804e0ce0f4d 100644 --- a/documentation20/cn/09.connections/docs.md +++ b/documentation20/cn/09.connections/docs.md @@ -7,11 +7,21 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/ ### 安装Grafana -目前 TDengine 支持 Grafana 6.2 以上的版本。用户可以根据当前的操作系统,到 Grafana 官网下载安装包,并执行安装。下载地址如下:https://grafana.com/grafana/download。 +目前 TDengine 支持 Grafana 7.0 以上的版本。用户可以根据当前的操作系统,到 Grafana 官网下载安装包,并执行安装。下载地址如下:。 ### 配置Grafana -TDengine 的 Grafana 插件请从 下载。 +TDengine 的 Grafana 插件托管在GitHub,可从 下载,当前最新版本为 3.1.3。 + +推荐使用 [`grafana-cli` 命令行工具](https://grafana.com/docs/grafana/latest/administration/cli/) 进行插件安装。 + +```bash +sudo -u grafana grafana-cli \ + --pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip \ + plugins install 
tdengine-datasource +``` + +或者下载到本地并解压到 Grafana 插件目录。 ```bash GF_VERSION=3.1.3 @@ -31,11 +41,18 @@ Grafana 7.3+ / 8.x 版本会对插件进行签名检查,因此还需要在 gra allow_loading_unsigned_plugins = tdengine-datasource ``` +在Docker环境下,可以使用如下的环境变量设置自动安装并设置 TDengine 插件: + +```bash +GF_INSTALL_PLUGINS=https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip;tdengine-datasource +GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource +``` + ### 使用 Grafana #### 配置数据源 -用户可以直接通过 localhost:3000 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示: +用户可以直接通过 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示: ![img](../images/connections/add_datasource1.jpg) diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md index 676983c87995255eeb54646b9efede38e7162feb..d8936ad8c27387aaff3d6fde3cdb70915290a114 100644 --- a/documentation20/cn/10.cluster/docs.md +++ b/documentation20/cn/10.cluster/docs.md @@ -45,7 +45,7 @@ arbitrator ha.taosdata.com:6042 一定要修改的参数是firstEp和fqdn。在每个数据节点,firstEp需全部配置成一样,**但fqdn一定要配置成其所在数据节点的值**。其他参数可不做任何修改,除非你很清楚为什么要修改。 -**加入到集群中的数据节点dnode,涉及集群相关的下表11项参数必须完全相同,否则不能成功加入到集群中。** +**加入到集群中的数据节点dnode,涉及集群相关的下表9项参数必须完全相同,否则不能成功加入到集群中。** | **#** | **配置参数名称** | **含义** | | ----- | ------------------ | ---------------------------------------- | @@ -68,6 +68,8 @@ arbitrator ha.taosdata.com:6042 ``` Welcome to the TDengine shell from Linux, Client Version:2.0.0.0 + + Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. taos> show dnodes; diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md old mode 100644 new mode 100755 index 31e1ed4cc1ba7372a276391b2711a56f63b63ecc..beec934e91d4132526f532dffd02f11dfb3c32c5 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -48,11 +48,12 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 | 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL | | 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] | | 5 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] | -| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 | +| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 | | 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768 用于 NULL | | 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用于 NULL | | 9 | BOOL | 1 | 布尔型,{true, false} | | 10 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 | +| 11 | JSON | | json数据类型, 只有tag类型可以是json格式 | **Tips**: @@ -682,6 +683,48 @@ taos> SELECT SERVER_STATUS() AS status; Query OK, 1 row(s) in set (0.000081s) ``` +函数_block_dist()使用说明 +
语法
+
+SELECT _block_dist() FROM { tb_name | stb_name }
+
+功能说明:获得指定的(超级)表的数据块分布信息。
+
+返回结果类型:字符串。
+
+适用数据类型:不能输入任何参数。
+
+嵌套子查询支持:不支持子查询或嵌套查询。
+
+说明:
+
+返回 FROM 子句中输入的表或超级表的数据块分布情况,不支持查询条件。
+
+返回的结果是该表或超级表的数据块所包含的行数的分布直方图。
+
+返回结果如下:
+```
+summary:
+5th=[392], 10th=[392], 20th=[392], 30th=[392], 40th=[792], 50th=[792], 60th=[792], 70th=[792], 80th=[792], 90th=[792], 95th=[792], 99th=[792] Min=[392(Rows)] Max=[800(Rows)] Avg=[666(Rows)] Stddev=[2.17] Rows=[2000], Blocks=[3], Size=[5.440(Kb)] Comp=[0.23] RowsInMem=[0] SeekHeaderTime=[1(us)]
+```
+上述信息的说明如下:
+1、查询的(超级)表中,存储在文件里的数据块(data block)所包含数据行数的分布直方图,给出 5%, 10%, 20%, 30%, 40%, 50%, 60%, 70%, 80%, 90%, 95%, 99% 各分位点的数值;
+2、所有数据块中,包含行数最少的数据块的行数,即 Min 指标,此例中为 392 行;
+3、所有数据块中,包含行数最多的数据块的行数,即 Max 指标,此例中为 800 行;
+4、所有数据块行数的算术平均值,即 Avg 指标,此例中为 666 行;
+5、所有数据块行数分布的标准差,即 Stddev 指标,此例中为 2.17;
+6、数据块包含的行的总数,即 Rows 指标,此例中为 2000 行;
+7、数据块的总个数,即 Blocks 指标,此例中为 3 个;
+8、数据块占用磁盘空间的大小,即 Size 指标,此例中为 5.44 Kb;
+9、压缩后的数据块大小除以原始数据大小所得的压缩比,即 Comp 指标,此例中为 23%,表示压缩后的数据规模是原始数据规模的 23%;
+10、内存中缓存的数据行数,即 RowsInMem 指标,此例中为 0,表示内存中没有数据缓存;
+11、获取数据块信息的过程中读取头文件的时间开销,即 SeekHeaderTime 指标,此例中为 1 微秒。
+
+支持版本:指定计算算法的功能从 2.1.0.x 版本开始支持,2.1.0.0 之前的版本不支持。
+
+
 #### TAOS SQL中特殊关键词

 > TBNAME: 在超级表查询中可视为一个特殊的标签,代表查询涉及的子表名
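+
+下面给出 TBNAME 的一个示意用法(最小示例:假设存在超级表 meters,且其子表带有标签 location;表名与标签名均为示例中的假设,并非本节规定):
+
+```mysql
+-- 查询每条记录所属的子表名及其 location 标签
+SELECT TBNAME, location FROM meters;
+
+-- 利用 TBNAME 统计该超级表下的子表数量
+SELECT COUNT(TBNAME) FROM meters;
+```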
@@ -1603,6 +1646,15 @@ TAOS SQL 支持对标签、TBNAME 进行 GROUP BY 操作,也支持普通列进 IS NOT NULL 支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。 +**ORDER BY的限制** + +- 非超级表只能有一个order by. +- 超级表最多两个order by, 并且第二个必须为ts. +- order by tag,必须和group by tag一起,并且是同一个tag。 tbname和tag一样逻辑。 只适用于超级表 +- order by 普通列,必须和group by一起或者和top/bottom一起,并且是同一个普通列。 适用于超级表和普通表。如果同时存在 group by和 top/bottom一起,order by优先必须和group by同一列。 +- order by ts. 适用于超级表和普通表。 +- order by ts同时含有group by时 针对group内部用ts排序 + ## 表(列)名合法性说明 TDengine 中的表(列)名命名规则如下: 只能由字母、数字、下划线构成,数字不能在首位,长度不能超过192字节,不区分大小写。 @@ -1618,3 +1670,87 @@ TDengine 中的表(列)名命名规则如下: 支持版本 支持转义符的功能从 2.3.0.1 版本开始。 + + +## Json类型使用说明 +- 语法说明 + + 1. 创建json类型tag + + ```mysql + create stable s1 (ts timestamp, v1 int) tags (info json) + + create table s1_1 using s1 tags ('{"k1": "v1"}') + ``` + 3. json取值操作符 -> + + ```mysql + select * from s1 where info->'k1' = 'v1' + + select info->'k1' from s1 + ``` + 4. json key是否存在操作符 contains + + ```mysql + select * from s1 where info contains 'k2' + + select * from s1 where info contains 'k1' + ``` + +- 支持的操作 + + 1. 在where条件中时,支持函数match/nmatch/between and/like/and/or/is null/is no null,不支持in + + ```mysql + select * from s1 where info→'k1' match 'v*'; + + select * from s1 where info→'k1' like 'v%' and info contains 'k2'; + + select * from s1 where info is null; + + select * from s1 where info->'k1' is not null + ``` + + 2. 支持json tag放在group by、order by、join子句、union all以及子查询中,比如group by json->'key' + + 3. 支持distinct操作. + + ```mysql + select distinct info→'k1' from s1 + ``` + + 5. 标签操作 + + 支持修改json标签值(全量覆盖) + + 支持修改json标签名 + + 不支持添加json标签、删除json标签、修改json标签列宽 + +- 其他约束条件 + + 1. 只有标签列可以使用json类型,如果用json标签,标签列只能有一个。 + + 2. 长度限制:json 中key的长度不能超过256,并且key必须为可打印ascii字符;json字符串总长度不超过4096个字节。 + + 3. json格式限制: + + 1. json输入字符串可以为空("","\t"," "或null)或object,不能为非空的字符串,布尔型和数组。 + 2. object 可为{},如果object为{},则整个json串记为空。key可为"",若key为"",则json串中忽略该k-v对。 + 3. value可以为数字(int/double)或字符串或bool或null,暂不可以为数组。不允许嵌套。 + 4. 若json字符串中出现两个相同的key,则第一个生效。 + 5. json字符串里暂不支持转义。 + + 4. 当查询json中不存在的key时,返回NULL + + 5. 当json tag作为子查询结果时,不再支持上层查询继续对子查询中的json串做解析查询。 + + 比如暂不支持 + ```mysql + select jtag→'key' from (select jtag from stable) + ``` + + 不支持 + ```mysql + select jtag->'key' from (select jtag from stable) where jtag->'key'>0 + ``` diff --git a/documentation20/cn/14.devops/01.telegraf/docs.md b/documentation20/cn/14.devops/01.telegraf/docs.md index 485e7038f0e8aa122b20ba6608a629de66d7dc8c..ba1620fd8255f700acc5a311cef310dfe5e7ac38 100644 --- a/documentation20/cn/14.devops/01.telegraf/docs.md +++ b/documentation20/cn/14.devops/01.telegraf/docs.md @@ -33,8 +33,8 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 ### 下载 TDengine 插件到 grafana 插件目录 ```bash -1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip -2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/ +1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip +2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/ 3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine 4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini 5. 
sudo systemctl restart grafana-server.service diff --git a/documentation20/cn/14.devops/02.collectd/docs.md b/documentation20/cn/14.devops/02.collectd/docs.md index 0073cf78340a1100ec97cb70685410ced0cf5d4e..c27da8c6d8c1101b136b419eb689a309e5487b6c 100644 --- a/documentation20/cn/14.devops/02.collectd/docs.md +++ b/documentation20/cn/14.devops/02.collectd/docs.md @@ -32,8 +32,8 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 ### 复制 TDengine 插件到 grafana 插件目录 ```bash -1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip -2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/ +1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip +2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/ 3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine 4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini 5. sudo systemctl restart grafana-server.service diff --git a/documentation20/en/08.connector/01.java/docs.md b/documentation20/en/08.connector/01.java/docs.md index c4a7846283d594f7bb18ee1f448da8e2e3c52c32..984560e82b17e84855e135e78a3586543e23175a 100644 --- a/documentation20/en/08.connector/01.java/docs.md +++ b/documentation20/en/08.connector/01.java/docs.md @@ -575,8 +575,67 @@ public void setShort(int columnIndex, ArrayList list) throws SQLException public void setString(int columnIndex, ArrayList list, int size) throws SQLException public void setNString(int columnIndex, ArrayList list, int size) throws SQLException ``` +### Set client configuration in JDBC +Starting with TDEngine-2.3.5.0, JDBC Driver supports setting TDengine client parameters on the first connection of a Java application. The Driver supports jdbcUrl and Properties to set client parameters in JDBC-JNI mode. -### Data Subscription +Note: +* JDBC-RESTful does not support setting client parameters. +* The client parameters set in the java application are process-level. To update the client parameters, the application needs to be restarted. This is because these client parameters are global that take effect the first time the application is set up. +* The following sample code is based on taos-jdbcdriver-2.0.36. 
+ +Sample Code: +```java +public class ClientParameterSetting { + private static final String host = "127.0.0.1"; + + public static void main(String[] args) throws SQLException { + setParameterInJdbcUrl(); + + setParameterInProperties(); + } + + private static void setParameterInJdbcUrl() throws SQLException { + String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?debugFlag=135&asyncLog=0"; + + Connection connection = DriverManager.getConnection(jdbcUrl, "root", "taosdata"); + + printDatabase(connection); + + connection.close(); + } + + private static void setParameterInProperties() throws SQLException { + String jdbcUrl = "jdbc:TAOS://" + host + ":6030/"; + Properties properties = new Properties(); + properties.setProperty("user", "root"); + properties.setProperty("password", "taosdata"); + properties.setProperty("debugFlag", "135"); + properties.setProperty("asyncLog", "0"); + properties.setProperty("maxSQLLength", "1048576"); + + try (Connection conn = DriverManager.getConnection(jdbcUrl, properties)) { + printDatabase(conn); + } + } + + private static void printDatabase(Connection connection) throws SQLException { + try (Statement stmt = connection.createStatement()) { + ResultSet rs = stmt.executeQuery("show databases"); + + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); + } + System.out.println(); + } + } + } +} +``` + + +## Data Subscription #### Subscribe diff --git a/documentation20/en/09.connections/docs.md b/documentation20/en/09.connections/docs.md index f5af01d9b189d20facdd3c0702d72f256a2b4d8e..0e15e58a531cbd783168802e919aa8095fe034bf 100644 --- a/documentation20/en/09.connections/docs.md +++ b/documentation20/en/09.connections/docs.md @@ -6,25 +6,47 @@ TDengine can be quickly integrated with [Grafana](https://www.grafana.com/), an ### Install Grafana -TDengine currently supports Grafana 6.2 and above. You can download and install the package from Grafana website according to the current operating system. The download address is as follows: - -https://grafana.com/grafana/download. +TDengine currently supports Grafana 7.0 and above. You can download and install the package from Grafana website according to the current operating system. The download address is as follows: . ### Configure Grafana -Download grafana plugin from . +TDengine data source plugin for Grafana is hosted on GitHub, refer to GitHub latest release page to download the latest plugin package. Currently it's version 3.1.3 . + +It is recommended to use [`grafana-cli` command line tool](https://grafana.com/docs/grafana/latest/administration/cli/) to install the plugin. + +```bash +sudo -u grafana grafana-cli \ + --pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip \ + plugins install tdengine-datasource +``` + +Users could manually download the plugin package and install it to Grafana plugins directory. ```bash GF_VERSION=3.1.3 wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip ``` -Taking Centos 7.2 as an example, just copy grafanaplugin directory to /var/lib/grafana/plugins directory and restart Grafana. +Taking Centos 7.2 as an example, just unpack the package to /var/lib/grafana/plugins directory and restart Grafana. 
```bash sudo unzip tdengine-datasource-$GF_VERSION.zip /var/lib/grafana/plugins/ ``` +Grafana will check the signature after 7.3 and 8.x for security. Users need additional configurations in `grafana.ini` file to allow unsigned plugins like TDengine data source. + +```ini +[plugins] +allow_loading_unsigned_plugins = tdengine-datasource +``` + +In docker/compose/k8s, simply setting the two environment variables will take it all for you. + +```bash +GF_INSTALL_PLUGINS=https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip;tdengine-datasource +GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource +``` + ### Use Grafana #### Configure data source diff --git a/documentation20/en/12.taos-sql/docs.md b/documentation20/en/12.taos-sql/docs.md old mode 100644 new mode 100755 index 8533f92a3b59e27df61c16a2bc86961775bf84da..60f3ad44c62f9ba1123f8920ef626baa58cc1bb0 --- a/documentation20/en/12.taos-sql/docs.md +++ b/documentation20/en/12.taos-sql/docs.md @@ -53,7 +53,7 @@ In TDengine, the following 10 data types can be used in data model of an ordinar | 8 | TINYINT | 1 | A nullable integer type with a range of [-127, 127] | | 9 | BOOL | 1 | Boolean type,{true, false} | | 10 | NCHAR | Custom | Used to record non-ASCII strings, such as Chinese characters. Each nchar character takes up 4 bytes of storage space. Single quotation marks are used at both ends of the string, and escape characters are required for single quotation marks in the string, that is \’. When nchar is used, the string size must be specified. A column of type nchar (10) indicates that the string of this column stores up to 10 nchar characters, which will take up 40 bytes of space. If the length of the user string exceeds the declared length, an error will be reported. | - +| 11 | JSON | | Json type,only support for tag | **Tips**: @@ -1245,3 +1245,92 @@ TAOS SQL supports join columns of two tables by Primary Key timestamp between th **Availability of is no null** Is not null supports all types of columns. Non-null expression is < > "" and only applies to columns of non-numeric types. + +**Restrictions on order by** + +- A non super table can only have one order by. +- The super table can have at most two order by expression, and the second must be ts. +- Order by tag must be the same tag as group by tag. TBNAME is as logical as tag. +- Order by ordinary column must be the same ordinary column as group by or top/bottom. If both group by and top / bottom exist, order by must be in the same column as group by. +- There are both order by and group by. The internal of the group is sorted by ts +- Order by ts. + +## JSON type instructions +- Syntax description + + 1. Create JSON type tag + + ```mysql + create stable s1 (ts timestamp, v1 int) tags (info json) + + create table s1_1 using s1 tags ('{"k1": "v1"}') + ``` + 3. JSON value operator(->) + + ```mysql + select * from s1 where info->'k1' = 'v1' + + select info->'k1' from s1 + ``` + 4. JSON key existence operator(contains) + + ```mysql + select * from s1 where info contains 'k2' + + select * from s1 where info contains 'k1' + ``` + +- Supported operations + + 1. In where condition,support match/nmatch/between and/like/and/or/is null/is no null,in operator is not support. + + ```mysql + select * from s1 where info→'k1' match 'v*'; + + select * from s1 where info→'k1' like 'v%' and info contains 'k2'; + + select * from s1 where info is null; + + select * from s1 where info->'k1' is not null + ``` + + 2. 
JSON tag is supported in group by、order by、join clause、union all and subquery,like group by json->'key' + + 3. Support distinct operator. + + ```mysql + select distinct info→'k1' from s1 + ``` + + 5. Tag + + Support change JSON tag(full coverage) + + Support change the name of JSON tag + + Not support add JSON tag, delete JSON tag + +- Other constraints + + 1. Only tag columns can use JSON type. If JSON tag is used, there can only be one tag column. + + 2. Length limit:The length of the key in JSON cannot exceed 256, and the key must be printable ASCII characters; The total length of JSON string does not exceed 4096 bytes. + + 3. JSON format restrictions: + + 1. JSON input string can be empty (""," ","\t" or null) or object, and cannot be nonempty string, boolean or array. + 2. Object can be {}, if the object is {}, the whole JSON string is marked as empty. The key can be "", if the key is "", the K-V pair will be ignored in the JSON string. + 3. Value can be a number (int/double) or string, bool or null, not an array. Nesting is not allowed. + 4. If two identical keys appear in the JSON string, the first one will take effect. + 5. Escape is not supported in JSON string. + + 4. Null is returned when querying the key that does not exist in JSON. + + 5. When JSON tag is used as the sub query result, parsing and querying the JSON string in the sub query is no longer supported in the upper level query. + + The following query is not supported: + ```mysql + select jtag→'key' from (select jtag from stable) + + select jtag->'key' from (select jtag from stable) where jtag->'key'>0 + ``` diff --git a/documentation20/en/14.devops/01.telegraf/docs.md b/documentation20/en/14.devops/01.telegraf/docs.md index a8b5db08ccc1131611c12fb53970115a89368376..f4270c2f8750ef6261df27e348b6f6a539447b43 100644 --- a/documentation20/en/14.devops/01.telegraf/docs.md +++ b/documentation20/en/14.devops/01.telegraf/docs.md @@ -33,8 +33,8 @@ Please download TDengine 2.3.0.0 or the above version from TAOS Data's [official ### Download TDengine plugin to Grafana plugin's directory ```bash -1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip -2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/ +1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip +2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/ 3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine 4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini 5. sudo systemctl restart grafana-server.service diff --git a/documentation20/en/14.devops/02.collectd/docs.md b/documentation20/en/14.devops/02.collectd/docs.md index 15a83d7f0c78f9e36122d4c7a0c125daddfa1c6a..3c7dcd21380a8406a754293567d340dd1e461961 100644 --- a/documentation20/en/14.devops/02.collectd/docs.md +++ b/documentation20/en/14.devops/02.collectd/docs.md @@ -32,8 +32,8 @@ Please download TDengine 2.3.0.0 or the above version from TAOS Data's [official ### Download TDengine plugin to Grafana plugin's directory ```bash -1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip -2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/ +1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip +2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/ 3. 
sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine 4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini 5. sudo systemctl restart grafana-server.service diff --git a/packaging/release.sh b/packaging/release.sh index fd2266792ebf9ed419ef3dee1ee6e146e4e9534f..866a21e552909ca9ad8e6083f4e571f5da91cc91 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -494,11 +494,13 @@ else exit 1 fi +CORES=`grep -c ^processor /proc/cpuinfo` + if [[ "$allocator" == "jemalloc" ]]; then # jemalloc need compile first, so disable parallel build - make -j 8 && ${csudo}make install + make -j ${CORES} && ${csudo}make install else - make -j 8 && ${csudo}make install + make -j ${CORES} && ${csudo}make install fi cd ${curr_dir} diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index 4c3278e41b0c51e86d84c3e200092b4554e6523c..92d3f8a89cf3d985ca9149fdb9d910949285d5d8 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -43,9 +43,11 @@ if [ "$osType" != "Darwin" ]; then if [ "$pagMode" == "lite" ]; then #strip ${build_dir}/bin/taosd strip ${build_dir}/bin/taos - bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh" + bin_files="${build_dir}/bin/taos \ + ${script_dir}/remove_client.sh" else - bin_files="${script_dir}/remove_client.sh \ + bin_files="${build_dir}/bin/taos \ + ${script_dir}/remove_client.sh \ ${script_dir}/set_core.sh \ ${script_dir}/get_client.sh" #${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb" diff --git a/src/balance/CMakeLists.txt b/src/balance/CMakeLists.txt index 5dcff7a214f818f0d240988e9832bb9b188904e4..10ed3c77b09f5f9e552021359f45fa120a879bbe 100644 --- a/src/balance/CMakeLists.txt +++ b/src/balance/CMakeLists.txt @@ -9,3 +9,8 @@ INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(balance ${SRC}) + +IF (TD_LINUX_64 AND JEMALLOC_ENABLED) + ADD_DEPENDENCIES(balance jemalloc) +ENDIF () + diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 90379e6f7e5ccb5da12e6007ca0e94cfc859ee53..85c2215a2e71746889403e60ed09279e64574750 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -128,12 +128,13 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { // type length int32_t bytes = pSchema[i].bytes; pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 2); - if (pSchema[i].type == TSDB_DATA_TYPE_BINARY || pSchema[i].type == TSDB_DATA_TYPE_NCHAR) { + + if (pSchema[i].type == TSDB_DATA_TYPE_BINARY){ bytes -= VARSTR_HEADER_SIZE; - - if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) { - bytes = bytes / TSDB_NCHAR_SIZE; - } + } + else if(pSchema[i].type == TSDB_DATA_TYPE_NCHAR || pSchema[i].type == TSDB_DATA_TYPE_JSON) { + bytes -= VARSTR_HEADER_SIZE; + bytes = bytes / TSDB_NCHAR_SIZE; } *(int32_t *)(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 2) * totalNumOfRows + pField->bytes * i) = bytes; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index b0ea89f7c4a2e007f8ad06a8e042d665f42ee66c..b045c566f1a72b8f0ca970d8e30d1b0e6486be68 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -996,9 +996,16 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return code; } - // set the command/global limit parameters from the first subclause to the sqlcmd object - pCmd->active = pCmd->pQueryInfo; - pCmd->command = pCmd->pQueryInfo->command; + // set the command/global limit parameters from the first not empty subclause to 
the sqlcmd object + SQueryInfo* queryInfo = pCmd->pQueryInfo; + int16_t command = queryInfo->command; + while (command == TSDB_SQL_RETRIEVE_EMPTY_RESULT && queryInfo->sibling != NULL) { + queryInfo = queryInfo->sibling; + command = queryInfo->command; + } + + pCmd->active = queryInfo; + pCmd->command = command; STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pCmd->active, 0); if (pTableMetaInfo1->pTableMeta != NULL) { @@ -4512,13 +4519,16 @@ static int32_t validateSQLExprItemSQLFunc(SSqlCmd* pCmd, tSqlExpr* pExpr, if (TSDB_FUNC_IS_SCALAR(functionId)) { code = validateSQLExprItem(pCmd, pParamElem->pNode, pQueryInfo, pList, childrenTypes + i, uid, childrenHeight+i); if (code != TSDB_CODE_SUCCESS) { - free(childrenTypes); + tfree(childrenTypes); + tfree(childrenHeight); return code; } } if (!TSDB_FUNC_IS_SCALAR(functionId) && (pParamElem->pNode->type == SQL_NODE_EXPR || pParamElem->pNode->type == SQL_NODE_SQLFUNCTION)) { + tfree(childrenTypes); + tfree(childrenHeight); return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -4540,6 +4550,8 @@ static int32_t validateSQLExprItemSQLFunc(SSqlCmd* pCmd, tSqlExpr* pExpr, *height = maxChildrenHeight + 1; if (anyChildAgg && anyChildScalar) { + tfree(childrenTypes); + tfree(childrenHeight); return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } if (anyChildAgg) { @@ -4551,7 +4563,8 @@ static int32_t validateSQLExprItemSQLFunc(SSqlCmd* pCmd, tSqlExpr* pExpr, *type = SQLEXPR_TYPE_AGG; } } - free(childrenTypes); + tfree(childrenTypes); + tfree(childrenHeight); //end if param list is not null } else { if (TSDB_FUNC_IS_SCALAR(functionId)) { @@ -6312,7 +6325,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq const char* msg0 = "only one column allowed in orderby"; const char* msg1 = "invalid column name in orderby clause"; const char* msg2 = "too many order by columns"; - const char* msg3 = "only primary timestamp, first tag/tbname in groupby clause allowed as order column"; + const char* msg3 = "only primary timestamp, tag/tbname in groupby clause allowed as order column"; const char* msg4 = "only tag in groupby clause allowed in order clause"; const char* msg5 = "only primary timestamp/column in top/bottom function allowed as order column"; const char* msg6 = "only primary timestamp allowed as the second order column"; @@ -6334,8 +6347,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq SArray* pSortOrder = pSqlNode->pSortOrder; /* - * for table query, there is only one or none order option is allowed, which is the - * ts or values(top/bottom) order is supported. + * for table query, there is only one or none order option is allowed * * for super table query, the order option must be less than 3 and the second must be ts. 
* @@ -6410,7 +6422,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq return invalidOperationMsg(pMsgBuf, msg4); } SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0); - if (relTagIndex == pColIndex->colIndex) { + if (relTagIndex == pColIndex->colIndex && pColIndex->flag == TSDB_COL_TAG) { if (tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, pColIndex->colId)->type == TSDB_DATA_TYPE_JSON){ if(!pItem->isJsonExp){ return invalidOperationMsg(pMsgBuf, msg14); @@ -6863,7 +6875,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tscError("json type error, should be string"); return invalidOperationMsg(pMsg, msg25); } - if (pItem->pVar.nType > TSDB_MAX_JSON_TAGS_LEN / TSDB_NCHAR_SIZE) { + if (pItem->pVar.nLen > TSDB_MAX_JSON_TAGS_LEN / TSDB_NCHAR_SIZE) { tscError("json tag too long"); return invalidOperationMsg(pMsg, msg14); } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 36b8363f94ba6bc0a7ca87b0e70b46c539316af5..e96e3c16da84b3ffc25b33e3864c4e38dcc3977f 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -774,11 +774,12 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo memcpy(dst, p, varDataTLen(p)); } else if (varDataLen(p) > 0) { int32_t length = taosUcs4ToMbs(varDataVal(p), varDataLen(p), varDataVal(dst)); - varDataSetLen(dst, length); - - if (length == 0) { + if (length <= 0) { tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p); } + if (length >= 0){ + varDataSetLen(dst, length); + } } else { varDataSetLen(dst, 0); } @@ -809,18 +810,23 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo varDataSetLen(dst, strlen(varDataVal(dst))); }else if (type == TSDB_DATA_TYPE_JSON) { int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), varDataVal(dst)); - varDataSetLen(dst, length); - if (length == 0) { + + if (length <= 0) { tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p); } + if (length >= 0){ + varDataSetLen(dst, length); + } }else if (type == TSDB_DATA_TYPE_NCHAR) { // value -> "value" *(char*)varDataVal(dst) = '\"'; int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), POINTER_SHIFT(varDataVal(dst), CHAR_BYTES)); - *(char*)(POINTER_SHIFT(varDataVal(dst), length + CHAR_BYTES)) = '\"'; - varDataSetLen(dst, length + CHAR_BYTES*2); - if (length == 0) { + if (length <= 0) { tscError("charset:%s to %s. 
val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p); } + if (length >= 0){ + varDataSetLen(dst, length + CHAR_BYTES*2); + *(char*)(POINTER_SHIFT(varDataVal(dst), length + CHAR_BYTES)) = '\"'; + } }else if (type == TSDB_DATA_TYPE_DOUBLE) { double jsonVd = *(double*)(realData); sprintf(varDataVal(dst), "%.9lf", jsonVd); @@ -5186,7 +5192,8 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt } // global aggregate query - if (pQueryAttr->stableQuery && (pQueryAttr->simpleAgg || pQueryAttr->interval.interval > 0) && tscIsTwoStageSTableQuery(pQueryInfo, 0)) { + if (pQueryAttr->stableQuery && (pQueryAttr->simpleAgg || pQueryAttr->interval.interval > 0 || pQueryAttr->sw.gap > 0) + && tscIsTwoStageSTableQuery(pQueryInfo, 0)) { createGlobalAggregateExpr(pQueryAttr, pQueryInfo); } @@ -5514,10 +5521,10 @@ int parseJsontoTagData(char* json, SKVRowBuilder* kvRowBuilder, char* errMsg, in char *tagVal = calloc(strlen(jsonValue) * TSDB_NCHAR_SIZE + TSDB_NCHAR_SIZE, 1); *tagVal = jsonType2DbType(0, item->type); // type char* tagData = POINTER_SHIFT(tagVal,CHAR_BYTES); - if (!taosMbsToUcs4(jsonValue, strlen(jsonValue), varDataVal(tagData), + if (strlen(jsonValue) > 0 && !taosMbsToUcs4(jsonValue, strlen(jsonValue), varDataVal(tagData), (int32_t)(strlen(jsonValue) * TSDB_NCHAR_SIZE), &outLen)) { - tscError("json string error:%s|%s", strerror(errno), jsonValue); - retCode = tscSQLSyntaxErrMsg(errMsg, "serizelize json error", NULL); + tscError("charset:%s to %s. val:%s, errno:%s, convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, jsonValue, strerror(errno)); + retCode = tscSQLSyntaxErrMsg(errMsg, "charset convert json error", NULL); free(tagVal); goto end; } diff --git a/src/common/src/texpr.c b/src/common/src/texpr.c index 63a19c5c1b74028b8536982b0b2445c06de121b4..db3bec2ddfeee60a90df33cae4c9770bfdb7c46f 100644 --- a/src/common/src/texpr.c +++ b/src/common/src/texpr.c @@ -27,6 +27,7 @@ #include "tskiplist.h" #include "texpr.h" #include "tarithoperator.h" +#include "tulog.h" static int32_t exprValidateMathNode(tExprNode *pExpr); static int32_t exprValidateStringConcatNode(tExprNode *pExpr); @@ -1274,6 +1275,11 @@ void castConvert(int16_t inputType, int16_t inputBytes, char *input, int16_t Out } else if (inputType == TSDB_DATA_TYPE_NCHAR) { char *newColData = calloc(1, outputBytes * TSDB_NCHAR_SIZE + 1); int len = taosUcs4ToMbs(varDataVal(input), varDataLen(input), newColData); + if (len < 0){ + uError("castConvert taosUcs4ToMbs error 1"); + tfree(newColData); + return; + } newColData[len] = 0; *(int64_t *)output = strtoll(newColData, NULL, 10); tfree(newColData); @@ -1291,6 +1297,11 @@ void castConvert(int16_t inputType, int16_t inputBytes, char *input, int16_t Out } else if (inputType == TSDB_DATA_TYPE_NCHAR) { char *newColData = calloc(1, outputBytes * TSDB_NCHAR_SIZE + 1); int len = taosUcs4ToMbs(varDataVal(input), varDataLen(input), newColData); + if (len < 0){ + uError("castConvert taosUcs4ToMbs error 2"); + tfree(newColData); + return; + } newColData[len] = 0; *(int64_t *)output = strtoull(newColData, NULL, 10); tfree(newColData); @@ -1332,11 +1343,19 @@ void castConvert(int16_t inputType, int16_t inputBytes, char *input, int16_t Out if (inputType == TSDB_DATA_TYPE_BOOL) { char tmp[8] = {0}; int32_t len = sprintf(tmp, "%.*s", ncharSize, *(int8_t*)input ? 
"true" : "false"); - taosMbsToUcs4(tmp, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len); + bool ret = taosMbsToUcs4(tmp, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len); + if(!ret) { + uError("castConvert1 taosMbsToUcs4 error"); + return; + } varDataSetLen(output, len); } else if (inputType == TSDB_DATA_TYPE_BINARY) { int32_t len = ncharSize > varDataLen(input) ? varDataLen(input) : ncharSize; - taosMbsToUcs4(input + VARSTR_HEADER_SIZE, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len); + bool ret = taosMbsToUcs4(input + VARSTR_HEADER_SIZE, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len); + if(!ret) { + uError("castConvert2 taosMbsToUcs4 error"); + return; + } varDataSetLen(output, len); } else if (inputType == TSDB_DATA_TYPE_TIMESTAMP) { assert(0); @@ -1348,7 +1367,11 @@ void castConvert(int16_t inputType, int16_t inputBytes, char *input, int16_t Out char tmp[400] = {0}; NUM_TO_STRING(inputType, input, sizeof(tmp), tmp); int32_t len = (int32_t)(ncharSize > strlen(tmp) ? strlen(tmp) : ncharSize); - taosMbsToUcs4(tmp, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len); + bool ret = taosMbsToUcs4(tmp, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len); + if(!ret) { + uError("castConvert3 taosMbsToUcs4 error"); + return; + } varDataSetLen(output, len); } break; diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c index ee85d2f0086bb2676cb8699ad6bc2acaa33c5fbb..0d00856f9be76ee917d12ff7435142d6d55ccecf 100644 --- a/src/common/src/tvariant.c +++ b/src/common/src/tvariant.c @@ -23,6 +23,7 @@ #include "ttype.h" #include "tutil.h" #include "tvariant.h" +#include "tulog.h" #define SET_EXT_INFO(converted, res, minv, maxv, exti) do { \ if (converted == NULL || exti == NULL || *converted == false) { break; } \ @@ -359,8 +360,12 @@ int32_t tVariantToString(tVariant *pVar, char *dst) { case TSDB_DATA_TYPE_NCHAR: { dst[0] = '\''; - taosUcs4ToMbs(pVar->wpz, (twcslen(pVar->wpz) + 1) * TSDB_NCHAR_SIZE, dst + 1); - int32_t len = (int32_t)strlen(dst); + int32_t len = taosUcs4ToMbs(pVar->wpz, (twcslen(pVar->wpz) + 1) * TSDB_NCHAR_SIZE, dst + 1); + if (len < 0){ + uError("castConvert1 taosUcs4ToMbs error"); + return 0 ; + } + len = (int32_t)strlen(dst); dst[len] = '\''; dst[len + 1] = 0; return len + 1; @@ -428,11 +433,17 @@ static int32_t toBinary(tVariant *pVariant, char **pDest, int32_t *pDestSize) { pBuf = realloc(pBuf, newSize + 1); } - taosUcs4ToMbs(pVariant->wpz, (int32_t)newSize, pBuf); + int32_t len = taosUcs4ToMbs(pVariant->wpz, (int32_t)newSize, pBuf); + if (len < 0){ + uError("castConvert1 taosUcs4ToMbs error"); + } free(pVariant->wpz); pBuf[newSize] = 0; } else { - taosUcs4ToMbs(pVariant->wpz, (int32_t)newSize, *pDest); + int32_t len = taosUcs4ToMbs(pVariant->wpz, (int32_t)newSize, *pDest); + if (len < 0){ + uError("castConvert1 taosUcs4ToMbs error"); + } } } else { diff --git a/src/connector/C#/src/TDengineDriver/TDengineDriver.cs b/src/connector/C#/src/TDengineDriver/TDengineDriver.cs index 42bba438522db2a1c238609036e2b5be8b37929f..15e0ca0841c0022439c00fc1b7357b770ccb14f6 100644 --- a/src/connector/C#/src/TDengineDriver/TDengineDriver.cs +++ b/src/connector/C#/src/TDengineDriver/TDengineDriver.cs @@ -398,5 +398,8 @@ namespace TDengineDriver IntPtr stmtErrPrt = StmtErrPtr(stmt); return Marshal.PtrToStringAnsi(stmtErrPrt); } + + [DllImport("taos", EntryPoint = "taos_fetch_lengths", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FetchLengths(IntPtr taos); } } diff 
--git a/src/connector/C#/src/test/Cases/Cases.csproj b/src/connector/C#/src/test/Cases/Cases.csproj index f2ae6938fb4b8c58d9bb657e5fb504814068e92e..57c0dd8f7d363e9da4ae580751cacf706f714883 100644 --- a/src/connector/C#/src/test/Cases/Cases.csproj +++ b/src/connector/C#/src/test/Cases/Cases.csproj @@ -9,4 +9,8 @@ net5.0 + + true + ..\doc\FunctionTest.XML + diff --git a/src/connector/C#/src/test/Cases/FetchLength.cs b/src/connector/C#/src/test/Cases/FetchLength.cs new file mode 100644 index 0000000000000000000000000000000000000000..b5c5c4ecadcd1ff67060a62ac6cfb460e65a530d --- /dev/null +++ b/src/connector/C#/src/test/Cases/FetchLength.cs @@ -0,0 +1,44 @@ +using System; +using Test.UtilsTools; +using System.Collections.Generic; + +namespace Cases +{ + + public class FetchLengthCase + { + /// xiaolei + /// TestRetrieveBinary + /// TD-12103 C# connector fetch_row with binary data retrieving error + /// FetchLength.cs + /// pass or failed + public void TestRetrieveBinary(IntPtr conn) + { + string sql1 = "create stable stb1 (ts timestamp, name binary(10)) tags(n int);"; + string sql2 = "insert into tb1 using stb1 tags(1) values(now, 'log');"; + string sql3 = "insert into tb2 using stb1 tags(2) values(now, 'test');"; + string sql4 = "insert into tb3 using stb1 tags(3) values(now, 'db02');"; + string sql5 = "insert into tb4 using stb1 tags(4) values(now, 'db3');"; + + string sql6 = "select distinct(name) from stb1;";// + + UtilsTools.ExecuteQuery(conn, sql1); + UtilsTools.ExecuteQuery(conn, sql2); + UtilsTools.ExecuteQuery(conn, sql3); + UtilsTools.ExecuteQuery(conn, sql4); + UtilsTools.ExecuteQuery(conn, sql5); + + IntPtr resPtr = IntPtr.Zero; + resPtr = UtilsTools.ExecuteQuery(conn, sql6); + List> result = UtilsTools.GetResultSet(resPtr); + + List colname = result[0]; + List data = result[1]; + UtilsTools.AssertEqual("db3", data[0]); + UtilsTools.AssertEqual("log", data[1]); + UtilsTools.AssertEqual("db02", data[2]); + UtilsTools.AssertEqual("test", data[3]); + + } + } +} diff --git a/src/connector/C#/src/test/Cases/Program.cs b/src/connector/C#/src/test/Cases/Program.cs index a1b47f3890134040d3060fdb7b1e8d3beed4b7dd..89f878e994aa35977fc69c5576bca0ec21c41882 100644 --- a/src/connector/C#/src/test/Cases/Program.cs +++ b/src/connector/C#/src/test/Cases/Program.cs @@ -1,64 +1,67 @@ -using System; -using Test.UtilsTools; -using Cases; - -namespace Cases.EntryPoint -{ - class Program - { - - static void Main(string[] args) - { - IntPtr conn = IntPtr.Zero; - IntPtr stmt = IntPtr.Zero; - IntPtr res = IntPtr.Zero; - - conn = UtilsTools.TDConnection("127.0.0.1", "root", "taosdata", "", 0); - UtilsTools.ExecuteQuery(conn, "drop database if exists csharp"); - UtilsTools.ExecuteQuery(conn, "create database if not exists csharp keep 3650"); - UtilsTools.ExecuteQuery(conn, "use csharp"); - - Console.WriteLine("====================StableColumnByColumn==================="); - StableColumnByColumn columnByColumn = new StableColumnByColumn(); - columnByColumn.Test(conn, "stablecolumnbycolumn"); - Console.WriteLine("====================StmtStableQuery==================="); - StmtStableQuery stmtStableQuery = new StmtStableQuery(); - stmtStableQuery.Test(conn, "stablecolumnbycolumn"); - - Console.WriteLine("====================StableMutipleLine==================="); - StableMutipleLine mutipleLine = new StableMutipleLine(); - mutipleLine.Test(conn, "stablemutipleline"); - - //================================================================================ - - 
Console.WriteLine("====================NtableSingleLine==================="); - NtableSingleLine ntableSingleLine = new NtableSingleLine(); - ntableSingleLine.Test(conn, "stablesingleline"); - - Console.WriteLine("====================NtableMutipleLine==================="); - NtableMutipleLine ntableMutipleLine = new NtableMutipleLine(); - ntableMutipleLine.Test(conn, "ntablemutipleline"); - Console.WriteLine("====================StmtNtableQuery==================="); - StmtNtableQuery stmtNtableQuery = new StmtNtableQuery(); - stmtNtableQuery.Test(conn, "ntablemutipleline"); - - Console.WriteLine("====================NtableColumnByColumn==================="); - NtableColumnByColumn ntableColumnByColumn = new NtableColumnByColumn(); - ntableColumnByColumn.Test(conn, "ntablecolumnbycolumn"); - - Console.WriteLine("====================fetchfeilds==================="); - FetchFields fetchFields = new FetchFields(); - fetchFields.Test(conn,"fetchfeilds"); - - Console.WriteLine("===================JsonTagTest===================="); - JsonTagTest jsonTagTest = new JsonTagTest(); - jsonTagTest.Test(conn); - - // UtilsTools.ExecuteQuery(conn, "drop database if exists csharp"); - UtilsTools.CloseConnection(conn); - UtilsTools.ExitProgram(); - - - } - } -} +using System; +using Test.UtilsTools; +using Cases; + +namespace Cases.EntryPoint +{ + class Program + { + + static void Main(string[] args) + { + IntPtr conn = IntPtr.Zero; + IntPtr stmt = IntPtr.Zero; + IntPtr res = IntPtr.Zero; + + conn = UtilsTools.TDConnection("127.0.0.1", "root", "taosdata", "", 0); + UtilsTools.ExecuteQuery(conn, "drop database if exists csharp"); + UtilsTools.ExecuteQuery(conn, "create database if not exists csharp keep 3650"); + UtilsTools.ExecuteQuery(conn, "use csharp"); + + Console.WriteLine("====================StableColumnByColumn==================="); + StableColumnByColumn columnByColumn = new StableColumnByColumn(); + columnByColumn.Test(conn, "stablecolumnbycolumn"); + Console.WriteLine("====================StmtStableQuery==================="); + StmtStableQuery stmtStableQuery = new StmtStableQuery(); + stmtStableQuery.Test(conn, "stablecolumnbycolumn"); + + Console.WriteLine("====================StableMutipleLine==================="); + StableMutipleLine mutipleLine = new StableMutipleLine(); + mutipleLine.Test(conn, "stablemutipleline"); + + //================================================================================ + + Console.WriteLine("====================NtableSingleLine==================="); + NtableSingleLine ntableSingleLine = new NtableSingleLine(); + ntableSingleLine.Test(conn, "stablesingleline"); + + Console.WriteLine("====================NtableMutipleLine==================="); + NtableMutipleLine ntableMutipleLine = new NtableMutipleLine(); + ntableMutipleLine.Test(conn, "ntablemutipleline"); + Console.WriteLine("====================StmtNtableQuery==================="); + StmtNtableQuery stmtNtableQuery = new StmtNtableQuery(); + stmtNtableQuery.Test(conn, "ntablemutipleline"); + + Console.WriteLine("====================NtableColumnByColumn==================="); + NtableColumnByColumn ntableColumnByColumn = new NtableColumnByColumn(); + ntableColumnByColumn.Test(conn, "ntablecolumnbycolumn"); + + Console.WriteLine("====================fetchfeilds==================="); + FetchFields fetchFields = new FetchFields(); + fetchFields.Test(conn, "fetchfeilds"); + + Console.WriteLine("===================JsonTagTest===================="); + JsonTagTest jsonTagTest = new JsonTagTest(); + 
jsonTagTest.Test(conn);
+
+            Console.WriteLine("====================fetchLengthCase===================");
+            FetchLengthCase fetchLengthCase = new FetchLengthCase();
+            fetchLengthCase.TestRetrieveBinary(conn);
+
+            UtilsTools.ExecuteQuery(conn, "drop database if exists csharp");
+            UtilsTools.CloseConnection(conn);
+            UtilsTools.ExitProgram();
+
+        }
+    }
+}
diff --git a/src/connector/C#/src/test/Cases/Utils.cs b/src/connector/C#/src/test/Cases/Utils.cs
index a549d75b16f76539b0f19d73eab576d0d9d582db..7877601e0adbc38c186bd44456ceb3005d806ff1 100644
--- a/src/connector/C#/src/test/Cases/Utils.cs
+++ b/src/connector/C#/src/test/Cases/Utils.cs
@@ -35,7 +35,6 @@ namespace Test.UtilsTools
             else
             {
                 Console.WriteLine(sql.ToString() + " success");
-
             }
             return res;
         }
@@ -83,9 +82,13 @@ namespace Test.UtilsTools
             IntPtr rowdata;
             StringBuilder builder = new StringBuilder();
+
             while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero)
             {
                 queryRows++;
+                IntPtr colLengthPtr = TDengine.FetchLengths(res);
+                int[] colLengthArr = new int[fieldCount];
+                Marshal.Copy(colLengthPtr, colLengthArr, 0, fieldCount);
                 for (int fields = 0; fields < fieldCount; ++fields)
                 {
                     TDengineMeta meta = metas[fields];
@@ -131,7 +134,7 @@ namespace Test.UtilsTools
                             builder.Append(v7);
                             break;
                         case TDengineDataType.TSDB_DATA_TYPE_BINARY:
-                            string v8 = Marshal.PtrToStringAnsi(data);
+                            string v8 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
                             builder.Append(v8);
                             break;
                         case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
@@ -139,7 +142,7 @@ namespace Test.UtilsTools
                             builder.Append(v9);
                             break;
                         case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
-                            string v10 = Marshal.PtrToStringAnsi(data);
+                            string v10 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
                             builder.Append(v10);
                             break;
                         case TDengineDataType.TSDB_DATA_TYPE_JSONTAG:
@@ -164,6 +167,117 @@ namespace Test.UtilsTools
             TDengine.FreeResult(res);
             Console.WriteLine("");
         }
+        public static List<List<string>> GetResultSet(IntPtr res)
+        {
+            List<List<string>> result = new List<List<string>>();
+            List<string> colName = new List<string>();
+            List<string> dataRaw = new List<string>();
+            long queryRows = 0;
+            if (!IsValidResult(res))
+            {
+                ExitProgram();
+            }
+
+            int fieldCount = TDengine.FieldCount(res);
+            List<TDengineMeta> metas = TDengine.FetchFields(res);
+
+            for (int j = 0; j < metas.Count; j++)
+            {
+                TDengineMeta meta = (TDengineMeta)metas[j];
+                colName.Add(meta.name);
+            }
+            result.Add(colName);
+
+            IntPtr rowdata;
+            while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero)
+            {
+                queryRows++;
+                IntPtr colLengthPtr = TDengine.FetchLengths(res);
+                int[] colLengthArr = new int[fieldCount];
+                Marshal.Copy(colLengthPtr, colLengthArr, 0, fieldCount);
+
+                for (int fields = 0; fields < fieldCount; ++fields)
+                {
+                    TDengineMeta meta = metas[fields];
+                    int offset = IntPtr.Size * fields;
+                    IntPtr data = Marshal.ReadIntPtr(rowdata, offset);
+
+                    if (data == IntPtr.Zero)
+                    {
+                        dataRaw.Add("NULL");
+                        continue;
+                    }
+
+                    switch ((TDengineDataType)meta.type)
+                    {
+                        case TDengineDataType.TSDB_DATA_TYPE_BOOL:
+                            bool v1 = Marshal.ReadByte(data) == 0 ? false : true;
+                            dataRaw.Add(v1.ToString());
+                            break;
+                        case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
+                            byte v2 = Marshal.ReadByte(data);
+                            dataRaw.Add(v2.ToString());
+                            break;
+                        case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
+                            short v3 = Marshal.ReadInt16(data);
+                            dataRaw.Add(v3.ToString());
+                            break;
+                        case TDengineDataType.TSDB_DATA_TYPE_INT:
+                            int v4 = Marshal.ReadInt32(data);
+                            dataRaw.Add(v4.ToString());
+                            break;
+                        case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
+                            long v5 = Marshal.ReadInt64(data);
+                            dataRaw.Add(v5.ToString());
+                            break;
+                        case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
+                            float v6 = (float)Marshal.PtrToStructure(data, typeof(float));
+                            dataRaw.Add(v6.ToString());
+                            break;
+                        case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
+                            double v7 = (double)Marshal.PtrToStructure(data, typeof(double));
+                            dataRaw.Add(v7.ToString());
+                            break;
+                        case TDengineDataType.TSDB_DATA_TYPE_BINARY:
+                            string v8 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
+                            dataRaw.Add(v8);
+                            break;
+                        case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
+                            long v9 = Marshal.ReadInt64(data);
+                            dataRaw.Add(v9.ToString());
+                            break;
+                        case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
+                            string v10 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
+                            dataRaw.Add(v10);
+                            break;
+                    }
+                }
+
+            }
+            result.Add(dataRaw);
+
+            if (TDengine.ErrorNo(res) != 0)
+            {
+                Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res));
+            }
+            TDengine.FreeResult(res); Console.WriteLine("");
+            return result;
+        }
+
+        public static bool IsValidResult(IntPtr res)
+        {
+            if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
+            {
+                if (res != IntPtr.Zero)
+                {
+                    Console.Write("reason: " + TDengine.Error(res));
+                    return false;
+                }
+                Console.WriteLine("");
+                return false;
+            }
+            return true;
+        }
         public static void CloseConnection(IntPtr conn)
         {
             if (conn != IntPtr.Zero)
@@ -183,6 +297,18 @@ namespace Test.UtilsTools
             List<TDengineMeta> metas = TDengine.FetchFields(res);
             return metas;
         }
+        public static void AssertEqual(string expectVal, string actualVal)
+        {
+            if (expectVal == actualVal)
+            {
+                Console.WriteLine("{0}=={1} pass", expectVal, actualVal);
+            }
+            else
+            {
+                Console.WriteLine("{0}=={1} failed", expectVal, actualVal);
+                ExitProgram();
+            }
+        }
         public static void ExitProgram()
         {
             TDengine.Cleanup();
diff --git a/src/connector/C#/src/test/XUnitTest/XUnitTest.csproj b/src/connector/C#/src/test/XUnitTest/XUnitTest.csproj
index 97d13e5e9e74abada2efa27d64e57adbe5459023..997a9d6fe072c01ffeb45a32773f8c76a530825c 100644
--- a/src/connector/C#/src/test/XUnitTest/XUnitTest.csproj
+++ b/src/connector/C#/src/test/XUnitTest/XUnitTest.csproj
@@ -2,10 +2,14 @@
     <TargetFramework>net5.0</TargetFramework>
-    <IsPackable>false</IsPackable>
+
+    <GenerateDocumentationFile>true</GenerateDocumentationFile>
+    <DocumentationFile>..\doc\UnitTest.XML</DocumentationFile>
+
     <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
index d8ac10d839651bb476a8688f28917aa356b5b1fe..9f573452b1aacbaaf8593433a0b0c5986ad9d3aa 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java
@@ -443,16 +443,29 @@ public class TSDBResultSetRowData {
             case 0: {
                 milliseconds = ts;
                 fracNanoseconds = (int) (ts * 1_000_000 % 1_000_000_000);
+                fracNanoseconds = fracNanoseconds < 0 ?
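+                // Worked example (ms precision): ts = -1, i.e. 1 ms before the epoch,
+                // gives fracNanoseconds = (-1 * 1_000_000) % 1_000_000_000 = -1_000_000;
+                // shifting by 1_000_000_000 yields 999_000_000, so the Timestamp reports
+                // getTime() == -1 with getNanos() == 999_000_000 (1969-12-31 23:59:59.999
+                // UTC), keeping nanos inside [0, 1_000_000_000).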
1_000_000_000 + fracNanoseconds : fracNanoseconds; break; } case 1: { milliseconds = ts / 1_000; fracNanoseconds = (int) (ts * 1_000 % 1_000_000_000); + if (fracNanoseconds < 0) { + if (milliseconds == 0 ){ + milliseconds = -1; + } + fracNanoseconds += 1_000_000_000; + } break; } case 2: { milliseconds = ts / 1_000_000; fracNanoseconds = (int) (ts % 1_000_000_000); + if (fracNanoseconds < 0) { + if (milliseconds == 0 ){ + milliseconds = -1; + } + fracNanoseconds += 1_000_000_000; + } break; } default: { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DatetimeBefore1970Test.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DatetimeBefore1970Test.java index bfffaa4a129dc7fe19a92c34abbcc886d5e4e22f..4f7f123f5c8b135e757919c8913f1f1bb032d98e 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DatetimeBefore1970Test.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DatetimeBefore1970Test.java @@ -1,23 +1,36 @@ package com.taosdata.jdbc.cases; +import com.taosdata.jdbc.annotation.CatalogRunner; +import com.taosdata.jdbc.annotation.Description; +import com.taosdata.jdbc.annotation.TestTarget; import com.taosdata.jdbc.utils.TimestampUtil; import org.junit.*; +import org.junit.runner.RunWith; import java.sql.*; +@RunWith(CatalogRunner.class) +@TestTarget(alias = "negative value convert to timestamp", author = "huolibo", version = "2.0.37") public class DatetimeBefore1970Test { private static final String host = "127.0.0.1"; private Connection conn; @Test - public void test() throws SQLException { + @Description("millisecond") + public void msTest() throws SQLException { + conn = createEnvironment("ms"); + long now = System.currentTimeMillis(); try (Statement stmt = conn.createStatement()) { // given + // before + stmt.executeUpdate("insert into weather(ts) values('1969-12-31 00:00:00.001')"); stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999')"); + // zero stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000')"); - stmt.executeUpdate("insert into weather(ts) values('1970-01-01 08:00:00.000')"); - stmt.executeUpdate("insert into weather(ts) values('1970-01-01 07:59:59.999')"); + //after + stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.001')"); + stmt.executeUpdate("insert into weather(ts) values(" + now + ")"); ResultSet rs = stmt.executeQuery("select * from weather order by ts asc"); ResultSetMetaData metaData = rs.getMetaData(); Assert.assertEquals(2, metaData.getColumnCount()); @@ -26,44 +39,221 @@ public class DatetimeBefore1970Test { rs.next(); // then Timestamp ts = rs.getTimestamp("ts"); - Assert.assertEquals("1969-12-31 23:59:59.999", TimestampUtil.longToDatetime(ts.getTime())); + Assert.assertEquals(-24 * 60 * 60 * 1000 + 1, ts.getTime()); // when rs.next(); // then ts = rs.getTimestamp("ts"); - Assert.assertEquals("1970-01-01 00:00:00.000", TimestampUtil.longToDatetime(ts.getTime())); + Assert.assertEquals(-1, ts.getTime()); // when rs.next(); // then ts = rs.getTimestamp("ts"); - Assert.assertEquals("1970-01-01 08:00:00.000", TimestampUtil.longToDatetime(ts.getTime())); + Assert.assertEquals(0, ts.getTime()); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + Assert.assertEquals(1, ts.getTime()); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + Assert.assertEquals(now, ts.getTime()); + } + } + + @Test + @Description("microsecond") + public void usTest() throws SQLException { + conn = createEnvironment("us"); + 
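+        // Microsecond precision: the inserts below bracket the epoch by exactly 1 us.
+        // '1969-12-31 23:59:59.999999' is 1 us before the epoch, which the converter
+        // must report as getTime() == -1 (the last whole millisecond) together with
+        // getNanos() == 999_999_000, mirroring the normalization added in
+        // TSDBResultSetRowData above.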
long now = System.currentTimeMillis(); + try (Statement stmt = conn.createStatement()) { + // given + stmt.executeUpdate("insert into weather(ts) values('1969-12-31 00:00:00.000001')"); + stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999999')"); + stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000000')"); + stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000001')"); + stmt.executeUpdate("insert into weather(ts) values(" + now + ")"); + ResultSet rs = stmt.executeQuery("select * from weather order by ts asc"); + ResultSetMetaData metaData = rs.getMetaData(); + Assert.assertEquals(2, metaData.getColumnCount()); + + // when + rs.next(); + // then + Timestamp ts = rs.getTimestamp("ts"); + Assert.assertEquals(-24 * 60 * 60 * 1000, ts.getTime()); + Assert.assertEquals(1_000, ts.getNanos()); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + Assert.assertEquals(-1, ts.getTime()); + Assert.assertEquals(999_999_000, ts.getNanos()); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + Assert.assertEquals(0, ts.getTime()); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + Assert.assertEquals(0, ts.getTime()); + Assert.assertEquals(1_000, ts.getNanos()); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + String s = String.valueOf(now); + Assert.assertEquals(Long.parseLong(s.substring(0, s.length() - 3)), ts.getTime()); + Assert.assertEquals(Long.parseLong(s.substring(s.length() - 6) + "000"), ts.getNanos()); + } + } + + @Test + @Description("nanosecond") + public void nanoTest() throws SQLException { + conn = createEnvironment("ns"); + long now = System.currentTimeMillis() * 1000_000L + System.nanoTime() % 1000_000L; + try (Statement stmt = conn.createStatement()) { + // given + stmt.executeUpdate("insert into weather(ts) values('1969-12-31 00:00:00.000000123')"); + stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999999999')"); + stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000')"); + stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000000001')"); + stmt.executeUpdate("insert into weather(ts) values(" + now + ")"); + ResultSet rs = stmt.executeQuery("select * from weather order by ts asc"); + ResultSetMetaData metaData = rs.getMetaData(); + Assert.assertEquals(2, metaData.getColumnCount()); + + // when + rs.next(); + // then + Timestamp ts = rs.getTimestamp("ts"); + Assert.assertEquals(-24 * 60 * 60 * 1_000, ts.getTime()); + Assert.assertEquals(123, ts.getNanos()); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + Assert.assertEquals(-1, ts.getTime()); + Assert.assertEquals(999999999, ts.getNanos()); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + Assert.assertEquals(0, ts.getNanos()); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + Assert.assertEquals(1, ts.getNanos()); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + String s = String.valueOf(now); + Assert.assertEquals(Long.parseLong(s.substring(0, s.length() - 6)), ts.getTime()); + Assert.assertEquals(Long.parseLong(s.substring(s.length() - 9)), ts.getNanos()); + } + } + + @Test + @Ignore + @Description("nanosecond convert timestamp when timezone is asia shanghai") + public void asiaShanghaiTest() throws SQLException { + conn = createEnvironment("ns"); + long now = System.currentTimeMillis() * 1000_000L + System.nanoTime() % 1000_000L; + try (Statement stmt = 
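+        // This case is @Ignore'd above: unlike the UTC tests, its expected strings
+        // assume client and server render timestamps in Asia/Shanghai (UTC+8), where
+        // the epoch prints as '1970-01-01 08:00:00.000'; it documents that setup
+        // rather than running in CI.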
conn.createStatement()) { + // given + stmt.executeUpdate("insert into weather(ts) values('1969-12-31 00:00:00.000000123')"); + stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999999999')"); + stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000')"); + stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000000001')"); + stmt.executeUpdate("insert into weather(ts) values(" + now + ")"); + ResultSet rs = stmt.executeQuery("select * from weather order by ts asc"); + ResultSetMetaData metaData = rs.getMetaData(); + Assert.assertEquals(2, metaData.getColumnCount()); + + // when + rs.next(); + // then + Timestamp ts = rs.getTimestamp("ts"); + Assert.assertEquals("1969-12-31 08:00:00.000", TimestampUtil.longToDatetime(ts.getTime())); + Assert.assertEquals(123, ts.getNanos()); // when rs.next(); // then ts = rs.getTimestamp("ts"); Assert.assertEquals("1970-01-01 07:59:59.999", TimestampUtil.longToDatetime(ts.getTime())); + Assert.assertEquals(999999999, ts.getNanos()); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + Assert.assertEquals("1970-01-01 08:00:00.000", TimestampUtil.longToDatetime(ts.getTime())); + Assert.assertEquals(0, ts.getNanos()); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + Assert.assertEquals("1970-01-01 08:00:00.000", TimestampUtil.longToDatetime(ts.getTime())); + Assert.assertEquals(1, ts.getNanos()); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + String s = String.valueOf(now); + Assert.assertEquals(Long.parseLong(s.substring(0, s.length() - 6)), ts.getTime()); + Assert.assertEquals(Long.parseLong(s.substring(s.length() - 9)), ts.getNanos()); } } - @Before - public void before() throws SQLException { - conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); + private Connection createEnvironment(String precision) throws SQLException { + String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata&timezone=UTC"; + String createSql = "create database if not exists test_timestamp keep 36500"; + if (!isEmpty(precision)) { + createSql += " precision '" + precision + "'"; + } + conn = DriverManager.getConnection(url); Statement stmt = conn.createStatement(); stmt.execute("drop database if exists test_timestamp"); - stmt.execute("create database if not exists test_timestamp keep 36500"); + stmt.execute(createSql); stmt.execute("use test_timestamp"); stmt.execute("create table weather(ts timestamp,f1 float)"); stmt.close(); + return conn; + } + + private boolean isEmpty(String string) { + return null == string || string.trim().equals(""); } @After public void after() throws SQLException { - Statement stmt = conn.createStatement(); - stmt.execute("drop database if exists test_timestamp"); - if (conn != null) + if (conn != null) { + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists test_timestamp"); + stmt.close(); conn.close(); + } } } diff --git a/src/connector/python/README.md b/src/connector/python/README.md index 1bde964828f1c52bf65e62ef67f2fdb7fc90c355..e4fce45a59bea74ea64a5da082767579459d4196 100644 --- a/src/connector/python/README.md +++ b/src/connector/python/README.md @@ -41,6 +41,7 @@ cursor.execute("show databases") results = cursor.fetchall() for row in results: print(row) + cursor.close() conn.close() ``` @@ -57,8 +58,10 @@ result = conn.query("show databases") num_of_fields = result.field_count for field in result.fields: print(field) + for row in result: 
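+# each iteration yields one row tuple fetched from the native result set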
print(row) + result.close() conn.execute("drop database pytest") conn.close() @@ -75,12 +78,13 @@ def fetch_callback(p_param, p_result, num_of_rows): print("fetched ", num_of_rows, "rows") p = cast(p_param, POINTER(Counter)) result = TaosResult(p_result) - + if num_of_rows == 0: print("fetching completed") p.contents.done = True result.close() return + if num_of_rows < 0: p.contents.done = True result.check_error(num_of_rows) @@ -90,6 +94,7 @@ def fetch_callback(p_param, p_result, num_of_rows): for row in result.rows_iter(num_of_rows): # print(row) None + p.contents.count += result.row_count result.fetch_rows_a(fetch_callback, p_param) @@ -97,17 +102,19 @@ def fetch_callback(p_param, p_result, num_of_rows): def query_callback(p_param, p_result, code): # type: (c_void_p, c_void_p, c_int) -> None - if p_result == None: + if p_result is None: return + result = TaosResult(p_result) if code == 0: result.fetch_rows_a(fetch_callback, p_param) + result.check_error(code) class Counter(Structure): _fields_ = [("count", c_int), ("done", c_bool)] - + def __str__(self): return "{ count: %d, done: %s }" % (self.count, self.done) @@ -116,10 +123,11 @@ def test_query(conn): # type: (TaosConnection) -> None counter = Counter(count=0) conn.query_a("select * from log.log", query_callback, byref(counter)) - + while not counter.done: print("wait query callback") time.sleep(1) + print(counter) conn.close() @@ -182,6 +190,7 @@ result = conn.query("select * from log") for row in result: print(row) + result.close() stmt.close() conn.close() @@ -237,18 +246,20 @@ result.close() result = conn.query("select * from log") for row in result: print(row) + result.close() stmt.close() conn.close() ``` -### Statement API - Subscribe +### Subscription ```python import taos +import random conn = taos.connect() -dbname = "pytest_taos_subscribe_callback" +dbname = "pytest_taos_subscribe" conn.execute("drop database if exists %s" % dbname) conn.execute("create database if not exists %s" % dbname) conn.select_db(dbname) @@ -256,7 +267,7 @@ conn.execute("create table if not exists log(ts timestamp, n int)") for i in range(10): conn.execute("insert into log values(now, %d)" % i) -sub = conn.subscribe(True, "test", "select * from log", 1000) +sub = conn.subscribe(False, "test", "select * from log", 1000) print("# consume from begin") for ts, n in sub.consume(): print(ts, n) @@ -268,9 +279,18 @@ for i in range(5): for ts, n in result: print(ts, n) +sub.close(True) +print("# keep progress consume") +sub = conn.subscribe(False, "test", "select * from log", 1000) +result = sub.consume() +rows = result.fetch_all() +# consume from latest subscription needs root privilege(for /var/lib/taos). 
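+# restart=False (the first argument) resumes from the offset persisted for topic
+# "test", so this fresh consume() returns no rows even though the table already
+# holds data; reading that saved progress under /var/lib/taos is why root
+# privilege is required here.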
+assert result.row_count == 0 +print("## consumed ", len(rows), "rows") + print("# consume with a stop condition") for i in range(10): - conn.execute("insert into log values(now, %d)" % int(random() * 10)) + conn.execute("insert into log values(now, %d)" % random.randint(0, 10)) result = sub.consume() try: ts, n = next(result) @@ -283,12 +303,13 @@ for i in range(10): continue sub.close() +# sub.close() conn.execute("drop database if exists %s" % dbname) -conn.close() +# conn.close() ``` -### Statement API - Subscribe asynchronously with callback +### Subscription asynchronously with callback ```python from taos import * @@ -300,7 +321,7 @@ import time def subscribe_callback(p_sub, p_result, p_param, errno): # type: (c_void_p, c_void_p, c_void_p, c_int) -> None print("# fetch in callback") - result = TaosResult(p_result) + result = TaosResult(c_void_p(p_result)) result.check_error(errno) for row in result.rows_iter(): ts, n = row() @@ -311,42 +332,45 @@ def test_subscribe_callback(conn): # type: (TaosConnection) -> None dbname = "pytest_taos_subscribe_callback" try: + print("drop if exists") conn.execute("drop database if exists %s" % dbname) + print("create database") conn.execute("create database if not exists %s" % dbname) - conn.select_db(dbname) - conn.execute("create table if not exists log(ts timestamp, n int)") - + print("create table") + # conn.execute("use %s" % dbname) + conn.execute("create table if not exists %s.log(ts timestamp, n int)" % dbname) + print("# subscribe with callback") - sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback) - + sub = conn.subscribe(False, "test", "select * from %s.log" % dbname, 1000, subscribe_callback) + for i in range(10): - conn.execute("insert into log values(now, %d)" % i) + conn.execute("insert into %s.log values(now, %d)" % (dbname, i)) time.sleep(0.7) + sub.close() - + conn.execute("drop database if exists %s" % dbname) - conn.close() + # conn.close() except Exception as err: conn.execute("drop database if exists %s" % dbname) - conn.close() + # conn.close() raise err if __name__ == "__main__": test_subscribe_callback(connect()) - ``` -### Statement API - Stream +### Stream ```python from taos import * from ctypes import * +import time def stream_callback(p_param, p_result, p_row): # type: (c_void_p, c_void_p, c_void_p) -> None - - if p_result == None or p_row == None: + if p_result is None or p_row is None: return result = TaosResult(p_result) row = TaosRow(result, p_row) @@ -355,13 +379,12 @@ def stream_callback(p_param, p_result, p_row): p = cast(p_param, POINTER(Counter)) p.contents.count += count print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, p.contents.count)) - except Exception as err: print(err) raise err -class Counter(ctypes.Structure): +class Counter(Structure): _fields_ = [ ("count", c_int), ] @@ -378,16 +401,17 @@ def test_stream(conn): conn.execute("create database if not exists %s" % dbname) conn.select_db(dbname) conn.execute("create table if not exists log(ts timestamp, n int)") - + result = conn.query("select count(*) from log interval(5s)") assert result.field_count == 2 counter = Counter() counter.count = 0 stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter)) - + for _ in range(0, 20): conn.execute("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)") time.sleep(2) + stream.close() conn.execute("drop database if exists %s" % dbname) conn.close() @@ -399,12 +423,14 @@ def test_stream(conn): if 
__name__ == "__main__": test_stream(connect()) + ``` ### Insert with line protocol ```python import taos +from taos import SmlProtocol, SmlPrecision conn = taos.connect() dbname = "pytest_line" @@ -413,29 +439,22 @@ conn.execute("create database if not exists %s precision 'us'" % dbname) conn.select_db(dbname) lines = [ - 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000ns', - 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000', - 'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000', ] -conn.schemaless_insert(lines, 0, "ns") +conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED) print("inserted") -lines = [ - 'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', -] -conn.schemaless_insert(lines, 0, "ns") +conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED) result = conn.query("show tables") for row in result: print(row) -result.close() conn.execute("drop database if exists %s" % dbname) -conn.close() ``` -## License - AGPL-3.0 +## License -Keep same with [TDengine](https://github.com/taosdata/TDengine). +We use MIT license for Python connector. diff --git a/src/connector/python/examples/query-async.py b/src/connector/python/examples/query-async.py index b600b796974e47d5e5fc7d88998e95ba46bb92cd..585db2344eda4c5d38c2868c35f4d91c50926880 100644 --- a/src/connector/python/examples/query-async.py +++ b/src/connector/python/examples/query-async.py @@ -29,7 +29,7 @@ def fetch_callback(p_param, p_result, num_of_rows): def query_callback(p_param, p_result, code): # type: (c_void_p, c_void_p, c_int) -> None - if p_result == None: + if p_result is None: return result = TaosResult(p_result) if code == 0: diff --git a/src/connector/python/examples/stream.py b/src/connector/python/examples/stream.py new file mode 100644 index 0000000000000000000000000000000000000000..73cbd03c493f4441d661f924bf648bc8992aeb0a --- /dev/null +++ b/src/connector/python/examples/stream.py @@ -0,0 +1,59 @@ +from taos import * +from ctypes import * +import time + +def stream_callback(p_param, p_result, p_row): + # type: (c_void_p, c_void_p, c_void_p) -> None + if p_result is None or p_row is None: + return + result = TaosResult(p_result) + row = TaosRow(result, p_row) + try: + ts, count = row() + p = cast(p_param, POINTER(Counter)) + p.contents.count += count + print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, p.contents.count)) + except Exception as err: + print(err) + raise err + + +class Counter(Structure): + _fields_ = [ + ("count", c_int), + ] + + def __str__(self): + return "%d" % self.count + + +def test_stream(conn): + # type: (TaosConnection) -> None + dbname = "pytest_taos_stream" + try: + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s" % dbname) + conn.select_db(dbname) + conn.execute("create table if not exists log(ts timestamp, n int)") + + result = conn.query("select count(*) from log interval(5s)") + assert result.field_count == 2 + counter = Counter() + counter.count = 0 + stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter)) + + for _ in range(0, 20): + conn.execute("insert into 
log values(now,0)(now+1s, 1)(now + 2s, 2)") + time.sleep(2) + + stream.close() + conn.execute("drop database if exists %s" % dbname) + conn.close() + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + raise err + + +if __name__ == "__main__": + test_stream(connect()) diff --git a/src/connector/python/taos/__init__.py b/src/connector/python/taos/__init__.py index 7ebfa8adef6a82c979ad0544a3eb11ccd351b760..739265ef579b6a5127df8ee592b73293f113a2ef 100644 --- a/src/connector/python/taos/__init__.py +++ b/src/connector/python/taos/__init__.py @@ -86,7 +86,7 @@ def fetch_callback(p_param, p_result, num_of_rows): def query_callback(p_param, p_result, code): # type: (c_void_p, c_void_p, c_int) -> None - if p_result == None: + if p_result is None: return result = TaosResult(p_result) if code == 0: @@ -335,7 +335,7 @@ from ctypes import * def stream_callback(p_param, p_result, p_row): # type: (c_void_p, c_void_p, c_void_p) -> None - if p_result == None or p_row == None: + if p_result is None or p_row is None: return result = TaosResult(p_result) row = TaosRow(result, p_row) diff --git a/src/connector/python/taos/bind.py b/src/connector/python/taos/bind.py index 05659714ef86da3bda383bfe7d7b25403848637f..8f39278c960c285f4a8c0bfc1d8b198bb4a56f4c 100644 --- a/src/connector/python/taos/bind.py +++ b/src/connector/python/taos/bind.py @@ -317,7 +317,7 @@ class TaosMultiBind(ctypes.Structure): def _str_to_buffer(self, values): self.num = len(values) - is_null = [1 if v == None else 0 for v in values] + is_null = [1 if v is None else 0 for v in values] self.is_null = cast((c_byte * self.num)(*is_null), c_char_p) if sum(is_null) == self.num: diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py index 740af5838235a6abc41ae27e7c6a462c30977616..be39d2291a908b9349599ba13e92a205696516c7 100644 --- a/src/connector/python/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -373,9 +373,9 @@ def taos_fetch_block(result, fields=None, field_count=None): if num_of_rows == 0: return None, 0 precision = taos_result_precision(result) - if fields == None: + if fields is None: fields = taos_fetch_fields(result) - if field_count == None: + if field_count is None: field_count = taos_field_count(result) blocks = [None] * field_count fieldLen = taos_fetch_lengths(result, field_count) @@ -466,7 +466,7 @@ def taos_fetch_lengths(result, field_count=None): # type: (c_void_p, int) -> Array[int] """Make sure to call taos_fetch_row or taos_fetch_block before fetch_lengths""" lens = _libtaos.taos_fetch_lengths(result) - if field_count == None: + if field_count is None: field_count = taos_field_count(result) if not lens: raise OperationalError("field length empty, use taos_fetch_row/block before it") @@ -823,7 +823,7 @@ def taos_stmt_use_result(stmt): @stmt: TAOS_STMT* """ result = c_void_p(_libtaos.taos_stmt_use_result(stmt)) - if result == None: + if result is None: raise StatementError(taos_stmt_errstr(stmt)) return result diff --git a/src/connector/python/taos/result.py b/src/connector/python/taos/result.py index 8b8a0cf108cf7c941d0a6476d8a9c1e2c5a41b84..05085a493eb8ffede536476f1ddf3bcb083d82f8 100644 --- a/src/connector/python/taos/result.py +++ b/src/connector/python/taos/result.py @@ -41,7 +41,7 @@ class TaosResult(object): if self._result is None or self.fields is None: raise OperationalError("Invalid use of fetch iterator") - if self._block == None or self._block_iter >= self._block_length: + if self._block is None or 
self._block_iter >= self._block_length: self._block, self._block_length = self.fetch_block() self._block_iter = 0 # self._row_count += self._block_length @@ -55,7 +55,7 @@ class TaosResult(object): """fields definitions of the current result""" if self._result is None: raise ResultError("no result object setted") - if self._fields == None: + if self._fields is None: self._fields = taos_fetch_fields(self._result) return self._fields @@ -72,7 +72,7 @@ class TaosResult(object): @property def precision(self): - if self._precision == None: + if self._precision is None: self._precision = taos_result_precision(self._result) return self._precision @@ -114,7 +114,7 @@ class TaosResult(object): if self._result is None: raise OperationalError("Invalid use of fetchall") - if self._fields == None: + if self._fields is None: self._fields = taos_fetch_fields(self._result) buffer = [[] for i in range(len(self._fields))] self._row_count = 0 @@ -150,7 +150,7 @@ class TaosResult(object): return taos_errstr(self._result) def check_error(self, errno=None, close=True): - if errno == None: + if errno is None: errno = self.errno() if errno != 0: msg = self.errstr() diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 8700cf246a91655c307bbb4c3c2c111d3271fc67..fb70badb862943a0259b2dc94bf52b0a452bd714 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -274,7 +274,6 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TDB_IVLD_TAG_VAL TAOS_DEF_ERROR_CODE(0, 0x0615) //"TSDB invalid tag value") #define TSDB_CODE_TDB_NO_CACHE_LAST_ROW TAOS_DEF_ERROR_CODE(0, 0x0616) //"TSDB no cache last row data") #define TSDB_CODE_TDB_INCOMPLETE_DFILESET TAOS_DEF_ERROR_CODE(0, 0x0617) //"TSDB incomplete DFileSet") -#define TSDB_CODE_TDB_NO_JSON_TAG_KEY TAOS_DEF_ERROR_CODE(0, 0x0618) //"TSDB no tag json key") // query #define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700) //"Invalid handle") diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt index db572c9d310709dc6fe024b351126679ea9805e1..dac31fc1f1b88581ed8976634fd767a3eddd2cea 100644 --- a/src/kit/shell/CMakeLists.txt +++ b/src/kit/shell/CMakeLists.txt @@ -32,6 +32,10 @@ ELSEIF (TD_WINDOWS) LIST(APPEND SRC ./src/shellMain.c) LIST(APPEND SRC ./src/shellWindows.c) ADD_EXECUTABLE(shell ${SRC}) + IF (TD_LINUX_64 AND JEMALLOC_ENABLED) + ADD_DEPENDENCIES(shell jemalloc) + ENDIF () + TARGET_LINK_LIBRARIES(shell taos_static cJson) IF (TD_POWER) diff --git a/src/kit/taos-tools b/src/kit/taos-tools index beca4813316f254624d8dbecf54d45a5a232c61d..14a23779d24a9571cdb7165bea2b0208d54c53ad 160000 --- a/src/kit/taos-tools +++ b/src/kit/taos-tools @@ -1 +1 @@ -Subproject commit beca4813316f254624d8dbecf54d45a5a232c61d +Subproject commit 14a23779d24a9571cdb7165bea2b0208d54c53ad diff --git a/src/mnode/CMakeLists.txt b/src/mnode/CMakeLists.txt index dc2afbbb68de5a9466306721cc966a6f6c8cbd12..daa343c4ef1d5f15d357080829f556d60c95bcff 100644 --- a/src/mnode/CMakeLists.txt +++ b/src/mnode/CMakeLists.txt @@ -8,3 +8,8 @@ INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(mnode ${SRC}) + +IF (TD_LINUX_64 AND JEMALLOC_ENABLED) + ADD_DEPENDENCIES(mnode jemalloc) +ENDIF () + diff --git a/src/os/src/detail/CMakeLists.txt b/src/os/src/detail/CMakeLists.txt index 2d537d95885a5e2d86e18ff19e1851fc8eea5997..d0838fa0787ce2e13af231bbae4a6bfa9e41b798 100644 --- a/src/os/src/detail/CMakeLists.txt +++ b/src/os/src/detail/CMakeLists.txt @@ -11,6 +11,11 @@ ADD_LIBRARY(os ${SRC}) IF (TD_LINUX) TARGET_LINK_LIBRARIES(os oslinux) + + IF (TD_LINUX_64 AND 
JEMALLOC_ENABLED) + ADD_DEPENDENCIES(os jemalloc) + ENDIF () + IF (TD_ARM_32 OR TD_LINUX_32) TARGET_LINK_LIBRARIES(os atomic) ENDIF () diff --git a/src/os/src/linux/CMakeLists.txt b/src/os/src/linux/CMakeLists.txt index 612ac8d5ab44ea3d2a33686f3df83646a4f1e268..ed51e79e925591b6bfa32b7b7c156f2d1f561d89 100644 --- a/src/os/src/linux/CMakeLists.txt +++ b/src/os/src/linux/CMakeLists.txt @@ -4,4 +4,9 @@ PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. SRC) ADD_LIBRARY(oslinux ${SRC}) -TARGET_LINK_LIBRARIES(oslinux m rt z dl) \ No newline at end of file +TARGET_LINK_LIBRARIES(oslinux m rt z dl) + +IF (TD_LINUX_64 AND JEMALLOC_ENABLED) + ADD_DEPENDENCIES(oslinux jemalloc) +ENDIF () + diff --git a/src/plugins/taosadapter b/src/plugins/taosadapter index 826f3d3b7820a5c007d301854d56db003b424d0a..11d1e02255edfeeaa8d5b1f45abfa9637332ce65 160000 --- a/src/plugins/taosadapter +++ b/src/plugins/taosadapter @@ -1 +1 @@ -Subproject commit 826f3d3b7820a5c007d301854d56db003b424d0a +Subproject commit 11d1e02255edfeeaa8d5b1f45abfa9637332ce65 diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index b9bf4120b6d58856ddea1b0639bfa48dbd909a7b..6b8e31b181559c3d2e92cb52c5b50d4261c66611 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -238,7 +238,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI return TSDB_CODE_SUCCESS; } - + // (uid, tid) + VGID + TAGSIZE + VARSTR_HEADER_SIZE if (functionId == TSDB_FUNC_TID_TAG) { // todo use struct *type = TSDB_DATA_TYPE_BINARY; @@ -253,7 +253,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *interBytes = 0; return TSDB_CODE_SUCCESS; } - + if (functionId == TSDB_FUNC_COUNT) { *type = TSDB_DATA_TYPE_BIGINT; *bytes = sizeof(int64_t); @@ -261,7 +261,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI return TSDB_CODE_SUCCESS; } - + if (functionId == TSDB_FUNC_TS_COMP) { *type = TSDB_DATA_TYPE_BINARY; *bytes = 1; // this results is compressed ts data, only one byte @@ -316,20 +316,20 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *type = TSDB_DATA_TYPE_BINARY; *bytes = (dataBytes + DATA_SET_FLAG_SIZE); *interBytes = *bytes; - + return TSDB_CODE_SUCCESS; } else if (functionId == TSDB_FUNC_SUM) { *type = TSDB_DATA_TYPE_BINARY; *bytes = sizeof(SSumInfo); *interBytes = *bytes; - + return TSDB_CODE_SUCCESS; } else if (functionId == TSDB_FUNC_AVG) { *type = TSDB_DATA_TYPE_BINARY; *bytes = sizeof(SAvgInfo); *interBytes = *bytes; return TSDB_CODE_SUCCESS; - + } else if (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_IRATE) { *type = TSDB_DATA_TYPE_DOUBLE; *bytes = sizeof(SRateInfo); @@ -339,7 +339,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *type = TSDB_DATA_TYPE_BINARY; *bytes = (sizeof(STopBotInfo) + (sizeof(tValuePair) + POINTER_BYTES + extLength) * param); *interBytes = *bytes; - + return TSDB_CODE_SUCCESS; } else if (functionId == TSDB_FUNC_SAMPLE) { *type = TSDB_DATA_TYPE_BINARY; @@ -351,7 +351,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *type = TSDB_DATA_TYPE_BINARY; *bytes = sizeof(SSpreadInfo); *interBytes = *bytes; - + return TSDB_CODE_SUCCESS; } else if (functionId == TSDB_FUNC_APERCT) { *type = TSDB_DATA_TYPE_BINARY; @@ -359,13 +359,13 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI int32_t bytesDigest = (int32_t) (sizeof(SAPercentileInfo) + TDIGEST_SIZE(COMPRESSION)); *bytes = 
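    // The APERCT intermediate buffer must fit whichever percentile implementation is
    // chosen at run time: the equal-bin histogram state (bytesHist) or the t-digest
    // sketch (bytesDigest, sized via TDIGEST_SIZE at the fixed COMPRESSION level);
    // taking MAX() lets a single allocation serve both paths.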
MAX(bytesHist, bytesDigest); *interBytes = *bytes; - + return TSDB_CODE_SUCCESS; } else if (functionId == TSDB_FUNC_LAST_ROW) { *type = TSDB_DATA_TYPE_BINARY; *bytes = (sizeof(SLastrowInfo) + dataBytes); *interBytes = *bytes; - + return TSDB_CODE_SUCCESS; } else if (functionId == TSDB_FUNC_TWA) { *type = TSDB_DATA_TYPE_DOUBLE; @@ -388,7 +388,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI } else { *type = TSDB_DATA_TYPE_DOUBLE; } - + *bytes = sizeof(int64_t); *interBytes = sizeof(SSumInfo); return TSDB_CODE_SUCCESS; @@ -458,9 +458,9 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { *type = (int16_t)dataType; *bytes = dataBytes; - + size_t size = sizeof(STopBotInfo) + (sizeof(tValuePair) + POINTER_BYTES + extLength) * param; - + // the output column may be larger than sizeof(STopBotInfo) *interBytes = (int32_t)size; } else if (functionId == TSDB_FUNC_SAMPLE) { @@ -484,7 +484,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI } else { return TSDB_CODE_TSC_INVALID_OPERATION; } - + return TSDB_CODE_SUCCESS; } @@ -501,7 +501,7 @@ int32_t isValidFunction(const char* name, int32_t len) { return aScalarFunctions[i].functionId; } } - + for(int32_t i = 0; i <= TSDB_FUNC_ELAPSED; ++i) { int32_t nameLen = (int32_t) strlen(aAggs[i].name); if (len != nameLen) { @@ -519,7 +519,7 @@ static bool function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo if (pResultInfo->initialized) { return false; } - + memset(pCtx->pOutput, 0, (size_t)pCtx->outputBytes); initResultInfo(pResultInfo, pCtx->interBufBytes); return true; @@ -537,7 +537,7 @@ static void function_finalizer(SQLFunctionCtx *pCtx) { if (pResInfo->hasResult != DATA_SET_FLAG) { setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); } - + doFinalizer(pCtx); } @@ -547,7 +547,7 @@ static void function_finalizer(SQLFunctionCtx *pCtx) { */ static void count_function(SQLFunctionCtx *pCtx) { int32_t numOfElem = 0; - + /* * 1. column data missing (schema modified) causes pCtx->hasNull == true. pCtx->preAggVals.isSet == true; * 2. 
for general non-primary key columns, pCtx->hasNull may be true or false, pCtx->preAggVals.isSet == true; @@ -562,7 +562,7 @@ static void count_function(SQLFunctionCtx *pCtx) { if (isNull(val, pCtx->inputType)) { continue; } - + numOfElem += 1; } } else { @@ -570,11 +570,11 @@ static void count_function(SQLFunctionCtx *pCtx) { numOfElem = pCtx->size; } } - + if (numOfElem > 0) { GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; } - + *((int64_t *)pCtx->pOutput) += numOfElem; SET_VAL(pCtx, numOfElem, 1); } @@ -584,7 +584,7 @@ static void count_func_merge(SQLFunctionCtx *pCtx) { for (int32_t i = 0; i < pCtx->size; ++i) { *((int64_t *)pCtx->pOutput) += pData[i]; } - + SET_VAL(pCtx, pCtx->size, 1); } @@ -679,12 +679,12 @@ int32_t noDataRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) { static void do_sum(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; - + // Only the pre-computing information loaded and actual data does not loaded if (pCtx->preAggVals.isSet) { notNullElems = pCtx->size - pCtx->preAggVals.statis.numOfNull; assert(pCtx->size >= pCtx->preAggVals.statis.numOfNull); - + if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) { int64_t *retVal = (int64_t *)pCtx->pOutput; *retVal += pCtx->preAggVals.statis.sum; @@ -731,10 +731,10 @@ static void do_sum(SQLFunctionCtx *pCtx) { LIST_ADD_N_DOUBLE_FLOAT(*retVal, pCtx, pData, float, notNullElems, pCtx->inputType); } } - + // data in the check operation are all null, not output SET_VAL(pCtx, notNullElems, 1); - + if (notNullElems > 0) { GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; } @@ -742,7 +742,7 @@ static void do_sum(SQLFunctionCtx *pCtx) { static void sum_function(SQLFunctionCtx *pCtx) { do_sum(pCtx); - + // keep the result data in output buffer, not in the intermediate buffer SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { @@ -778,7 +778,7 @@ static void sum_func_merge(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, notNullElems, 1); SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - + if (notNullElems > 0) { pResInfo->hasResult = DATA_SET_FLAG; } @@ -797,7 +797,7 @@ static int32_t firstFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t c if (pCtx->order == TSDB_ORDER_DESC) { return BLK_DATA_NO_NEEDED; } - + // no result for first query, data block is required if (GET_RES_INFO(pCtx) == NULL || GET_RES_INFO(pCtx)->numOfRes <= 0) { return BLK_DATA_ALL_NEEDED; @@ -810,7 +810,7 @@ static int32_t lastFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t co if (pCtx->order != pCtx->param[0].i64) { return BLK_DATA_NO_NEEDED; } - + if (GET_RES_INFO(pCtx) == NULL || GET_RES_INFO(pCtx)->numOfRes <= 0) { return BLK_DATA_ALL_NEEDED; } else { @@ -866,17 +866,17 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_ */ static void avg_function(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; - + // NOTE: keep the intermediate result into the interResultBuf SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - + SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo); double *pVal = &pAvgInfo->sum; - + if (pCtx->preAggVals.isSet) { // Pre-aggregation notNullElems = pCtx->size - pCtx->preAggVals.statis.numOfNull; assert(notNullElems >= 0); - + if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) { *pVal += pCtx->preAggVals.statis.sum; } else if (IS_UNSIGNED_NUMERIC_TYPE(pCtx->inputType)) { @@ -886,7 +886,7 @@ static void avg_function(SQLFunctionCtx *pCtx) { } } else { void *pData = GET_INPUT_DATA_LIST(pCtx); - + if (pCtx->inputType == 
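  // Raw-data path of avg(): LIST_ADD_N expands to a typed loop that accumulates
  // every non-NULL element of pData into *pVal while counting notNullElems; one
  // branch per input type keeps per-element type dispatch out of the inner loop.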
TSDB_DATA_TYPE_TINYINT) { LIST_ADD_N(*pVal, pCtx, pData, int8_t, notNullElems, pCtx->inputType); } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { @@ -909,18 +909,18 @@ static void avg_function(SQLFunctionCtx *pCtx) { LIST_ADD_N(*pVal, pCtx, pData, uint64_t, notNullElems, pCtx->inputType); } } - + if (!pCtx->hasNull) { assert(notNullElems == pCtx->size); } - + SET_VAL(pCtx, notNullElems, 1); pAvgInfo->num += notNullElems; - + if (notNullElems > 0) { pResInfo->hasResult = DATA_SET_FLAG; } - + // keep the data into the final output buffer for super table query since this execution may be the last one if (pCtx->stableQuery) { memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SAvgInfo)); @@ -929,18 +929,18 @@ static void avg_function(SQLFunctionCtx *pCtx) { static void avg_func_merge(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - + double *sum = (double*) pCtx->pOutput; char *input = GET_INPUT_DATA_LIST(pCtx); - + for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) { SAvgInfo *pInput = (SAvgInfo *)input; if (pInput->num == 0) { // current input is null continue; } - + SET_DOUBLE_VAL(sum, *sum + pInput->sum); - + // keep the number of data into the temp buffer *(int64_t *)GET_ROWCELL_INTERBUF(pResInfo) += pInput->num; } @@ -951,10 +951,10 @@ static void avg_func_merge(SQLFunctionCtx *pCtx) { */ static void avg_finalizer(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - + if (pCtx->currentStage == MERGE_STAGE) { assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); - + if (GET_INT64_VAL(GET_ROWCELL_INTERBUF(pResInfo)) <= 0) { setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); return; @@ -964,15 +964,15 @@ static void avg_finalizer(SQLFunctionCtx *pCtx) { } else { // this is the secondary merge, only in the secondary merge, the input type is TSDB_DATA_TYPE_BINARY assert(IS_NUMERIC_TYPE(pCtx->inputType)); SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo); - + if (pAvgInfo->num == 0) { // all data are NULL or empty table setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); return; } - + SET_DOUBLE_VAL((double *)pCtx->pOutput, pAvgInfo->sum / pAvgInfo->num); } - + // cannot set the numOfIteratedElems again since it is set during previous iteration GET_RES_INFO(pCtx)->numOfRes = 1; doFinalizer(pCtx); @@ -992,7 +992,7 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, void* tval = NULL; int16_t index = 0; - + if (isMin) { tval = &pCtx->preAggVals.statis.min; index = pCtx->preAggVals.statis.minIndex; @@ -1000,7 +1000,7 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, tval = &pCtx->preAggVals.statis.max; index = pCtx->preAggVals.statis.maxIndex; } - + TSKEY key = TSKEY_INITIAL_VAL; if (pCtx->ptsList != NULL) { /** @@ -1016,23 +1016,23 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, // the index is the original position, not the relative position key = pCtx->ptsList[index]; } - + if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) { int64_t val = GET_INT64_VAL(tval); if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { int8_t *data = (int8_t *)pOutput; - + UPDATE_DATA(pCtx, *data, (int8_t)val, notNullElems, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { int16_t *data = (int16_t *)pOutput; - + UPDATE_DATA(pCtx, *data, (int16_t)val, notNullElems, isMin, key); } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { int32_t *data = (int32_t *)pOutput; #if defined(_DEBUG_VIEW) 
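      // Pre-aggregation path: tval points at the block statistics (statis.min or
      // statis.max) and `index` is the row recorded for that extremum, so `key` is
      // the matching timestamp taken from pCtx->ptsList and applied by UPDATE_DATA.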
qDebug("max value updated according to pre-cal:%d", *data); #endif - + if ((*data < val) ^ isMin) { *data = (int32_t)val; for (int32_t i = 0; i < (pCtx)->tagInfo.numOfTagCols; ++i) { @@ -1041,7 +1041,7 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, __ctx->tag.i64 = key; __ctx->tag.nType = TSDB_DATA_TYPE_BIGINT; } - + aAggs[TSDB_FUNC_TAG].xFunction(__ctx); } } @@ -1073,18 +1073,18 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { float *data = (float *)pOutput; double val = GET_DOUBLE_VAL(tval); - + UPDATE_DATA(pCtx, *data, (float)val, notNullElems, isMin, key); } - + return; } - + void *p = GET_INPUT_DATA_LIST(pCtx); TSKEY *tsList = GET_TS_LIST(pCtx); *notNullElems = 0; - + if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) { if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { TYPED_LOOPCHECK_N(int8_t, pOutput, p, pCtx, pCtx->inputType, isMin, *notNullElems); @@ -1093,12 +1093,12 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { int32_t *pData = p; int32_t *retVal = (int32_t*) pOutput; - + for (int32_t i = 0; i < pCtx->size; ++i) { if (pCtx->hasNull && isNull((const char*)&pData[i], pCtx->inputType)) { continue; } - + if ((*retVal < pData[i]) ^ isMin) { *retVal = pData[i]; if(tsList) { @@ -1135,9 +1135,9 @@ static bool min_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo if (!function_setup(pCtx, pResultInfo)) { return false; // not initialized since it has been initialized } - + GET_TRUE_DATA_TYPE(); - + switch (type) { case TSDB_DATA_TYPE_TINYINT: *((int8_t *)pCtx->pOutput) = INT8_MAX; @@ -1180,9 +1180,9 @@ static bool max_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo if (!function_setup(pCtx, pResultInfo)) { return false; // not initialized since it has been initialized } - + GET_TRUE_DATA_TYPE(); - + switch (type) { case TSDB_DATA_TYPE_INT: *((int32_t *)pCtx->pOutput) = INT32_MIN; @@ -1217,7 +1217,7 @@ static bool max_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo default: qError("illegal data type:%d in min/max query", pCtx->inputType); } - + return true; } @@ -1227,13 +1227,13 @@ static bool max_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo static void min_function(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; minMax_function(pCtx, pCtx->pOutput, 1, ¬NullElems); - + SET_VAL(pCtx, notNullElems, 1); - + if (notNullElems > 0) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; - + // set the flag for super table query if (pCtx->stableQuery) { *(pCtx->pOutput + pCtx->inputBytes) = DATA_SET_FLAG; @@ -1244,13 +1244,13 @@ static void min_function(SQLFunctionCtx *pCtx) { static void max_function(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; minMax_function(pCtx, pCtx->pOutput, 0, ¬NullElems); - + SET_VAL(pCtx, notNullElems, 1); - + if (notNullElems > 0) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); pResInfo->hasResult = DATA_SET_FLAG; - + // set the flag for super table query if (pCtx->stableQuery) { *(pCtx->pOutput + pCtx->inputBytes) = DATA_SET_FLAG; @@ -1260,16 +1260,16 @@ static void max_function(SQLFunctionCtx *pCtx) { static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *output, bool isMin) { int32_t notNullElems = 0; - + GET_TRUE_DATA_TYPE(); assert(pCtx->stableQuery); - + for (int32_t i = 0; i < pCtx->size; ++i) { char *input = 
GET_INPUT_DATA(pCtx, i); if (input[bytes] != DATA_SET_FLAG) { continue; } - + switch (type) { case TSDB_DATA_TYPE_TINYINT: { int8_t v = GET_INT8_VAL(input); @@ -1285,12 +1285,12 @@ static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *outp int32_t v = GET_INT32_VAL(input); if ((*(int32_t *)output < v) ^ isMin) { *(int32_t *)output = v; - + for (int32_t j = 0; j < pCtx->tagInfo.numOfTagCols; ++j) { SQLFunctionCtx *__ctx = pCtx->tagInfo.pTagCtxList[j]; aAggs[TSDB_FUNC_TAG].xFunction(__ctx); } - + notNullElems++; } break; @@ -1339,15 +1339,15 @@ static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *outp break; } } - + return notNullElems; } static void min_func_merge(SQLFunctionCtx *pCtx) { int32_t notNullElems = minmax_merge_impl(pCtx, pCtx->outputBytes, pCtx->pOutput, 1); - + SET_VAL(pCtx, notNullElems, 1); - + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (notNullElems > 0) { pResInfo->hasResult = DATA_SET_FLAG; @@ -1356,9 +1356,9 @@ static void min_func_merge(SQLFunctionCtx *pCtx) { static void max_func_merge(SQLFunctionCtx *pCtx) { int32_t numOfElem = minmax_merge_impl(pCtx, pCtx->outputBytes, pCtx->pOutput, 0); - + SET_VAL(pCtx, numOfElem, 1); - + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); if (numOfElem > 0) { pResInfo->hasResult = DATA_SET_FLAG; @@ -4870,7 +4870,8 @@ static void elapsedFinalizer(SQLFunctionCtx *pCtx) { } SElapsedInfo *pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); - *(double *)pCtx->pOutput = (double)pInfo->max - (double)pInfo->min; + double result = (double)pInfo->max - (double)pInfo->min; + *(double *)pCtx->pOutput = result >= 0 ? result : -result; if (pCtx->numOfParams > 0 && pCtx->param[0].i64 > 0) { *(double *)pCtx->pOutput = *(double *)pCtx->pOutput / pCtx->param[0].i64; } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 2ba143b147d5924eb07aeb9bb2fd321b5d519573..6346e743081a6594fcc9e8d8001ae18e3f90ac92 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1942,7 +1942,7 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr for (int32_t j = 0; j < pCtx->numOfParams; ++j) { int16_t type = pSqlExpr->param[j].nType; int16_t bytes = pSqlExpr->param[j].nLen; - if (pSqlExpr->functionId == TSDB_FUNC_STDDEV_DST) { + if (pSqlExpr->functionId == TSDB_FUNC_STDDEV_DST || pSqlExpr->functionId == TSDB_FUNC_TS_COMP) { continue; } diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index cc214b953303e3b10b053bbe0c183eaee520e32a..9d174b0389d74073b5989af5a8fd7c26d5fd80dd 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -1040,7 +1040,10 @@ void tColModelDisplay(SColumnModel *pModel, void *pData, int32_t numOfRows, int3 break; case TSDB_DATA_TYPE_NCHAR: { char buf[4096] = {0}; - taosUcs4ToMbs(val, pModel->pFields[j].field.bytes, buf); + int32_t len = taosUcs4ToMbs(val, pModel->pFields[j].field.bytes, buf); + if (len < 0){ + qError("castConvert1 taosUcs4ToMbs error"); + } printf("%s\t", buf); break; } @@ -1092,7 +1095,10 @@ void tColModelDisplayEx(SColumnModel *pModel, void *pData, int32_t numOfRows, in break; case TSDB_DATA_TYPE_NCHAR: { char buf[128] = {0}; - taosUcs4ToMbs(val, pModel->pFields[j].field.bytes, buf); + int32_t len = taosUcs4ToMbs(val, pModel->pFields[j].field.bytes, buf); + if (len < 0){ + qError("castConvert1 taosUcs4ToMbs error"); + } printf("%s\t", buf); break; } diff --git a/src/query/src/qFilter.c b/src/query/src/qFilter.c index 
05ecda2a51f33c00130bcad0c5aa7139e7429a48..6869017e116ab9fe9dce30fbb028242f0e990a4b 100644 --- a/src/query/src/qFilter.c +++ b/src/query/src/qFilter.c @@ -1899,12 +1899,20 @@ int32_t filterInitValFieldData(SFilterInfo *info) { (unit->compare.optr == TSDB_RELATION_MATCH || unit->compare.optr == TSDB_RELATION_NMATCH)){ char newValData[TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE] = {0}; int32_t len = taosUcs4ToMbs(varDataVal(fi->data), varDataLen(fi->data), varDataVal(newValData)); + if (len < 0){ + qError("filterInitValFieldData taosUcs4ToMbs error 1"); + return TSDB_CODE_FAILED; + } varDataSetLen(newValData, len); varDataCopy(fi->data, newValData); }else if(type == TSDB_DATA_TYPE_JSON && (unit->compare.optr == TSDB_RELATION_MATCH || unit->compare.optr == TSDB_RELATION_NMATCH)){ char newValData[TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE] = {0}; int32_t len = taosUcs4ToMbs(((tVariant*)(fi->desc))->pz, ((tVariant*)(fi->desc))->nLen, newValData); + if (len < 0){ + qError("filterInitValFieldData taosUcs4ToMbs error 2"); + return TSDB_CODE_FAILED; + } memcpy(((tVariant*)(fi->desc))->pz, newValData, len); ((tVariant*)(fi->desc))->nLen = len; } @@ -3025,6 +3033,11 @@ static void doJsonCompare(SFilterComUnit *cunit, int8_t *result, void* colData){ }else{ char *newColData = calloc(cunit->dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1); int len = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), varDataVal(newColData)); + if (len < 0){ + qError("castConvert1 taosUcs4ToMbs error"); + tfree(newColData); + return; + } varDataSetLen(newColData, len); tVariant* val = cunit->valData; char newValData[TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE] = {0}; @@ -3113,9 +3126,13 @@ bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t** p, SDataStat if(info->cunits[uidx].dataType == TSDB_DATA_TYPE_NCHAR && (info->cunits[uidx].optr == TSDB_RELATION_MATCH || info->cunits[uidx].optr == TSDB_RELATION_NMATCH)){ char *newColData = calloc(info->cunits[uidx].dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1); - int len = taosUcs4ToMbs(varDataVal(colData), varDataLen(colData), varDataVal(newColData)); - varDataSetLen(newColData, len); - (*p)[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, newColData, info->cunits[uidx].valData); + int32_t len = taosUcs4ToMbs(varDataVal(colData), varDataLen(colData), varDataVal(newColData)); + if (len < 0){ + qError("castConvert1 taosUcs4ToMbs error"); + }else{ + varDataSetLen(newColData, len); + (*p)[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, newColData, info->cunits[uidx].valData); + } tfree(newColData); }else if(info->cunits[uidx].dataType == TSDB_DATA_TYPE_JSON){ doJsonCompare(&(info->cunits[uidx]), &(*p)[i], colData); @@ -3170,9 +3187,13 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis * } else { if(cunit->dataType == TSDB_DATA_TYPE_NCHAR && (cunit->optr == TSDB_RELATION_MATCH || cunit->optr == TSDB_RELATION_NMATCH)){ char *newColData = calloc(cunit->dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1); - int len = taosUcs4ToMbs(varDataVal(colData), varDataLen(colData), varDataVal(newColData)); - varDataSetLen(newColData, len); - (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, newColData, cunit->valData); + int32_t len = taosUcs4ToMbs(varDataVal(colData), varDataLen(colData), varDataVal(newColData)); + if (len < 0){ + qError("castConvert1 taosUcs4ToMbs error"); + 
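+          // taosUcs4ToMbs reports iconv failure as a negative length; the row's
+          // compare is skipped in that case, and newColData is freed on both paths
+          // so the error branch cannot leak the conversion buffer.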
}else{ + varDataSetLen(newColData, len); + (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, newColData, cunit->valData); + } tfree(newColData); }else if(cunit->dataType == TSDB_DATA_TYPE_JSON){ doJsonCompare(cunit, &(*p)[i], colData); @@ -3577,7 +3598,11 @@ int32_t filterConverNcharColumns(SFilterInfo* info, int32_t rows, bool *gotNchar char *src = FILTER_GET_COL_FIELD_DATA(fi, j); char *dst = FILTER_GET_COL_FIELD_DATA(&nfi, j); int32_t len = 0; - taosMbsToUcs4(varDataVal(src), varDataLen(src), varDataVal(dst), bufSize, &len); + bool ret = taosMbsToUcs4(varDataVal(src), varDataLen(src), varDataVal(dst), bufSize, &len); + if(!ret) { + qError("filterConverNcharColumns taosMbsToUcs4 error"); + return TSDB_CODE_FAILED; + } varDataLen(dst) = len; } diff --git a/src/query/src/qFilterfunc.c b/src/query/src/qFilterfunc.c index 1c1ec21d653b5b96fe792aa05641191a441b8e8d..3c03b4c29196bf97fc12f859859eff4665a1a9a0 100644 --- a/src/query/src/qFilterfunc.c +++ b/src/query/src/qFilterfunc.c @@ -183,7 +183,7 @@ bool likeOperator(SColumnFilterElem *pFilter, const char *minval, const char *ma return patternMatch((char *)pFilter->filterInfo.pz, varDataVal(minval), varDataLen(minval), &info) == TSDB_PATTERN_MATCH; } else if (type == TSDB_DATA_TYPE_NCHAR) { SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER; - return WCSPatternMatch((wchar_t*)pFilter->filterInfo.pz, varDataVal(minval), varDataLen(minval)/TSDB_NCHAR_SIZE, &info) == TSDB_PATTERN_MATCH; + return WCSPatternMatch((uint32_t *) pFilter->filterInfo.pz, (uint32_t *) varDataVal(minval), varDataLen(minval)/TSDB_NCHAR_SIZE, &info) == TSDB_PATTERN_MATCH; } else { return false; } diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 229d684e8364a4c1aabc5cf1825e45cc698a1de3..a095bff61e05822f6389a51671f98fc5a33e0bbe 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -1463,6 +1463,7 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) { tsdbFreeTable(pTable); return NULL; } + taosHashSetFreeFp(pTable->jsonKeyMap, taosArrayDestroyForHash); }else{ pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, SL_ALLOW_DUP_KEY, getTagIndexKey); diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index f3241cd0e0ea4c7929c6d0c82df339a513299c7d..be734ce0cccad6827cba4e2c27d0be478af92af3 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -4243,20 +4243,28 @@ char* parseTagDatatoJson(void *p){ } cJSON_AddItemToObject(json, tagJsonKey, value); }else if(type == TSDB_DATA_TYPE_NCHAR) { - char *tagJsonValue = calloc(varDataLen(realData), 1); - int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), tagJsonValue); - if (length < 0) { - tsdbError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, - (char*)val); + cJSON* value = NULL; + if (varDataLen(realData) > 0){ + char *tagJsonValue = calloc(varDataLen(realData), 1); + int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), tagJsonValue); + if (length < 0) { + tsdbError("charset:%s to %s. 
val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, + (char*)val); + free(tagJsonValue); + goto end; + } + value = cJSON_CreateString(tagJsonValue); free(tagJsonValue); - goto end; - } - cJSON* value = cJSON_CreateString(tagJsonValue); - free(tagJsonValue); - if (value == NULL) - { - goto end; + if (value == NULL) + { + goto end; + } + }else if(varDataLen(realData) == 0){ + value = cJSON_CreateString(""); + }else{ + assert(0); } + cJSON_AddItemToObject(json, tagJsonKey, value); }else if(type == TSDB_DATA_TYPE_DOUBLE){ double jsonVd = *(double*)(realData); diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h index be62ce0a659d0e07d904cac4f994b4639cd18917..35b6892785622c3e574947c2595adf4a2311de74 100644 --- a/src/util/inc/tcompare.h +++ b/src/util/inc/tcompare.h @@ -44,7 +44,7 @@ typedef struct SPatternCompareInfo { int patternMatch(const char *pattern, const char *str, size_t size, const SPatternCompareInfo *pInfo); -int WCSPatternMatch(const wchar_t *pattern, const wchar_t *str, size_t size, const SPatternCompareInfo *pInfo); +int WCSPatternMatch(const uint32_t *pattern, const uint32_t *str, size_t size, const SPatternCompareInfo *pInfo); int32_t doCompare(const char* a, const char* b, int32_t type, size_t size); diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 1cfc0c3873a438699c342a7dd4a4b1a8efd32878..02b0e83061d732e7c5b7cb8a88e5717c6e776f56 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -321,29 +321,94 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat return (str[j] == 0 || j >= size) ? TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH; } -int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, const SPatternCompareInfo *pInfo) { - wchar_t c, c1; - wchar_t matchOne = L'_'; // "_" - wchar_t matchAll = L'%'; // "%" +static uint32_t * +taosWcschr (const uint32_t *wcs, const uint32_t wc) +{ + const uint32_t *wcs2 = wcs + 1; + if (*wcs == wc) + return (uint32_t *) wcs; + if (*wcs == L'\0') + return NULL; + do + { + wcs += 2; + if (*wcs2 == wc) + return (uint32_t *) wcs2; + if (*wcs2 == L'\0') + return NULL; + wcs2 += 2; + if (*wcs == wc) + return (uint32_t *) wcs; + if (*wcs == L'\0') + return NULL; + wcs += 2; + if (*wcs2 == wc) + return (uint32_t *) wcs2; + if (*wcs2 == L'\0') + return NULL; + wcs2 += 2; + if (*wcs == wc) + return (uint32_t *) wcs; + if (*wcs == L'\0') + return NULL; + wcs += 2; + if (*wcs2 == wc) + return (uint32_t *) wcs2; + if (*wcs2 == L'\0') + return NULL; + wcs2 += 2; + if (*wcs == wc) + return (uint32_t *) wcs; + if (*wcs == L'\0') + return NULL; + wcs += 2; + if (*wcs2 == wc) + return (uint32_t *) wcs2; + if (*wcs2 == L'\0') + return NULL; + wcs2 += 2; + if (*wcs == wc) + return (uint32_t *) wcs; + } + while (*wcs != L'\0'); + return NULL; +} + +static size_t +taosWcscspn (const uint32_t *wcs, const uint32_t *reject) +{ + size_t count = 0; + while (*wcs != L'\0') + if (taosWcschr (reject, *wcs++) == NULL) + ++count; + else + return count; + return count; +} + +int WCSPatternMatch(const uint32_t *patterStr, const uint32_t *str, size_t size, const SPatternCompareInfo *pInfo) { + uint32_t c, c1; + uint32_t matchOne = (uint32_t) L'_'; // "_" + uint32_t matchAll = (uint32_t) L'%'; // "%" int32_t i = 0; int32_t j = 0; while ((c = patterStr[i++]) != 0) { if (c == matchAll) { /* Match "%" */ - while ((c = patterStr[i++]) == matchAll || c == matchOne) { if (c == matchOne && (j >= size || str[j++] == 0)) { return TSDB_PATTERN_NOWILDCARDMATCH; } } + if (c 
== 0) { return TSDB_PATTERN_MATCH; } - wchar_t accept[3] = {towupper(c), towlower(c), 0}; + uint32_t accept[3] = {towupper(c), towlower(c), 0}; while (1) { - size_t n = wcscspn(str, accept); + size_t n = taosWcscspn(str, accept); str += n; if (str[0] == 0 || (n >= size)) { @@ -465,7 +530,7 @@ int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); memcpy(str, varDataVal(pLeft), size * sizeof(wchar_t)); - int32_t ret = WCSPatternMatch(pattern, str, size, &pInfo); + int32_t ret = WCSPatternMatch((uint32_t *)pattern, (uint32_t *)str, size, &pInfo); free(pattern); free(str); diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 3853d2e9c7491db68abf4ca9f7d42edd62da5729..acbee18ec21b02761295de90ef9ff535a97739d1 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -282,7 +282,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_MESSED_MSG, "TSDB messed message") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVLD_TAG_VAL, "TSDB invalid tag value") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_CACHE_LAST_ROW, "TSDB no cache last row data") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INCOMPLETE_DFILESET, "Incomplete DFileSet") -TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_JSON_TAG_KEY, "TSDB no tag json key") // query TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, "Invalid handle") diff --git a/tests/develop-test/0-management/3-tag/json_tag.py b/tests/develop-test/0-others/json_tag.py similarity index 100% rename from tests/develop-test/0-management/3-tag/json_tag.py rename to tests/develop-test/0-others/json_tag.py diff --git a/tests/develop-test/1-insert/0-sql/batchInsert.py b/tests/develop-test/1-insert/batchInsert.py similarity index 100% rename from tests/develop-test/1-insert/0-sql/batchInsert.py rename to tests/develop-test/1-insert/batchInsert.py diff --git a/tests/develop-test/2-query/0-aggregate/.gitkeep b/tests/develop-test/2-query/0-aggregate/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/develop-test/2-query/1-select/.gitkeep b/tests/develop-test/2-query/1-select/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/develop-test/2-query/2-compute/.gitkeep b/tests/develop-test/2-query/2-compute/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/develop-test/2-query/3-join/.gitkeep b/tests/develop-test/2-query/3-join/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/develop-test/2-query/4-union/.gitkeep b/tests/develop-test/2-query/4-union/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/develop-test/2-query/5-session/.gitkeep b/tests/develop-test/2-query/5-session/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/develop-test/2-query/6-state_window/.gitkeep b/tests/develop-test/2-query/6-state_window/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/develop-test/2-query/7-nest/.gitkeep b/tests/develop-test/2-query/7-nest/.gitkeep deleted file mode 100644 index 
e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/develop-test/2-query/8-udf/.gitkeep b/tests/develop-test/2-query/8-udf/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/develop-test/2-query/9-others/.gitkeep b/tests/develop-test/2-query/9-others/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/2-query/9-others/TD-12344.py b/tests/develop-test/2-query/session_two_stage.py similarity index 81% rename from tests/system-test/2-query/9-others/TD-12344.py rename to tests/develop-test/2-query/session_two_stage.py index 4588893e157af3a0e2a2906a35228e9034502252..ca17814c8e31a2f7e9aca3712655cb50f6a0f0b8 100644 --- a/tests/system-test/2-query/9-others/TD-12344.py +++ b/tests/develop-test/2-query/session_two_stage.py @@ -48,10 +48,7 @@ class TDTestCase: def caseDescription(self): ''' - case1 : [TD-11389] : - this test case is an test case for cache error , it will let the cached data obtained by the client that has connected to taosd incorrect, - root cause : table schema is changed, tag hostname size is increased through schema-less insertion. The schema cache of client taos is not refreshed. - + case1 : [TD-12344] : fix session window for super table two-stage query ''' return @@ -97,9 +94,8 @@ class TDTestCase: cfg_path = self.getcfgPath() print(cfg_path) - # tdSql.execute('select elapsed(ts,10s) from st where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1d) group by tbname;') # session not support super table - os.system("taos -c %s -s 'select elapsed(ts,10s) from testdb.st where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" session(ts,1d) group by tbname;' " % (cfg_path)) - + tdSql.query('select elapsed(ts,10s) from st where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1d) group by tbname;') # session window on a super table now works via two-stage query (TD-12344) + tdSql.checkRows(10) diff --git a/tests/develop-test/2-query/7-nest/ts_hidden_column.py b/tests/develop-test/2-query/ts_hidden_column.py similarity index 100% rename from tests/develop-test/2-query/7-nest/ts_hidden_column.py rename to tests/develop-test/2-query/ts_hidden_column.py diff --git a/tests/develop-test/2-query/union-order.py b/tests/develop-test/2-query/union-order.py new file mode 100644 index 0000000000000000000000000000000000000000..6c2f20c129d294601f89757d01710f5e080a1717 --- /dev/null +++ b/tests/develop-test/2-query/union-order.py @@ -0,0 +1,60 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
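For reference, the reworked session_two_stage.py above now drives the session-window query through the tdSql helper instead of shelling out to the taos CLI, so the result set is actually asserted. A minimal sketch of that check, assuming the util.sql fixtures these suites share and a setup that creates ten child tables under st:

    tdSql.query('select elapsed(ts,10s) from st '
                'where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" '
                'session(ts,1d) group by tbname;')
    tdSql.checkRows(10)  # one aggregated session row per child table (assumed fixture size)
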
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12229] fix union all query producing different results when the order of the sub-queries is switched + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists td12229") + tdSql.execute("create database if not exists td12229") + tdSql.execute('use td12229') + + tdSql.execute('create stable st(ts timestamp , value int ) tags (ind int)') + tdSql.execute('insert into tb1 using st tags(1) values(now ,1)') + tdSql.execute('insert into tb1 using st tags(1) values(now+1s ,2)') + tdSql.execute('insert into tb1 using st tags(1) values(now+2s ,3)') + tdSql.execute('create stable ste(ts timestamp , value int ) tags (ind int)') + tdSql.query('select * from st') + tdSql.checkRows(3) + tdSql.query('select * from st union all select * from ste') + tdSql.checkRows(3) + tdSql.query('select * from ste union all select * from st') + tdSql.checkRows(3) + tdSql.query('select elapsed(ts) from ste group by tbname union all select elapsed(ts) from st group by tbname;') + tdSql.checkRows(1) + tdSql.query('select elapsed(ts) from st group by tbname union all select elapsed(ts) from ste group by tbname;') + tdSql.checkRows(1) + tdSql.execute('drop database td12229') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/3-connectors/c#/test.sh b/tests/develop-test/3-connectors/c#/test.sh new file mode 100755 index 0000000000000000000000000000000000000000..2d4f18b668263d40bb18ef46f34b7299b3f7cdd3 --- /dev/null +++ b/tests/develop-test/3-connectors/c#/test.sh @@ -0,0 +1,32 @@ +#!/bin/bash +function stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} +stopTaosd +rm -rf /var/lib/taos/* +rm -rf /var/log/taos/* +nohup taosd -c /etc/taos/ > /dev/null 2>&1 & +sleep 10 +cd ../../ +WKC=`pwd` +cd ${WKC}/src/connector/C# +dotnet test +dotnet run --project src/test/Cases/Cases.csproj + +cd ${WKC}/tests/examples/C# +dotnet run --project C#checker/C#checker.csproj +dotnet run --project TDengineTest/TDengineTest.csproj +dotnet run --project schemaless/schemaless.csproj + +cd ${WKC}/tests/examples/C#/taosdemo +dotnet build -c Release +tree || true +./bin/Release/net5.0/taosdemo -c /etc/taos -y diff --git a/tests/develop-test/3-connectors/go/test.sh b/tests/develop-test/3-connectors/go/test.sh new file mode 100755 index 0000000000000000000000000000000000000000..097723ad461b69c75e18bc8018c025f0e9f7a3e3 --- /dev/null +++ b/tests/develop-test/3-connectors/go/test.sh @@ -0,0 +1,20 @@ +#!/bin/bash +function stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + PID=`ps -ef|grep -w taosd | grep 
-v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} +stopTaosd +rm -rf /var/lib/taos/* +rm -rf /var/log/taos/* +nohup taosd -c /etc/taos/ > /dev/null 2>&1 & +sleep 10 +cd ../../ +WKC=`pwd` + diff --git a/tests/develop-test/3-connectors/java/test.sh b/tests/develop-test/3-connectors/java/test.sh new file mode 100755 index 0000000000000000000000000000000000000000..15f7b84955b793e0fb6acaa434fba83c6ff0c710 --- /dev/null +++ b/tests/develop-test/3-connectors/java/test.sh @@ -0,0 +1,17 @@ +#!/bin/bash +function stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} +stopTaosd +rm -rf /var/lib/taos/* +rm -rf /var/log/taos/* +nohup taosd -c /etc/taos/ > /dev/null 2>&1 & +sleep 10 diff --git a/tests/develop-test/3-connectors/nodejs/test.sh b/tests/develop-test/3-connectors/nodejs/test.sh new file mode 100755 index 0000000000000000000000000000000000000000..3b1d8bb4790d6273e32a42ce50979e98e1ce5a92 --- /dev/null +++ b/tests/develop-test/3-connectors/nodejs/test.sh @@ -0,0 +1,29 @@ +#!/bin/bash +function stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} +stopTaosd +rm -rf /var/lib/taos/* +rm -rf /var/log/taos/* +nohup taosd -c /etc/taos/ > /dev/null 2>&1 & +sleep 10 +cd ../../ +WKC=`pwd` +cd ${WKC}/src/connector/nodejs +npm install +npm run test +cd ${WKC}/tests/examples/nodejs +npm install td2.0-connector > /dev/null 2>&1 +node nodejsChecker.js host=localhost +node test1970.js +cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport +npm install td2.0-connector > /dev/null 2>&1 +node nanosecondTest.js diff --git a/tests/develop-test/3-connectors/python/test.sh b/tests/develop-test/3-connectors/python/test.sh new file mode 100755 index 0000000000000000000000000000000000000000..fe0dfbdac99f6938c8a57d13666f609c2c7c5d33 --- /dev/null +++ b/tests/develop-test/3-connectors/python/test.sh @@ -0,0 +1,30 @@ +#!/bin/bash +function stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} +stopTaosd +rm -rf /var/lib/taos/* +rm -rf /var/log/taos/* +nohup taosd -c /etc/taos/ > /dev/null 2>&1 & +sleep 10 +cd ../../src/connector/python +pip3 install pytest +pytest tests/ + +python3 examples/bind-multi.py +python3 examples/bind-row.py +python3 examples/demo.py +python3 examples/insert-lines.py +python3 examples/pep-249.py +python3 examples/query-async.py +python3 examples/query-objectively.py +python3 examples/subscribe-sync.py +python3 examples/subscribe-async.py diff --git a/tests/develop-test/3-connectors/restful/test.sh b/tests/develop-test/3-connectors/restful/test.sh new file mode 100755 index 0000000000000000000000000000000000000000..1c6d8fbc2c5da6633d749054a19a5bde7772faf7 --- /dev/null +++ b/tests/develop-test/3-connectors/restful/test.sh @@ -0,0 +1,19 @@ +#!/bin/bash +function 
stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} +stopTaosd +rm -rf /var/lib/taos/* +rm -rf /var/log/taos/* +nohup taosd -c /etc/taos/ > /dev/null 2>&1 & +sleep 10 +cd ../../ +WKC=`pwd` diff --git a/tests/develop-test/3-connectors/rust/test.sh b/tests/develop-test/3-connectors/rust/test.sh new file mode 100755 index 0000000000000000000000000000000000000000..4bf6394b27cf43674ed38a1e4de46342ee3b1ae4 --- /dev/null +++ b/tests/develop-test/3-connectors/rust/test.sh @@ -0,0 +1,19 @@ +#!/bin/bash +function stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} +stopTaosd +rm -rf /var/lib/taos/* +rm -rf /var/log/taos/* +nohup taosd -c /etc/taos/ > /dev/null 2>&1 & +sleep 10 +cd ../../ +WKC=`pwd` \ No newline at end of file diff --git a/tests/develop-test/0-management/0-database/.gitkeep b/tests/develop-test/5-taos-tools/.gitkeep similarity index 100% rename from tests/develop-test/0-management/0-database/.gitkeep rename to tests/develop-test/5-taos-tools/.gitkeep diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/.gitkeep b/tests/develop-test/5-taos-tools/taosbenchmark/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/develop-test/5-taos-tools/taosdump/.gitkeep b/tests/develop-test/5-taos-tools/taosdump/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeJson.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeJson.py new file mode 100644 index 0000000000000000000000000000000000000000..14297ee867e0830fae8a776bfc7902e3f6ee4d9c --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeJson.py @@ -0,0 +1,111 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
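The taosdumpTestTypeJson.py case that follows exercises a full dump-and-restore round trip for JSON tags: dump the database with taosdump, drop it, re-import the dump, then re-check the schema and the jtag queries. The shape of that round trip as a minimal sketch (binPath and tmpdir are stand-ins; the real test resolves them via getBuildPath and self.tmpdir):

    import os

    binPath = '/path/to/build/bin/'  # hypothetical; resolved from the build tree in the test
    tmpdir = 'tmp'

    os.system('%staosdump --databases db -o %s' % (binPath, tmpdir))  # dump db to tmpdir
    # drop the database here, then restore it from the dump directory:
    os.system('%staosdump -i %s' % (binPath, tmpdir))
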
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12362] taosdump supports JSON + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tmpdir = "tmp" + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + + tdSql.execute("drop database if exists db") + tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + + tdSql.execute("use db") + tdSql.execute( + "create table st(ts timestamp, c1 int) tags(jtag JSON)") + tdSql.execute("create table t1 using st tags('{\"location\": \"beijing\"}')") + tdSql.execute("insert into t1 values(1500000000000, 1)") + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + if not os.path.exists(self.tmpdir): + os.makedirs(self.tmpdir) + else: + print("directory exists") + os.system("rm -rf %s" % self.tmpdir) + os.makedirs(self.tmpdir) + + os.system("%staosdump --databases db -o %s" % (binPath, self.tmpdir)) + + tdSql.execute("drop database db") + + os.system("%staosdump -i %s" % (binPath, self.tmpdir)) + + tdSql.query("show databases") + tdSql.checkRows(1) + + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'st') + + tdSql.query("show tables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 't1') + + tdSql.query("select jtag->'location' from st") + tdSql.checkRows(1) + tdSql.checkData(0, 0, "\"beijing\"") + + tdSql.query("select * from st where jtag contains 'location'") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, '{\"location\":\"beijing\"}') + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/7-customer/.gitkeep b/tests/develop-test/7-customer/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/develop-test/0-management/1-stable/.gitkeep b/tests/develop-test/fulltest-cluster.sh old mode 100644 new mode 100755 similarity index 100% rename from tests/develop-test/0-management/1-stable/.gitkeep rename to tests/develop-test/fulltest-cluster.sh diff --git a/tests/develop-test/fulltest-connector.sh b/tests/develop-test/fulltest-connector.sh new file mode 100755 index 0000000000000000000000000000000000000000..81999dc2cf8d8945487387463c86f18106a97641 --- /dev/null +++ b/tests/develop-test/fulltest-connector.sh @@ -0,0 
+1,7 @@ +bash 3-connectors/c#/test.sh +bash 3-connectors/go/test.sh +bash 3-connectors/java/test.sh +bash 3-connectors/nodejs/test.sh +bash 3-connectors/python/test.sh +bash 3-connectors/restful/test.sh +bash 3-connectors/rust/test.sh diff --git a/tests/develop-test/fulltest-insert.sh b/tests/develop-test/fulltest-insert.sh new file mode 100755 index 0000000000000000000000000000000000000000..532f7e6fc0446f6a68ca0a5e80be070684a71c23 --- /dev/null +++ b/tests/develop-test/fulltest-insert.sh @@ -0,0 +1 @@ +python3 ./test.py -f 1-insert/batchInsert.py \ No newline at end of file diff --git a/tests/develop-test/fulltest-others.sh b/tests/develop-test/fulltest-others.sh new file mode 100755 index 0000000000000000000000000000000000000000..bb0bb585b5323b45d43b01404093b97babca3ab7 --- /dev/null +++ b/tests/develop-test/fulltest-others.sh @@ -0,0 +1 @@ +python3 ./test.py -f 0-others/json_tag.py \ No newline at end of file diff --git a/tests/develop-test/fulltest-query.sh b/tests/develop-test/fulltest-query.sh new file mode 100755 index 0000000000000000000000000000000000000000..b5147d20a399e6e19bcb7d84985a83a187429780 --- /dev/null +++ b/tests/develop-test/fulltest-query.sh @@ -0,0 +1,3 @@ +python3 ./test.py -f 2-query/ts_hidden_column.py +python3 ./test.py -f 2-query/union-order.py +python3 ./test.py -f 2-query/session_two_stage.py diff --git a/tests/develop-test/0-management/2-table/.gitkeep b/tests/develop-test/fulltest-taosAdapter.sh old mode 100644 new mode 100755 similarity index 100% rename from tests/develop-test/0-management/2-table/.gitkeep rename to tests/develop-test/fulltest-taosAdapter.sh diff --git a/tests/develop-test/fulltest-tools.sh b/tests/develop-test/fulltest-tools.sh new file mode 100755 index 0000000000000000000000000000000000000000..df6e1718ccf31dfc1a2e5b652a0e38acedb8fe69 --- /dev/null +++ b/tests/develop-test/fulltest-tools.sh @@ -0,0 +1 @@ +python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeJson.py \ No newline at end of file diff --git a/tests/develop-test/fulltest.sh b/tests/develop-test/fulltest.sh deleted file mode 100755 index e7f272fc073a66685bdca709cc6434bb9838b5aa..0000000000000000000000000000000000000000 --- a/tests/develop-test/fulltest.sh +++ /dev/null @@ -1,3 +0,0 @@ -python3 test.py -f 0-management/3-tag/json_tag.py -python3 test.py -f 1-insert/0-sql/batchInsert.py -python3 test.py -f 2-query/7-nest/ts_hidden_column.py diff --git a/tests/perftest-scripts/taosadapter_perftest/config/env_init.py b/tests/perftest-scripts/taosadapter_perftest/config/env_init.py new file mode 100644 index 0000000000000000000000000000000000000000..c06f5023bfb62bc95267d3f33387169c400be898 --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/config/env_init.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +import yaml +import os +import time +from loguru import logger +current_time = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime(time.time())) +current_dir = os.path.dirname(os.path.realpath(__file__)) +config_file = os.path.join(current_dir, '../config/perf_test.yaml') +f = open(config_file) +config = yaml.load(f, Loader=yaml.FullLoader) +log_file = os.path.join(current_dir, f'../log/performance_{current_time}.log') +logger.add(log_file) +logger.info(f'init env success, log will be export to {log_file}') diff --git a/tests/perftest-scripts/taosadapter_perftest/config/perf_test.yaml b/tests/perftest-scripts/taosadapter_perftest/config/perf_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b2517ee83eca7fccffcd21737e2f1c21d9c841fe --- 
/dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/config/perf_test.yaml @@ -0,0 +1,292 @@ +deploy_mode: no +taosd_autodeploy: False +install_package: /home/ubuntu/TDengine/release/TDengine-server-2.3.4.0-beta-Linux-x64.tar.gz +clean_env: True +hostname_prefix: vm +timeout: 10 +taosd_dnode1: + ip: 192.168.1.85 + port: 22 + restful_port: 6041 + telnet_port: 6051 + username: root + password: ****** + modify_cfg: False + cfg: + dataDir: /data/lib/taos + logDir: /data/log/taos + +taosd_cluster: False +taosadapter_separate_deploy: True +taosd_dnode2: + ip: 192.168.1.83 + port: 22 + restful_port: 6041 + telnet_port: 6046 + username: root + password: ****** + modify_cfg: False + cfg: + dataDir: /data/lib/taos + logDir: /data/log/taos + +taosd_dnode3: + ip: 192.168.1.84 + port: 22 + restful_port: 6041 + telnet_port: 6046 + username: root + password: ****** + modify_cfg: False + cfg: + dataDir: /data/lib/taos + logDir: /data/log/taos + +taosd_dnode4: + ip: 192.168.1.86 + port: 22 + restful_port: 6041 + telnet_port: 6046 + username: root + password: ****** + modify_cfg: False + cfg: + dataDir: /data/lib/taos + logDir: /data/log/taos + +prometheus: + autodeploy: True + ip: 192.168.1.101 + port: 22 + username: root + password: ****** + scrape_interval: 3s + evaluation_interval: 1s + scrape_timeout: 3s + prometheus_addr: http://39.105.163.10:9000/prometheus-2.31.1.linux-amd64.tar.gz + node_exporter_addr: https://github.com/prometheus/node_exporter/releases/download/v1.3.1/node_exporter-1.3.1.linux-amd64.tar.gz + process_exporter_addr: https://github.com/ncabatoff/process-exporter/releases/download/v0.7.10/process-exporter-0.7.10.linux-amd64.tar.gz + +jmeter: + autodeploy: False + aggregate_report: True + clean_aggregate_report: True + ip: 127.0.0.1 + port: 22 + username: root + password: ****** + jmeter_addr: https://dlcdn.apache.org//jmeter/binaries/apache-jmeter-5.4.1.tgz + +testcases: + testcase1: + threads: 24 + protocol: telnet-restful + taosadapter_count: 1 + stb_count: 1 + tb_count: 1 + row_count: 3000000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + testcase2: + threads: 32 + protocol: telnet-restful + taosadapter_count: 1 + stb_count: 1 + tb_count: 1 + row_count: 3000000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + testcase3: + threads: 64 + protocol: telnet-restful + taosadapter_count: 1 + stb_count: 1 + tb_count: 1 + row_count: 3000000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + testcase4: + threads: 100 + protocol: telnet-restful + taosadapter_count: 1 + stb_count: 1 + tb_count: 1 + row_count: 5000000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + + testcase5: + threads: 100 + protocol: telnet-restful + taosadapter_count: 2 + stb_count: 1 + tb_count: 1 + row_count: 5000000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + testcase6: + threads: 100 + protocol: telnet-restful + taosadapter_count: 3 + stb_count: 1 + tb_count: 1 + row_count: 5000000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + + testcase7: + threads: 100 + protocol: telnet-restful + taosadapter_count: 1 + stb_count: 1000000 + tb_count: 1 + row_count: 1 + sleep_time: 60s + tag_count: 10 + col_count: 1 + testcase8: + threads: 100 + protocol: telnet-restful + taosadapter_count: 1 + stb_count: 1 + tb_count: 5000000 + row_count: 1 + sleep_time: 60s + tag_count: 10 + col_count: 1 + + testcase9: + threads: 100 + protocol: telnet-restful + taosadapter_count: 1 + stb_count: 1 + tb_count: 100000 + row_count: 1000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + + testcase10: + threads: 100 + 
protocol: telnet-restful + taosadapter_count: 1 + stb_count: 1 + tb_count: 10 + row_count: 10000000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + testcase11: + threads: 24 + protocol: json + taosadapter_count: 1 + stb_count: 1 + tb_count: 1 + row_count: 3000000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + testcase12: + threads: 32 + protocol: json + taosadapter_count: 1 + stb_count: 1 + tb_count: 1 + row_count: 3000000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + testcase13: + threads: 64 + protocol: json + taosadapter_count: 1 + stb_count: 1 + tb_count: 1 + row_count: 3000000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + testcase14: + threads: 100 + protocol: json + taosadapter_count: 1 + stb_count: 1 + tb_count: 1 + row_count: 5000000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + + testcase15: + threads: 100 + protocol: json + taosadapter_count: 2 + stb_count: 1 + tb_count: 1 + row_count: 5000000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + testcase16: + threads: 100 + protocol: json + taosadapter_count: 3 + stb_count: 1 + tb_count: 1 + row_count: 5000000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + + testcase17: + threads: 100 + protocol: json + taosadapter_count: 1 + stb_count: 1000000 + tb_count: 1 + row_count: 1 + sleep_time: 60s + tag_count: 10 + col_count: 1 + testcase18: + threads: 100 + protocol: json + taosadapter_count: 1 + stb_count: 1 + tb_count: 5000000 + row_count: 1 + sleep_time: 60s + tag_count: 10 + col_count: 1 + + testcase19: + threads: 100 + protocol: json + taosadapter_count: 1 + stb_count: 1 + tb_count: 100000 + row_count: 1000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + + testcase20: + threads: 100 + protocol: json + taosadapter_count: 1 + stb_count: 1 + tb_count: 10 + row_count: 10000000 + sleep_time: 60s + tag_count: 10 + col_count: 1 + + + + diff --git a/tests/perftest-scripts/taosadapter_perftest/config/taosadapter_performance_test.jmx b/tests/perftest-scripts/taosadapter_perftest/config/taosadapter_performance_test.jmx new file mode 100644 index 0000000000000000000000000000000000000000..0f4b0cf2393a109a9ea1d7df368540ba00e47eec --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/config/taosadapter_performance_test.jmx @@ -0,0 +1,250 @@ [250 lines of JMeter test-plan XML; the element markup was lost in extraction. The recoverable content shows a plan with HTTP samplers that POST "drop database if exists db_name" and "create database if not exists db_name precision 'ms'" to http://restful_ip:restful_port/rest/sql, a thread group of perf_threads threads running loop_count iterations that POSTs input_line to http://restful_ip:restful_port/opentsdb/v1/put/line_protocol/db_name, a TCP sampler (org.apache.jmeter.protocol.tcp.sampler.TCPClientImpl) against telnet_ip:telnet_port, a header manager sending Authorization: Basic cm9vdDp0YW9zZGF0YQ==, and a UTF-8 CSV data set reading import_file with variables row_csv_count,tb_csv_count,stb_csv_count.] diff --git a/tests/perftest-scripts/taosadapter_perftest/run_performance.py b/tests/perftest-scripts/taosadapter_perftest/run_performance.py new file mode 100644 index 0000000000000000000000000000000000000000..f61d1cd0ab0965ee2e558d21b47ad91442186651 --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/run_performance.py @@ -0,0 +1,82 @@ +from config.env_init import * +from src.common.common import Common +from src.common.dnodes import Dnodes +from src.common.monitor import Monitor +from src.util.jmeter import Jmeter + +class RunPerformance: + def __init__(self): + self.COM = Common() + self.current_dir = os.path.dirname(os.path.realpath(__file__)) + self.log_dir = os.path.join(self.current_dir, f'./log') + if config["jmeter"]["clean_aggregate_report"]: + self.COM.exec_local_cmd(f'sudo rm -rf {self.log_dir}/testcase*') + + def runJmeter(self): + for key, value in config['testcases'].items(): + jmx_file_list = list() + logger.info(f'executing {key}') + for jmx_file in self.COM.genJmxFile(key)[:value["taosadapter_count"]]: + jmx_filename = jmx_file.split('/')[-1] + import_file_name = jmx_filename.replace('jmx', 'txt') + import_file = os.path.join(self.current_dir, f'./config/{import_file_name}') + loop_count = self.COM.getLoopCount(value["stb_count"], value["tb_count"], value["row_count"], value["threads"]) + self.COM.genMixStbTbRows(import_file, value["stb_count"], value["tb_count"], value["row_count"]) + input_line = self.COM.genProtocolLine(value["protocol"], value["tag_count"]) + with open(jmx_file, 'r', encoding='utf-8') as f: + file_data = "" + for line in f: + if value['protocol'] == 'telnet-tcp': + if "telnet_tcp_status" in line: + line = line.replace("telnet_tcp_status", "true") + if value['protocol'] == 'telnet-restful' or value['protocol'] == 'json': + if "drop_db_status" in line: + line = line.replace("drop_db_status", "true") + if "create_db_status" in line: + line = line.replace("create_db_status", "true") + if "telnet_restful_status" in line: + line = line.replace("telnet_restful_status", "true") + if "line_protocol" in line: + if value['protocol'] == 'telnet-restful': + line = line.replace("line_protocol", 'telnet') + elif value['protocol'] == 'json': + line = line.replace("line_protocol", 'json') + else: + pass + if "db_name" in line: + db_name = jmx_filename.split('.')[0] + line = line.replace("db_name", db_name) + if "import_file" in line: + line = line.replace("import_file", import_file) + if "input_line" in line: + line = line.replace("input_line", input_line) + if "perf_threads" in line: + line = line.replace("perf_threads", str(value['threads'])) + if "loop_count" in line: + line = line.replace("loop_count", str(loop_count)) + file_data += line + with open(jmx_file, "w", encoding="utf-8") as f: + f.write(file_data) + jmx_file_list.append(jmx_file) + jmeter_cmd_list = self.COM.genJmeterCmd(jmx_file_list) + self.COM.multiThreadRun(self.COM.genJmeterThreads(jmeter_cmd_list)) + time.sleep(int(''.join(list(filter(str.isdigit, str(value["sleep_time"])))))) + +if __name__ == '__main__': + Performance = RunPerformance() + DNODES = Dnodes() + MONITOR = Monitor() + JMETER = Jmeter() + if config['deploy_mode'] == "auto": + if config['taosd_autodeploy']: + DNODES.deployNodes() + if config["prometheus"]["autodeploy"]: + 
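runJmeter above stitches each testcase into its .jmx copy in two steps: it computes the per-thread loop count, then substitutes the placeholder tokens line by line. A minimal self-contained sketch of both steps, mirroring getLoopCount in common.py and the replace loop above; file name and values are hypothetical:

    import math

    def loop_count(stb_count, tb_count, row_count, threads):
        # total rows are spread across the thread group, rounded up
        return math.ceil(stb_count * tb_count * row_count / threads)

    def render_template(path, replacements):
        # replace placeholder tokens (db_name, perf_threads, loop_count, ...)
        with open(path, encoding='utf-8') as f:
            data = f.read()
        for token, value in replacements.items():
            data = data.replace(token, str(value))
        with open(path, 'w', encoding='utf-8') as f:
            f.write(data)

    # e.g. testcase4: 1 super table x 1 child table x 5,000,000 rows on 100 threads
    render_template('testcase4_taosd_dnode2.jmx', {      # hypothetical file name
        'perf_threads': 100,
        'loop_count': loop_count(1, 1, 5000000, 100),    # 50000
    })
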
MONITOR.deployAllNodeExporters() + MONITOR.deployAllProcessExporters() + MONITOR.deployPrometheus() + MONITOR.deployGrafana() + if config["jmeter"]["autodeploy"]: + JMETER.deployJmeter() + Performance.runJmeter() + + diff --git a/tests/perftest-scripts/taosadapter_perftest/src/common/common.py b/tests/perftest-scripts/taosadapter_perftest/src/common/common.py new file mode 100644 index 0000000000000000000000000000000000000000..21aa6ca734f9848dff3702431a41532fc2363b09 --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/src/common/common.py @@ -0,0 +1,173 @@ +import sys +sys.path.append("../../") +from config.env_init import * +import shutil +import threading +import time +import json +class Common: + def __init__(self): + self.ip_list = list() + self.current_dir = os.path.dirname(os.path.realpath(__file__)) + self.base_jmx_file = os.path.join(self.current_dir, '../../config/taosadapter_performance_test.jmx') + self.log_dir = os.path.join(self.current_dir, '../../log') + + def exec_local_cmd(self,shell_cmd): + logger.info(f'executing cmd: {shell_cmd}') + result = os.popen(shell_cmd).read().strip() + logger.info(result) + return result + + def genTelnetMulTagStr(self, count): + tag_str = "" + for i in range(1, count): + if i < (count-1): + tag_str += f't{i}={i} ' + else: + tag_str += f't{i}={i}' + return tag_str + + def genJsonMulTagDict(self, count): + tag_dict = dict() + for i in range(1, count): + tag_dict[f"t{i}"] = f"{i}" + return tag_dict + + def genProtocolLine(self, protocol, tag_count, col_count=None): + if protocol == "telnet-restful": + base_str = 'stb_${stb_csv_count} ${row_csv_count} 32.261068286779754 t0=${tb_csv_count} ' + tag_str = self.genTelnetMulTagStr(tag_count) + telnet_line = base_str + tag_str + return telnet_line + elif protocol == "telnet-tcp": + base_str = 'tstb_${stb_csv_count} ${row_csv_count} 32.261068286779754 t0=${tb_csv_count} ' + tag_str = self.genTelnetMulTagStr(tag_count) + telnet_line = base_str + tag_str + '${__unescape(\r\n)}' + return telnet_line + elif protocol == "json": + base_tag_dict = {"t0":"${tb_csv_count}"} + dict_merged = base_tag_dict.copy() + dict_merged.update(self.genJsonMulTagDict(tag_count)) + json_line = '{"metric": "stb_${stb_csv_count}", "timestamp":${row_csv_count}, "value":32.261068286779754, ' + f'"tags": {dict_merged}' + '}' + return json_line.replace('\'','"') + elif protocol == "influxdb": + # TODO + pass + else: + pass + + def genMixStbTbRows(self, filename, stb_count, tb_count, row_count): + if stb_count == 0: + stb_count = 1 + if tb_count == 0: + tb_count = 1 + if row_count == 0: + row_count = 1 + logger.info(f'generating import data file: {filename}') + ts_start = 1614530008000 + with open(filename, "w", encoding="utf-8") as f_w: + for k in range(stb_count): + for i in range(tb_count): + for j in range(row_count): + input_line = str(ts_start) + "," + str(i) + "," + str(k) + '\n' + ts_start += 1 + f_w.write(input_line) + + def genJmxFile(self, testcase): + des_jmx_file_list = list() + base_jmx_file = os.path.join(self.current_dir, '../../config/taosadapter_performance_test.jmx') + count_flag = 0 + if config["taosadapter_separate_deploy"]: + for key in config: + if "taosd_dnode" in str(key) and "taosd_dnode1" not in str(key): + if count_flag < int(config['testcases'][testcase]['taosadapter_count']): + count_flag += 1 + else: + break + des_jmx_file = os.path.join(self.current_dir, f'../../config/{testcase}_{key}.jmx') + shutil.copyfile(base_jmx_file, des_jmx_file) + with open(des_jmx_file, 'r', encoding='utf-8') as f: 
+ file_data = "" + for line in f: + if "restful_ip" in line: + line = line.replace("restful_ip", config[key]['ip']) + if "restful_port" in line: + line = line.replace("restful_port", str(config[key]['restful_port'])) + if "telnet_ip" in line: + line = line.replace("telnet_ip", config[key]['ip']) + if "telnet_port" in line: + line = line.replace("telnet_port", str(config[key]['telnet_port'])) + # if "db_name" in line: + # line = line.replace("db_name", key) + file_data += line + with open(des_jmx_file, "w", encoding="utf-8") as f: + f.write(file_data) + des_jmx_file_list.append(des_jmx_file) + else: + des_jmx_file = os.path.join(self.current_dir, f'../../config/{testcase}_taosd_dnode1.jmx') + shutil.copyfile(base_jmx_file, des_jmx_file) + + with open(des_jmx_file, 'r', encoding='utf-8') as f: + file_data = "" + for line in f: + if "restful_ip" in line: + line = line.replace("restful_ip", config['taosd_dnode1']['ip']) + if "restful_port" in line: + line = line.replace("restful_port", str(config['taosd_dnode1']['restful_port'])) + if "telnet_ip" in line: + line = line.replace("telnet_ip", config['taosd_dnode1']['ip']) + if "telnet_port" in line: + line = line.replace("telnet_port", str(config['taosd_dnode1']['telnet_port'])) + # if "db_name" in line: + # line = line.replace("db_name", "taosd_dnode1") + file_data += line + with open(des_jmx_file, "w", encoding="utf-8") as f: + f.write(file_data) + des_jmx_file_list.append(des_jmx_file) + return des_jmx_file_list + + def getLoopCount(self, stb_count, tb_count, row_count, threads): + if (stb_count * tb_count * row_count) % threads == 0: + loop_count = int((stb_count * tb_count * row_count) / threads) + else: + loop_count = int((stb_count * tb_count * row_count) / threads) + 1 + return loop_count + + def recreateReportDir(self, path): + ''' + recreate jmeter report path + ''' + if os.path.exists(path): + self.exec_local_cmd(f'rm -rf {path}/*') + else: + os.makedirs(path) + + def genJmeterCmd(self, jmx_file_list): + jmeter_cmd_list = list() + for jmx_file in jmx_file_list: + jmeter_cmd = f'jmeter -n -t {jmx_file}' + if config['jmeter']['aggregate_report']: + current_time = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime(time.time())) + jmx_filename = jmx_file.split('/')[-1].replace('.jmx', '') + jmx_filelog = f'{jmx_filename}_{current_time}' + jmeter_report_dir = f'{self.log_dir}/{jmx_filelog}' + self.recreateReportDir(jmeter_report_dir) + jmeter_cmd += f' -l {jmeter_report_dir}/{jmx_filelog}.log -e -o {jmeter_report_dir}' + jmeter_cmd_list.append(jmeter_cmd) + return jmeter_cmd_list + + def genJmeterThreads(self, jmeter_cmd_list): + tlist = list() + for jmeter_cmd in jmeter_cmd_list: + t = threading.Thread(target=self.exec_local_cmd, args=(jmeter_cmd,)) + tlist.append(t) + return tlist + + def multiThreadRun(self, tlist): + for t in tlist: + t.start() + for t in tlist: + t.join() + +if __name__ == '__main__': + com = Common() diff --git a/tests/perftest-scripts/taosadapter_perftest/src/common/dnodes.py b/tests/perftest-scripts/taosadapter_perftest/src/common/dnodes.py new file mode 100644 index 0000000000000000000000000000000000000000..31c1684c7e29d51860483261a8ab604c402fb15d --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/src/common/dnodes.py @@ -0,0 +1,393 @@ +import sys +import json +sys.path.append("../../") +from config.env_init import * +from src.util.RemoteModule import RemoteModule +class Dnode: + def __init__(self, index, dnode_ip, dnode_port, dnode_username, dnode_password): + self.install_package = 
config["install_package"] + self.hostname_prefix = config["hostname_prefix"] + self.ip_suffix = dnode_ip.split('.')[-1] + self.dnode_name = self.hostname_prefix + self.ip_suffix + self.index = index + self.dnode_dict = config[f'taosd_dnode{self.index}'] + self.dnode_ip = dnode_ip + self.dnode_port = dnode_port + self.dnode_username = dnode_username + self.dnode_password = dnode_password + self.dnode_conn = RemoteModule(self.dnode_ip, self.dnode_port, self.dnode_username, self.dnode_password) + + if self.dnode_username == "root": + self.home_dir = "/root" + else: + self.home_dir = f"/home/{self.dnode_username}" + + def installPackage(self): + if bool(int(self.dnode_conn.exec_cmd(f'cat /etc/os-release | grep ubuntu >> /dev/null && echo 1 || echo 0'))): + package_list = ["wget", "screen"] + for package in package_list: + if not bool(int(self.dnode_conn.exec_cmd(f'sudo dpkg -s {package} >> /dev/null && echo 1 || echo 0'))): + self.dnode_conn.exec_cmd(f'apt update -y && apt install -y {package}') + elif bool(int(self.dnode_conn.exec_cmd(f'cat /etc/os-release | grep centos >> /dev/null && echo 1 || echo 0'))): + package_list = ["wget", "screen"] + for package in package_list: + if not bool(int(self.dnode_conn.exec_cmd(f'sudo rpm -qa | grep {package} >> /dev/null && echo 1 || echo 0'))): + self.dnode_conn.exec_cmd(f'yum update -y && yum install -y {package}') + else: + pass + + def startTaosd(self): + logger.info(f'{self.dnode_ip}: starting taosd') + self.dnode_conn.exec_cmd("sudo systemctl start taosd") + + def stopTaosd(self): + logger.info(f'{self.dnode_ip}: stopping taosd') + self.dnode_conn.exec_cmd("sudo systemctl stop taosd") + + def killTaosd(self): + logger.info(f'{self.dnode_ip}: killing taosd') + self.dnode_conn.exec_cmd("ps -ef | grep -w taosd | grep -v grep | awk \'{print $2}\' | sudo xargs kill -9") + + def restartTaosd(self): + logger.info(f'{self.dnode_ip}: restarting taosd') + self.dnode_conn.exec_cmd("sudo systemctl restart taosd") + + def startTaosadapter(self): + logger.info(f'{self.dnode_ip}: starting taosadapter') + self.dnode_conn.exec_cmd("sudo systemctl start taosadapter") + + def stopTaosadapter(self): + logger.info(f'{self.dnode_ip}: stopping taosadapter') + self.dnode_conn.exec_cmd("sudo systemctl stop taosadapter") + + def killTaosadapter(self): + logger.info(f'{self.dnode_ip}: killing taosadapter') + self.dnode_conn.exec_cmd("ps -ef | grep -w taosadapter | grep -v grep | awk \'{print $2}\' | sudo xargs kill -9") + + def restartTaosadapter(self): + logger.info(f'{self.dnode_ip}: restarting taosadapter') + self.dnode_conn.exec_cmd("sudo systemctl restart taosadapter") + + def rmTaosd(self): + logger.info(f'{self.dnode_ip}: removing taosd') + self.dnode_conn.exec_cmd("rmtaos") + + def rmTaosdLog(self): + logger.info(f'{self.dnode_ip}: removing taosd log') + if self.dnode_dict["modify_cfg"]: + self.dnode_conn.exec_cmd(f'sudo rm -rf {self.dnode_dict["cfg"]["logDir"]}/*') + else: + self.dnode_conn.exec_cmd("sudo rm -rf /var/log/taos/*") + + def rmTaosdData(self): + logger.info(f'{self.dnode_ip}: removing taosd data') + if self.dnode_dict["modify_cfg"]: + self.dnode_conn.exec_cmd(f'sudo rm -rf {self.dnode_dict["cfg"]["dataDir"]}/*') + else: + self.dnode_conn.exec_cmd("sudo rm -rf /var/lib/taos/*") + + def rmTaosCfg(self): + logger.info(f'{self.dnode_ip}: removing taos.cfg') + self.dnode_conn.exec_cmd("sudo rm -rf /etc/taos/taos.cfg") + + def modifyTaosCfg(self, firstEp=None): + hostname = self.configHostname() + if self.dnode_dict["modify_cfg"]: + 
logger.info(f'{self.dnode_ip}: modify /etc/taos/taos.cfg') + for key, value in self.dnode_dict['cfg'].items(): + self.createRemoteDir(value) + self.dnode_conn.exec_cmd(f'echo {key} {value} >> /etc/taos/taos.cfg') + if firstEp is not None: + self.dnode_conn.exec_cmd(f'echo "firstEp {firstEp}" >> /etc/taos/taos.cfg') + self.dnode_conn.exec_cmd(f'echo "fqdn {hostname}" >> /etc/taos/taos.cfg') + + def createRemoteDir(self, dir): + ''' + if exist: echo 1 + else: echo 0 + ''' + res = bool(int(self.dnode_conn.exec_cmd(f'[ -e {dir} ] && echo 1 || echo 0'))) + if not res: + self.dnode_conn.exec_cmd(f'sudo mkdir -p {dir}') + + def getHostname(self, ip=None): + if ip == self.dnode_ip: + return self.dnode_conn.exec_cmd('hostname').strip() + else: + return False + + def configHostname(self): + logger.info(f'{self.dnode_ip}: config dnode hostname') + ori_hostname = self.dnode_conn.exec_cmd('hostname').strip() + if "localhost" in str(ori_hostname).lower(): + self.dnode_conn.exec_cmd(f"sudo hostnamectl set-hostname {self.dnode_name}") + return self.dnode_name + return ori_hostname + + def hostsIsExist(self, ip, hostname): + host_count = int(self.dnode_conn.exec_cmd(f'grep "^{ip}.*.{hostname}" /etc/hosts | wc -l')) + if host_count > 0: + logger.info(f'{self.dnode_ip}: check /etc/hosts: {ip} {hostname} existed') + return True + else: + logger.info(f'{self.dnode_ip}: check /etc/hosts: {ip} {hostname} not exist') + return False + + def configHosts(self, ip, hostname): + if not self.hostsIsExist(ip, hostname): + logger.info(f'{self.dnode_ip}: config dnode /etc/hosts: {ip} {hostname}') + self.dnode_conn.exec_cmd(f'sudo echo "{ip} {hostname}" >> /etc/hosts') + + def checkStatus(self, process): + process_count = self.dnode_conn.exec_cmd(f'ps -ef | grep -w {process} | grep -v grep | wc -l') + if int(process_count.strip()) > 0: + logger.info(f'check {self.dnode_ip} {process} existed') + return True + else: + logger.info(f'check {self.dnode_ip} {process} not exist') + return False + + def taoscCreateDnodes(self): + firstEp = f'{self.configHostname()}:6030' + self.dnode_conn.exec_cmd(f'sudo taos -s "create dnode \'{firstEp}\'"') + ready_count = self.dnode_conn.exec_cmd(f'taos -s "show dnodes" | grep {firstEp} | grep ready | wc -l') + ready_flag = 0 + if int(ready_count) == 1: + logger.success(f'deploy dnode {firstEp} success') + while int(ready_count) != 1: + if ready_flag < config["timeout"]: + ready_flag += 1 + else: + logger.error(f'deploy cluster {firstEp} failed, please check by manual') + time.sleep(1) + ready_count = self.dnode_conn.exec_cmd(f'taos -s "show dnodes" | grep {firstEp} | grep ready | wc -l') + if int(ready_count) == 1: + logger.success(f'deploy dnode {firstEp} success') + + def downloadNodeExporter(self): + logger.info(f'{self.dnode_ip}: downloading node_exporter from {config["prometheus"]["node_exporter_addr"]}') + tar_file_name = config["prometheus"]["node_exporter_addr"].split("/")[-1] + if not bool(int(self.dnode_conn.exec_cmd(f'[ -e ~/{tar_file_name} ] && echo 1 || echo 0'))): + self.dnode_conn.exec_cmd(f'wget -P ~ {config["prometheus"]["node_exporter_addr"]}') + + def configNodeExporterService(self): + logger.info(f'{self.dnode_ip}: configing /lib/systemd/system/node_exporter.service') + if not bool(int(self.dnode_conn.exec_cmd(f'[ -e /lib/systemd/system/node_exporter.service ] && echo 1 || echo 0'))): + self.dnode_conn.exec_cmd(f'sudo echo -e [Service]\n\ + User=prometheus\n\ + Group=prometheus\n\ + ExecStart=/usr/local/bin/node_exporter\n\ + [Install]\n\ + WantedBy=multi-user.target\n\ + 
[Unit]\n\ + Description=node_exporter\n\ + After=network.target \ + >> /lib/systemd/system/node_exporter.service') + + def killNodeExporter(self): + logger.info(f'{self.dnode_ip}: killing node_exporter') + self.dnode_conn.exec_cmd("ps -ef | grep -w node_exporter | grep -v grep | awk \'{print $2}\' | sudo xargs kill -9") + + def deployNodeExporter(self): + logger.info(f'{self.dnode_ip}: deploying node_exporter') + self.killNodeExporter() + self.downloadNodeExporter() + tar_file_name = config["prometheus"]["node_exporter_addr"].split("/")[-1] + tar_file_dir = tar_file_name.replace(".tar.gz", "") + self.dnode_conn.exec_cmd(f'cd ~ && tar -xvf {tar_file_name} && cd {tar_file_dir} && cp -rf node_exporter /usr/local/bin') + self.configNodeExporterService() + self.dnode_conn.exec_cmd('sudo groupadd -r prometheus') + self.dnode_conn.exec_cmd('sudo useradd -r -g prometheus -s /sbin/nologin -M -c "prometheus Daemons" prometheus') + self.dnode_conn.exec_cmd('systemctl start node_exporter && systemctl enable node_exporter && systemctl status node_exporter') + + def downloadProcessExporter(self): + tar_file_name = config["prometheus"]["process_exporter_addr"].split("/")[-1] + logger.info(f'{self.dnode_ip}: downloading process_exporter from {config["prometheus"]["process_exporter_addr"]}') + if not bool(int(self.dnode_conn.exec_cmd(f'[ -e ~/{tar_file_name} ] && echo 1 || echo 0'))): + self.dnode_conn.exec_cmd(f'wget -P ~ {config["prometheus"]["process_exporter_addr"]}') + + def killProcessExporter(self): + logger.info(f'{self.dnode_ip}: killing process_exporter') + self.dnode_conn.exec_cmd("ps -ef | grep -w process_exporter | grep -v grep | awk \'{print $2}\' | sudo xargs kill -9") + + def uploadProcessExporterYml(self, process_list): + logger.info(f'{self.dnode_ip}: generating process_exporter yml') + sub_list = list() + for process in process_list: + sub_list.append({'name':'{{.Comm}}', 'cmdline': [process]}) + djson = {'process_names': sub_list} + dstr=json.dumps(djson) + dyml=yaml.load(dstr, Loader=yaml.FullLoader) + stream = open('process_name.yml', 'w') + yaml.safe_dump(dyml, stream, default_flow_style=False) + self.dnode_conn.upload_file(self.home_dir, 'process_name.yml') + + def deployProcessExporter(self, process_list): + logger.info(f'{self.dnode_ip}: deploying process_exporter') + self.killProcessExporter() + self.downloadProcessExporter() + self.uploadProcessExporterYml(process_list) + tar_file_name = config["prometheus"]["process_exporter_addr"].split("/")[-1] + tar_file_dir = tar_file_name.replace(".tar.gz", "") + self.dnode_conn.exec_cmd(f'cd ~ && tar -xvf {tar_file_name} && mv -f ~/process_name.yml ~/{tar_file_dir}') + self.dnode_conn.exec_cmd(f'screen -d -m ~/{tar_file_dir}/process-exporter --config.path ~/{tar_file_dir}/process_name.yml') + + def deployTaosd(self, firstEp=None, deploy_type="taosd"): + ''' + deploy_type = taosd/taosadapter + ''' + self.dnode_conn.upload_file(self.home_dir, self.install_package) + if config["clean_env"]: + self.rmTaosCfg() + self.rmTaosdLog() + self.rmTaosdData() + package_name = self.install_package.split("/")[-1] + package_dir = '-'.join(package_name.split("-", 3)[0:3]) + self.stopTaosd() + self.killTaosd() + logger.info(f'{self.dnode_ip}: installing taosd') + logger.info(self.dnode_conn.exec_cmd(f'cd {self.home_dir} && tar -xvf {self.home_dir}/{package_name} && cd {package_dir} && yes|./install.sh')) + self.modifyTaosCfg(firstEp) + if deploy_type == "taosd": + self.startTaosd() + elif deploy_type == "taosadapter": + self.startTaosadapter() + if 
self.checkStatus(deploy_type): + logger.success(f'{self.dnode_ip}: {deploy_type} deploy success') + else: + logger.error(f'{self.dnode_ip}: {deploy_type} deploy failed, please check by manual') + sys.exit(1) + +class Dnodes: + def __init__(self): + self.dnodes = list() + self.ip_list = list() + index = 1 + for key in config: + if "taosd_dnode" in str(key): + self.dnodes.append(Dnode(index, config[key]["ip"], config[key]["port"], config[key]["username"], config[key]["password"])) + self.ip_list.append(config[key]["ip"]) + index += 1 + + def installDnodesPackage(self): + for index in range(len(self.dnodes)): + self.dnodes[index].installPackage() + + def rmDnodeTaosd(self, index): + self.dnodes[index - 1].rmTaosd() + + def rmDnodeTaosdLog(self, index): + self.dnodes[index - 1].rmTaosdLog() + + def rmDnodeTaosdData(self, index): + self.dnodes[index - 1].rmTaosdData() + + def rmDnodeTaosCfg(self, index): + self.dnodes[index - 1].rmTaosCfg() + + def modifyDnodeTaosCfg(self, index, firstEp=None): + self.dnodes[index - 1].modifyTaosCfg(firstEp) + + def startDnodeTaosd(self, index): + self.dnodes[index - 1].startTaosd() + + def stopDnodeTaosd(self, index): + self.dnodes[index - 1].stopTaosd() + + def killDnodeTaosd(self, index): + self.dnodes[index - 1].killTaosd() + + def restartDnodeTaosd(self, index): + self.dnodes[index - 1].restartTaosd() + + def startAllTaosd(self): + for index in range(len(self.dnodes)): + self.dnodes[index].startTaosd() + + def stopAllTaosd(self): + for index in range(len(self.dnodes)): + self.dnodes[index].stopTaosd() + + def killAllTaosd(self): + for index in range(len(self.dnodes)): + self.dnodes[index].killTaosd() + + def restartAllTaosd(self): + for index in range(len(self.dnodes)): + self.dnodes[index].restartTaosd() + + def startNodeTaosadapter(self, index): + self.dnodes[index - 1].startTaosadapter() + + def stopNodeTaosadapter(self, index): + self.dnodes[index - 1].stopTaosadapter() + + def killNodeTaosadapter(self, index): + self.dnodes[index - 1].killTaosadapter() + + def restartNodeTaosadapter(self, index): + self.dnodes[index - 1].restartTaosadapter() + + def startAllTaosadapters(self): + for index in range(len(self.dnodes)): + self.dnodes[index].startTaosadapter() + + def stopAllTaosadapters(self): + for index in range(len(self.dnodes)): + self.dnodes[index].stopTaosadapter() + + def killAllTaosadapters(self): + for index in range(len(self.dnodes)): + self.dnodes[index].killTaosadapter() + + def restartAllTaosadapters(self): + for index in range(len(self.dnodes)): + self.dnodes[index].restartTaosadapter() + + def configDnodesHostname(self): + for index in range(len(self.dnodes)): + self.dnodes[index].configHostname() + + def configDnodesHosts(self): + ip_hostname_dict = dict() + for index in range(len(self.dnodes)): + for ip in self.ip_list: + hostname = self.dnodes[index].getHostname(ip) + if hostname is not False: + ip_hostname_dict[ip] = hostname + for index in range(len(self.dnodes)): + for ip, hostname in ip_hostname_dict.items(): + self.dnodes[index].configHosts(ip, hostname) + + def deployNodes(self): + self.configDnodesHostname() + self.configDnodesHosts() + firstEp = f'{self.dnodes[0].configHostname()}:6030' + if not config["taosadapter_separate_deploy"] and not 
config["taosd_cluster"]: + self.dnodes[0].deployTaosd() + elif config["taosadapter_separate_deploy"] and not config["taosd_cluster"]: + for index in range(len(self.dnodes)): + if index == 0: + self.dnodes[index].deployTaosd(firstEp, "taosd") + else: + self.dnodes[index].deployTaosd(firstEp, "taosadapter") + else: + for index in range(len(self.dnodes)): + self.dnodes[index].deployTaosd(firstEp) + for index in range(len(self.dnodes)): + if index != 0: + self.dnodes[index].taoscCreateDnodes() + +if __name__ == '__main__': + deploy = Dnodes() + deploy.deployNodes() + diff --git a/tests/perftest-scripts/taosadapter_perftest/src/common/monitor.py b/tests/perftest-scripts/taosadapter_perftest/src/common/monitor.py new file mode 100644 index 0000000000000000000000000000000000000000..66b6f5bf977597afca6f2ecf9fb51ea9e0d1aba4 --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest/src/common/monitor.py @@ -0,0 +1,119 @@ +import sys +import json +sys.path.append("../../") +from config.env_init import * +from src.util.RemoteModule import RemoteModule +from src.common.dnodes import Dnodes, Dnode + +class Monitor: + def __init__(self): + self.monitor_ip = config["prometheus"]["ip"] + self.monitor_port = config["prometheus"]["port"] + self.monitor_username = config["prometheus"]["username"] + self.monitor_password = config["prometheus"]["password"] + self.monitor_conn = RemoteModule(self.monitor_ip, self.monitor_port, self.monitor_username, self.monitor_password) + self.dnodes = list() + index = 1 + for key in config: + if "taosd_dnode" in str(key): + self.dnodes.append(Dnode(index, config[key]["ip"], config[key]["port"], config[key]["username"], config[key]["password"])) + index += 1 + + if self.monitor_username == "root": + self.home_dir = "/root" + else: + self.home_dir = f"/home/{self.monitor_username}" + + def installDnodesPackage(self): + for index in range(len(self.dnodes)): + self.dnodes[index].installPackage() + + def deployAllNodeExporters(self): + for index in range(len(self.dnodes)): + self.dnodes[index].deployNodeExporter() + + def deployAllProcessExporters(self): + for index in range(len(self.dnodes)): + if index == 0: + self.dnodes[index].deployProcessExporter(['taosd', 'taosadapter']) + else: + if config['taosd_cluster'] and config['taosadapter_separate_deploy']: + self.dnodes[index].deployProcessExporter(['taosd', 'taosadapter']) + elif config['taosd_cluster'] and not config['taosadapter_separate_deploy']: + self.dnodes[index].deployProcessExporter(['taosd']) + elif not config['taosd_cluster'] and config['taosadapter_separate_deploy']: + self.dnodes[index].deployProcessExporter(['taosadapter']) + else: + pass + + def downloadPrometheus(self): + logger.info(f'{self.monitor_ip}: downloading prometheus from {config["prometheus"]["prometheus_addr"]}') + tar_file_name = config["prometheus"]["prometheus_addr"].split("/")[-1] + if not bool(int(self.monitor_conn.exec_cmd(f'[ -e ~/{tar_file_name} ] && echo 1 || echo 0'))): + self.monitor_conn.exec_cmd(f'wget -P ~ {config["prometheus"]["prometheus_addr"]}') + + def killPrometheus(self): + logger.info(f'{self.monitor_ip}: killing prometheus') + self.monitor_conn.exec_cmd("ps -ef | grep -w prometheus | grep -v grep | awk \'{print $2}\' | sudo xargs kill -9") + + def uploadPrometheusYml(self): + logger.info('generating prometheus yml') + scrape_configs = [{'job_name': 'prometheus', 'static_configs': [{'targets': ['localhost:9090']}]}] + for index in range(len(self.dnodes)): + if not config['taosd_cluster'] and not 
config['taosadapter_separate_deploy']: + pass + else: + scrape_configs.append({'job_name': f'{self.dnodes[index].dnode_ip}_sys', 'static_configs': [{'targets': [f'{self.dnodes[index].dnode_ip}:9100'], 'labels': {'instance': f'{self.dnodes[index].dnode_ip}_sys'}}]}) + scrape_configs.append({'job_name': f'{self.dnodes[index].dnode_ip}', 'static_configs': [{'targets': [f'{self.dnodes[index].dnode_ip}:9256'], 'labels': {'instance': f'{self.dnodes[index].dnode_ip}'}}]}) + djson = {'global': {'scrape_interval': config["prometheus"]["scrape_interval"], 'evaluation_interval': config["prometheus"]["evaluation_interval"], 'scrape_timeout': config["prometheus"]["scrape_timeout"]}, 'alerting': {'alertmanagers': [{'static_configs': [{'targets': None}]}]}, 'rule_files': None, 'scrape_configs': scrape_configs} + dstr=json.dumps(djson) + dyml=yaml.load(dstr, Loader=yaml.FullLoader) + stream = open('prometheus.yml', 'w') + yaml.safe_dump(dyml, stream, default_flow_style=False) + self.monitor_conn.upload_file(self.home_dir, 'prometheus.yml') + + def deployPrometheus(self): + logger.info(f'{self.monitor_ip}: deploying prometheus') + self.installDnodesPackage() + self.killPrometheus() + self.downloadPrometheus() + self.uploadPrometheusYml() + tar_file_name = config["prometheus"]["prometheus_addr"].split("/")[-1] + tar_file_dir = tar_file_name.replace(".tar.gz", "") + self.monitor_conn.exec_cmd(f'cd ~ && tar -xvf {tar_file_name} && mv ~/prometheus.yml ~/{tar_file_dir}') + self.monitor_conn.exec_cmd(f'screen -d -m ~/{tar_file_dir}/prometheus --config.file={self.home_dir}/{tar_file_dir}/prometheus.yml') + + def installGrafana(self): + logger.info(f'{self.monitor_ip}: installing grafana') + if bool(int(self.monitor_conn.exec_cmd(f'cat /etc/os-release | grep ubuntu >> /dev/null && echo 1 || echo 0'))): + if not bool(int(self.monitor_conn.exec_cmd(f'sudo dpkg -s grafana >> /dev/null && echo 1 || echo 0'))): + self.monitor_conn.exec_cmd('sudo apt-get install -y apt-transport-https') + self.monitor_conn.exec_cmd('sudo apt-get install -y software-properties-common wget') + self.monitor_conn.exec_cmd('wget -q -O - https://packages.grafana.com/gpg.key | sudo apt-key add -') + self.monitor_conn.exec_cmd('echo "deb https://packages.grafana.com/oss/deb stable main" | sudo tee -a /etc/apt/sources.list.d/grafana.list') + self.monitor_conn.exec_cmd('apt-get update') + self.monitor_conn.exec_cmd('sudo apt-get -y install grafana') + elif bool(int(self.monitor_conn.exec_cmd(f'cat /etc/os-release | grep centos >> /dev/null && echo 1 || echo 0'))): + if not bool(int(self.monitor_conn.exec_cmd(f'sudo rpm -qa | grep grafana >> /dev/null && echo 1 || echo 0'))): + self.monitor_conn.exec_cmd('rm -rf /etc/yum.repos.d/grafana.repo') + self.monitor_conn.exec_cmd('sudo echo -e "[grafana]\nname=grafana\nbaseurl=https://packages.grafana.com/oss/rpm\nrepo_gpgcheck=1\nenabled=1\ngpgcheck=1\ngpgkey=https://packages.grafana.com/gpg.key\nsslverify=1\nsslcacert=/etc/pki/tls/certs/ca-bundle.crt" \ + >> /etc/yum.repos.d/grafana.repo') + self.monitor_conn.exec_cmd('yum install -y grafana') + else: + pass + + def deployGrafana(self): + self.installGrafana() + self.monitor_conn.exec_cmd('systemctl daemon-reload') + self.monitor_conn.exec_cmd('systemctl start grafana-server') + self.monitor_conn.exec_cmd('systemctl enable grafana-server.service') + self.monitor_conn.exec_cmd('systemctl status grafana-server') + +if __name__ == '__main__': + deploy = Dnodes() + deploy.deployNodes() + monitor = Monitor() + monitor.deployAllNodeExporters() + 
monitor.deployAllProcessExporters()
+    monitor.deployPrometheus()
+    monitor.deployGrafana()
diff --git a/tests/perftest-scripts/taosadapter_perftest/src/util/RemoteModule.py b/tests/perftest-scripts/taosadapter_perftest/src/util/RemoteModule.py
new file mode 100644
index 0000000000000000000000000000000000000000..484ee2a3f0939540c37c8e90cbabc09e995523c6
--- /dev/null
+++ b/tests/perftest-scripts/taosadapter_perftest/src/util/RemoteModule.py
@@ -0,0 +1,51 @@
+# -*-coding: utf-8-*-
+from fabric import Connection
+from config.env_init import *
+
+class RemoteModule():
+    def __init__(self, ip, port, user, passwd):
+        self.ip = ip
+        self.port = port
+        self.user = user
+        self.passwd = passwd
+
+    def upload_file(self, remote_dir, upload_file):
+        """
+        remote_dir: remote directory to upload into
+        upload_file: local file with path
+        """
+        try:
+            logger.info(f'{self.ip}: uploading {upload_file} to {remote_dir}')
+            c = Connection(self.ip, user=self.user, port=self.port, connect_timeout=120, connect_kwargs={"password": self.passwd})
+            c.put(upload_file, remote_dir)
+            c.close()
+        except Exception as e:
+            logger.error(f"{upload_file} upload failed: {e}, please check config/perf_test.yaml")
+
+    def download_file(self, remote_file_with_path, local_path):
+        """
+        remote_file_with_path: remote file with absolute path, e.g. /root/maple/bin/maple
+        local_path: local directory to save the file into, e.g. /root
+        """
+        try:
+            c = Connection(self.ip, user=self.user, port=self.port, connect_timeout=120, connect_kwargs={"password": self.passwd})
+            c.get(remote_file_with_path, local_path)
+            c.close()
+        except Exception as e:
+            logger.error(f"download file {remote_file_with_path} failed: {e}")
+
+    def exec_cmd(self, cmd):
+        """
+        cmd: shell command to execute remotely; returns the command's stdout
+        """
+        try:
+            logger.info(f'{self.ip}: executing cmd: {cmd}')
+            c = Connection(self.ip, user=self.user, port=self.port, connect_timeout=120, connect_kwargs={"password": self.passwd})
+            result = c.run(cmd, pty=False, warn=True, hide=False)
+            c.close()
+            return result.stdout
+        except Exception as e:
+            logger.error(f"exec cmd {cmd} failed: {e}")
+
+if __name__ == '__main__':
+    pass
\ No newline at end of file
diff --git a/tests/perftest-scripts/taosadapter_perftest/src/util/jmeter.py b/tests/perftest-scripts/taosadapter_perftest/src/util/jmeter.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f57ce9a3dc8062cbad581bd6e7c75ce8a6a35ae
--- /dev/null
+++ b/tests/perftest-scripts/taosadapter_perftest/src/util/jmeter.py
@@ -0,0 +1,63 @@
+import sys
+sys.path.append("../../")
+from config.env_init import *
+from src.util.RemoteModule import RemoteModule
+from src.common.common import Common
+
+class Jmeter:
+    def __init__(self):
+        self.Com = Common()
+        self.jmeter_ip = config["jmeter"]["ip"]
+        self.jmeter_port = config["jmeter"]["port"]
+        self.jmeter_username = config["jmeter"]["username"]
+        self.jmeter_password = config["jmeter"]["password"]
+        self.jmeter_conn = RemoteModule(self.jmeter_ip, self.jmeter_port, self.jmeter_username, self.jmeter_password)
+        self.tar_file_name = config["jmeter"]["jmeter_addr"].split("/")[-1]
+        self.tar_file_dir = self.tar_file_name.replace(".tgz", "")
+        if self.jmeter_username == "root":
+            self.home_dir = "/root"
+        else:
+            self.home_dir = f"/home/{self.jmeter_username}"
+
+    def installPkg(self, pkg_name):
+        if bool(int(self.jmeter_conn.exec_cmd('cat /etc/os-release | grep ubuntu >> /dev/null && echo 1 || echo 0'))):
+            if not bool(int(self.jmeter_conn.exec_cmd(f'sudo dpkg -s {pkg_name} >> /dev/null && echo 1 || echo 0'))):
+                self.jmeter_conn.exec_cmd(f'sudo apt-get install -y 
{pkg_name}') + elif bool(int(self.jmeter_conn.exec_cmd(f'cat /etc/os-release | grep centos >> /dev/null && echo 1 || echo 0'))): + if not bool(int(self.jmeter_conn.exec_cmd(f'sudo rpm -qa | grep {pkg_name} >> /dev/null && echo 1 || echo 0'))): + self.jmeter_conn.exec_cmd(f'sudo yum install -y {pkg_name}') + else: + pass + + def installJava(self): + self.installPkg("openjdk-8-jdk") + + def downloadJmeter(self): + logger.info(f'{self.jmeter_ip}: downloading jmeter from {config["jmeter"]["jmeter_addr"]}') + if not bool(int(self.jmeter_conn.exec_cmd(f'[ -e ~/{self.tar_file_name} ] && echo 1 || echo 0'))): + self.jmeter_conn.exec_cmd(f'wget -P ~ {config["jmeter"]["jmeter_addr"]}') + + def deployJmeter(self): + logger.info(f'{self.jmeter_ip}: deploying jmeter') + self.downloadJmeter() + self.installJava() + if not bool(int(self.jmeter_conn.exec_cmd(f'ls ~/{self.tar_file_dir} >> /dev/null && echo 1 || echo 0'))): + self.jmeter_conn.exec_cmd(f'cd ~ && tar -xvf {self.tar_file_name}') + if not bool(int(self.jmeter_conn.exec_cmd(f'grep "^jmeter.reportgenerator.overall_granularity" ~/{self.tar_file_dir}/bin/user.properties >> /dev/null && echo 1 || echo 0'))): + self.jmeter_conn.exec_cmd(f'echo "jmeter.reportgenerator.overall_granularity=300000" >> ~/{self.tar_file_dir}/bin/user.properties') + if not bool(int(self.jmeter_conn.exec_cmd(f'ls /usr/local/{self.tar_file_dir} >> /dev/null && echo 1 || echo 0'))): + self.jmeter_conn.exec_cmd(f'mv ~/{self.tar_file_dir} /usr/local') + if not bool(int(self.jmeter_conn.exec_cmd(f'grep "jmeter" ~/.bashrc >> /dev/null && echo 1 || echo 0'))): + self.jmeter_conn.exec_cmd(f'echo "export PATH=$PATH:/usr/local/{self.tar_file_dir}/bin" >> ~/.bashrc') + # if bool(int(self.jmeter_conn.exec_cmd(f'jmeter -v >> /dev/null && echo 1 || echo 0'))): + # logger.success('deploy jmeter successful') + # else: + # logger.error('deploy jmeter failed') + # sys.exit(1) + return f"/usr/local/{self.tar_file_dir}/bin/jmeter" + +if __name__ == '__main__': + deploy = Jmeter() + deploy.deployJmeter() + + diff --git a/tests/perftest-scripts/taosadapter_perftest/taosadapter_perftest.py b/tests/perftest-scripts/taosadapter_perftest/taosadapter_perftest.py deleted file mode 100644 index 480c25206065fc62a53156e899c213f8e2b487db..0000000000000000000000000000000000000000 --- a/tests/perftest-scripts/taosadapter_perftest/taosadapter_perftest.py +++ /dev/null @@ -1,224 +0,0 @@ -from fabric import Connection -from loguru import logger -import shutil -import os -import time - -class TaosadapterPerftest(): - def __init__(self): - self.ip = "192.168.1.85" - self.port = "22" - self.user = "root" - self.passwd = "tbase125!" 
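The RemoteModule and Jmeter helpers above both gate remote actions on a shell probe that prints 1 or 0 and is parsed from exec_cmd's stdout. A minimal standalone sketch of that idiom; `conn` is assumed to be a RemoteModule instance, and the tarball name and URL below are placeholders for illustration, not values from this repo's config:

```python
# Remote idempotency check as used by jmeter.py and monitor.py: run a shell
# test that echoes 1 or 0, then turn exec_cmd's stdout into a bool.
def remote_file_exists(conn, path: str) -> bool:
    return bool(int(conn.exec_cmd(f'[ -e {path} ] && echo 1 || echo 0')))

def ensure_tarball(conn) -> None:
    # conn: a RemoteModule instance (see src/util/RemoteModule.py above).
    tarball = '~/apache-jmeter-5.4.1.tgz'  # hypothetical file name
    if not remote_file_exists(conn, tarball):
        # hypothetical URL; the real one comes from config["jmeter"]["jmeter_addr"]
        conn.exec_cmd('wget -P ~ https://example.com/apache-jmeter-5.4.1.tgz')
```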
- self.telnetCreateStbJmxFile = "opentsdb_telnet_createStb.jmx" - self.telnetCreateTbJmxFile = "opentsdb_telnet_createTb.jmx" - self.telnetInsertRowsFile = "opentsdb_telnet_insertRows.jmx" - # self.telnetMixJmxFile = "opentsdb_telnet_MixTbRows.jmx" - self.telnetMixJmxFile = "opentsdb_telnet_jmeter_csv_import.jmx" - - self.jsonCreateStbJmxFile = "opentsdb_json_createStb.jmx" - self.jsonCreateTbJmxFile = "opentsdb_json_createTb.jmx" - self.jsonInsertRowsFile = "opentsdb_json_insertRows.jmx" - # self.jsonMixJmxFile = "opentsdb_json_MixTbRows.jmx" - self.jsonMixJmxFile = "opentsdb_json_jmeter_csv_import.jmx" - - self.logfile = "taosadapter_perftest.log" - self.createStbThreads = 100 - self.createTbThreads = 100 - self.insertRowsThreads = 24 - - logger.add(self.logfile) - - def exec_remote_cmd(self, cmd): - """ - remote exec shell cmd - """ - try: - c = Connection(self.ip, user=self.user, port=self.port, connect_timeout=120, connect_kwargs={"password": self.passwd}) - result = c.run(cmd, pty=False, warn=True, hide=True).stdout - c.close() - return result - except Exception as e: - logger.error(f"exec cmd {cmd} failed:{e}"); - - def exec_local_cmd(self, shell_cmd): - ''' - exec local shell cmd - ''' - result = os.popen(shell_cmd).read().strip() - return result - - def modifyJxmLooptimes(self, filename, looptimes, row_count=None, import_file_name=None): - ''' - modify looptimes - ''' - with open(filename, "r", encoding="utf-8") as f: - lines = f.readlines() - with open(filename, "w", encoding="utf-8") as f_w: - for line in lines: - if "looptimes" in line: - line = line.replace("looptimes", looptimes) - if row_count is not None: - if "row_count" in line: - line = line.replace("row_count", row_count) - if import_file_name is not None: - if "import_file_name" in line: - line = line.replace("import_file_name", import_file_name) - f_w.write(line) - - def cleanAndRestartTaosd(self): - ''' - restart taosd and clean env - ''' - logger.info("---- restarting taosd and taosadapter ----") - self.exec_remote_cmd("systemctl stop taosd") - self.exec_remote_cmd("rm -rf /var/lib/taos/* /var/log/taos/*") - self.exec_remote_cmd("systemctl start taosd") - logger.info("---- finish restart ----") - time.sleep(60) - - def recreateReportDir(self, path): - ''' - recreate jmeter report path - ''' - if os.path.exists(path): - self.exec_local_cmd(f'rm -rf {path}/*') - else: - os.makedirs(path) - - def cleanLog(self): - ''' - clean log - ''' - with open(self.logfile, 'w') as f: - f.seek(0) - f.truncate() - - def genMixTbRows(self, filename, table_count, row_count): - logger.info('generating import data file') - ts_start = 1614530008000 - with open(filename, "w", encoding="utf-8") as f_w: - for i in range(table_count): - for j in range(row_count): - input_line = str(ts_start) + "," + str(i) + '\n' - ts_start += 1 - f_w.write(input_line) - - def outputParams(self, protocol, create_type): - ''' - procotol is "telnet" or "json" - create_type is "stb" or "tb" or "rows" - ''' - if protocol == "telnet": - if create_type == "stb": - return self.telnetCreateStbJmxFile, self.createStbThreads - elif create_type == "tb": - return self.telnetCreateTbJmxFile, self.createTbThreads - elif create_type == "rows": - return self.telnetInsertRowsFile, self.insertRowsThreads - else: - logger.error("create type error!") - else: - if create_type == "stb": - return self.jsonCreateStbJmxFile, self.createStbThreads - elif create_type == "tb": - return self.jsonCreateTbJmxFile, self.createTbThreads - elif create_type == "rows": - return 
self.jsonInsertRowsFile, self.insertRowsThreads - else: - logger.error("create type error!") - - def insertTDengine(self, procotol, create_type, count): - ''' - create stb/tb or insert rows - ''' - self.cleanAndRestartTaosd() - jmxfile, threads = self.outputParams(procotol, create_type) - handle_file = str(count) + jmxfile - report_dir = f'testreport/{handle_file}' - self.recreateReportDir(report_dir) - shutil.copyfile(jmxfile, handle_file) - replace_count = int(count/threads) - self.modifyJxmLooptimes(handle_file, str(replace_count)) - logger.info(f'jmeter running ----- jmeter -n -t {handle_file} -l {report_dir}/{handle_file}.txt -e -o {report_dir}') - result = self.exec_local_cmd(f"jmeter -n -t {handle_file} -l {report_dir}/{handle_file}.txt -e -o {report_dir}") - logger.info(result) - logger.info("----- sleep 120s and please record data -----") - time.sleep(120) - - def insertMixTbRows(self, procotol, table_count, row_count): - self.cleanAndRestartTaosd() - local_path = os.getcwd() - jmxfile = f"opentsdb_{procotol}_{table_count}Tb{row_count}Rows.jmx" - import_file_name = f"import_opentsdb_{procotol}_{table_count}Tb{row_count}Rows.txt" - import_file_path = local_path + '/' + import_file_name - self.genMixTbRows(import_file_name, table_count, row_count) - report_dir = f'testreport/{jmxfile}' - self.recreateReportDir(report_dir) - if procotol == "telnet": - shutil.copyfile(self.telnetMixJmxFile, jmxfile) - else: - shutil.copyfile(self.jsonMixJmxFile, jmxfile) - self.modifyJxmLooptimes(jmxfile, str(int(table_count*row_count/100)), import_file_name=import_file_path) - logger.info(f'jmeter running ----- jmeter -n -t {jmxfile} -l {report_dir}/{jmxfile}.txt -e -o {report_dir}') - result = self.exec_local_cmd(f"jmeter -n -t {jmxfile} -l {report_dir}/{jmxfile}.txt -e -o {report_dir}") - logger.info(result) - logger.info("----- sleep 120s and please record data -----") - time.sleep(120) - - # def insertMixTbRows(self, procotol, looptimes, row_count): - # self.cleanAndRestartTaosd() - # jmxfile = f"opentsdb_{procotol}_{looptimes}Tb100Rows.jmx" - # report_dir = f'testreport/{jmxfile}' - # self.recreateReportDir(report_dir) - # if procotol == "telnet": - # shutil.copyfile(self.telnetMixJmxFile, jmxfile) - # else: - # shutil.copyfile(self.jsonMixJmxFile, jmxfile) - - # self.modifyJxmLooptimes(jmxfile, str(looptimes), str(row_count)) - # result = self.exec_local_cmd(f"jmeter -n -t {jmxfile} -l {report_dir}/{jmxfile}.txt -e -o {report_dir}") - # logger.info(result) - # logger.info("----- sleep 120s and please record data -----") - # time.sleep(120) - - - -if __name__ == '__main__': - taosadapterPerftest = TaosadapterPerftest() - taosadapterPerftest.cleanLog() - - logger.info('------------ Start testing the scenarios in the report chapter 3.4.1 ------------') - for procotol in ["telnet", "json"]: - logger.info(f'----- {procotol} protocol ------- Creating 30W stable ------------') - taosadapterPerftest.insertTDengine(procotol, "stb", 300000) - logger.info(f'----- {procotol} protocol ------- Creating 100W table with stb "cpu.usage_user" ------------') - taosadapterPerftest.insertTDengine(procotol, "tb", 1000000) - logger.info(f'----- {procotol} protocol ------- inserting 100W rows ------------') - taosadapterPerftest.insertTDengine(procotol, "rows", 1000000) - - logger.info(f'----- {procotol} protocol ------- Creating 50W stable ------------') - taosadapterPerftest.insertTDengine(procotol, "stb", 500000) - logger.info(f'----- {procotol} protocol ------- Creating 500W table with stb "cpu.usage_user" 
------------')
-        taosadapterPerftest.insertTDengine(procotol, "tb", 5000000)
-        logger.info(f'----- {procotol} protocol ------- inserting 500W rows ------------')
-        taosadapterPerftest.insertTDengine(procotol, "rows", 5000000)
-
-        logger.info(f'----- {procotol} protocol ------- Creating 100W stable ------------')
-        taosadapterPerftest.insertTDengine(procotol, "stb", 1000000)
-        logger.info(f'----- {procotol} protocol ------- Creating 1000W table with stb "cpu.usage_user" ------------')
-        taosadapterPerftest.insertTDengine(procotol, "tb", 10000000)
-        logger.info(f'----- {procotol} protocol ------- inserting 1000W rows ------------')
-        taosadapterPerftest.insertTDengine(procotol, "rows", 10000000)
-
-        logger.info(f'----- {procotol} protocol ------- Creating 10W stable 1000Rows ------------')
-        taosadapterPerftest.insertMixTbRows(procotol, 100000, 1000)
-
-        logger.info(f'----- {procotol} protocol ------- Creating 100W stable 100Rows ------------')
-        taosadapterPerftest.insertMixTbRows(procotol, 1000000, 100)
-
-        logger.info(f'----- {procotol} protocol ------- Creating 500W stable 20Rows ------------')
-        taosadapterPerftest.insertMixTbRows(procotol, 5000000, 20)
-
-        logger.info(f'----- {procotol} protocol ------- Creating 1000W stable 10Rows ------------')
-        taosadapterPerftest.insertMixTbRows(procotol, 10000000, 10)
diff --git a/tests/perftest-scripts/taosadapter_perftest_old/README.CN.MD b/tests/perftest-scripts/taosadapter_perftest_old/README.CN.MD
new file mode 100644
index 0000000000000000000000000000000000000000..6ae68bcfac228d5d4de299edc225852896f08b13
--- /dev/null
+++ b/tests/perftest-scripts/taosadapter_perftest_old/README.CN.MD
@@ -0,0 +1,16 @@
+# Note: the current test scripts require taosd and taosadapter to be deployed manually, following the environment description in the test report
+- Test report: https://jira.taosdata.com:18090/pages/viewpage.action?pageId=127470422
+# 1. Single-instance centralized deployment
+## 1.1 Environment deployment: compile the develop branch directly on 192.168.1.85 and start taosd
+## 1.2 How to run: run python3 /home/ubuntu/TDengine/tests/perftest-scripts/taosadapter_perftest/taosadapter_perftest.py on 192.168.1.83; it produces the results for sections 3.4.1 and 3.5.1 of the test report, printed in real time to taosadapter_perftest.log in the same directory
+
+
+# 2. Multi-instance separated deployment
+## 2.1 Environment deployment: deploy taosd on 192.168.1.98 and taosadapter on 192.168.1.83, 84, 85 and 86; configure the corresponding /etc/hosts and point firstEp in /etc/taos/taos.cfg to the taosd address (vm98:6030)
+## 2.2 How to run:
+- On 192.168.1.93, run as many of the following as the node count requires:
+- jmeter -n -t opentsdb_insertRows83.jmx
+- jmeter -n -t opentsdb_insertRows84.jmx
+- jmeter -n -t opentsdb_insertRows85.jmx
+- ...
+- Monitor each node's CPU usage in real time; jmeter results are printed directly to the console
\ No newline at end of file
diff --git a/tests/perftest-scripts/taosadapter_perftest_old/README.EN.MD b/tests/perftest-scripts/taosadapter_perftest_old/README.EN.MD
new file mode 100644
index 0000000000000000000000000000000000000000..28d3927c4f9c0b17159eb38ea3e7dc90fa51d194
--- /dev/null
+++ b/tests/perftest-scripts/taosadapter_perftest_old/README.EN.MD
@@ -0,0 +1,16 @@
+# Note: the current test script requires taosd and taosadapter to be deployed manually according to the environment description in the test report
+- Test Report: https://jira.taosdata.com:18090/pages/viewpage.action?pageId=127470422
+# 1. Single instance centralized deployment
+## 1.1 Environment deployment: compile the develop branch directly on 192.168.1.85 and start taosd
+## 1.2 Script running method: run python3 /home/ubuntu/TDengine/tests/perftest-scripts/taosadapter_perftest/taosadapter_perftest.py directly in the 192.168.1.83 environment to produce the test results for sections 3.4.1 and 3.5.1 of the test report; results are printed in real time to taosadapter_perftest.log in the same directory
+
+
+# 2. 
Separate deployment of multiple instances +## 2.1 Environment deployment: Deploy taosd on 192.168.1.98, deploy taosadapter in 192.168.1.83, 84, 85, 86 environments, configure the corresponding /etc/hosts, and point the firstEp in /etc/taos/taos.cfg to taosd Address (vm98:6030) +## 2.2 Script running method: +- Run according to the count of nodes in the 192.168.1.93 environment: +- jmeter -n -t opentsdb_insertRows83.jmx +- jmeter -n -t opentsdb_insertRows84.jmx +- jmeter -n -t opentsdb_insertRows85.jmx +- ... +- You need to monitor the CPU occupancy of each node in real time, and the jmeter results will be printed directly to the console. \ No newline at end of file diff --git a/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_insertRows83.jmx b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_insertRows83.jmx new file mode 100644 index 0000000000000000000000000000000000000000..68dd162f8f5f3093fb55f30e25eac75f18b42ba7 --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_insertRows83.jmx @@ -0,0 +1,200 @@ + + + + + + false + true + false + + + + + + + + continue + + false + 1 + + 1 + 1 + false + + + true + + + + true + + + + false + create database if not exists test83 precision 'ms' + = + + + + + + + + http://192.168.1.83:6041/rest/sql + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + continue + + false + 10000000 + + 24 + + false + + + true + + + + true + + + + false + cpu.usage_user.rows ${ts_counter} 22.345567 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=13 region=us-west-1 service=10 service_environment=staging service_version=0 team=NYC + = + + + + + + + + http://192.168.1.83:6041/opentsdb/v1/put/telnet/test83 + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + 1614530008000 + + 1 + ts_counter + + false + + + + + + diff --git a/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_insertRows84.jmx b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_insertRows84.jmx new file mode 100644 index 0000000000000000000000000000000000000000..5a541f9eca2eb59dcd99477dcd7e433f37afef48 --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_insertRows84.jmx @@ -0,0 +1,200 @@ + + + + + + false + true + false + + + + + + + + continue + + false + 1 + + 1 + 1 + false + + + true + + + + true + + + + false + create database if not exists test84 precision 'ms' + = + + + + + + + + http://192.168.1.84:6041/rest/sql + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + continue + + false + 1000000 + + 24 + + false + + + true + + + + true + + + + false + cpu.usage_user.rows ${ts_counter} 22.345567 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=13 region=us-west-1 service=10 
service_environment=staging service_version=0 team=NYC + = + + + + + + + + http://192.168.1.84:6041/opentsdb/v1/put/telnet/test84 + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + 1614530008000 + + 1 + ts_counter + + false + + + + + + diff --git a/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_insertRows85.jmx b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_insertRows85.jmx new file mode 100644 index 0000000000000000000000000000000000000000..ec9d786cd0d27095892980a74869593c7c4dcf93 --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_insertRows85.jmx @@ -0,0 +1,200 @@ + + + + + + false + true + false + + + + + + + + continue + + false + 1 + + 1 + 1 + false + + + true + + + + true + + + + false + create database if not exists test85 precision 'ms' + = + + + + + + + + http://192.168.1.85:6041/rest/sql + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + continue + + false + 1000000 + + 24 + + false + + + true + + + + true + + + + false + cpu.usage_user.rows ${ts_counter} 22.345567 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=13 region=us-west-1 service=10 service_environment=staging service_version=0 team=NYC + = + + + + + + + + http://192.168.1.85:6041/opentsdb/v1/put/telnet/test85 + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + 1614530008000 + + 1 + ts_counter + + false + + + + + + diff --git a/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_insertRows86.jmx b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_insertRows86.jmx new file mode 100644 index 0000000000000000000000000000000000000000..1caa1fa24f0b61ca3c3b80f4f2f4e1b4ed1f733d --- /dev/null +++ b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_insertRows86.jmx @@ -0,0 +1,200 @@ + + + + + + false + true + false + + + + + + + + continue + + false + 1 + + 1 + 1 + false + + + true + + + + true + + + + false + create database if not exists test86 precision 'ms' + = + + + + + + + + http://192.168.1.86:6041/rest/sql + POST + true + false + true + false + + + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + + continue + + false + 1000000 + + 24 + + false + + + true + + + + true + + + + false + cpu.usage_user.rows ${ts_counter} 22.345567 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=13 region=us-west-1 service=10 service_environment=staging service_version=0 team=NYC + = + + + + + + + + http://192.168.1.86:6041/opentsdb/v1/put/telnet/test86 + POST + true + false + true + false + 
+ + + + + + false + + saveConfig + + + true + true + true + + true + true + true + true + false + true + true + false + false + false + true + false + false + false + true + 0 + true + true + true + true + true + true + + + + + + + + + Authorization + Basic cm9vdDp0YW9zZGF0YQ== + + + + + + 1614530008000 + + 1 + ts_counter + + false + + + + + + diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_MixTbRows.jmx b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_json_MixTbRows.jmx similarity index 100% rename from tests/perftest-scripts/taosadapter_perftest/opentsdb_json_MixTbRows.jmx rename to tests/perftest-scripts/taosadapter_perftest_old/opentsdb_json_MixTbRows.jmx diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createStb.jmx b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_json_createStb.jmx similarity index 100% rename from tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createStb.jmx rename to tests/perftest-scripts/taosadapter_perftest_old/opentsdb_json_createStb.jmx diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createTb.jmx b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_json_createTb.jmx similarity index 100% rename from tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createTb.jmx rename to tests/perftest-scripts/taosadapter_perftest_old/opentsdb_json_createTb.jmx diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_insertRows.jmx b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_json_insertRows.jmx similarity index 100% rename from tests/perftest-scripts/taosadapter_perftest/opentsdb_json_insertRows.jmx rename to tests/perftest-scripts/taosadapter_perftest_old/opentsdb_json_insertRows.jmx diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_jmeter_csv_import.jmx b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_json_jmeter_csv_import.jmx similarity index 100% rename from tests/perftest-scripts/taosadapter_perftest/opentsdb_json_jmeter_csv_import.jmx rename to tests/perftest-scripts/taosadapter_perftest_old/opentsdb_json_jmeter_csv_import.jmx diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_MixTbRows.jmx b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_telnet_MixTbRows.jmx similarity index 100% rename from tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_MixTbRows.jmx rename to tests/perftest-scripts/taosadapter_perftest_old/opentsdb_telnet_MixTbRows.jmx diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createStb.jmx b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_telnet_createStb.jmx similarity index 100% rename from tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createStb.jmx rename to tests/perftest-scripts/taosadapter_perftest_old/opentsdb_telnet_createStb.jmx diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createTb.jmx b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_telnet_createTb.jmx similarity index 100% rename from tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createTb.jmx rename to tests/perftest-scripts/taosadapter_perftest_old/opentsdb_telnet_createTb.jmx diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_insertRows.jmx b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_telnet_insertRows.jmx similarity index 100% rename from tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_insertRows.jmx rename to 
tests/perftest-scripts/taosadapter_perftest_old/opentsdb_telnet_insertRows.jmx diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_jmeter_csv_import.jmx b/tests/perftest-scripts/taosadapter_perftest_old/opentsdb_telnet_jmeter_csv_import.jmx similarity index 100% rename from tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_jmeter_csv_import.jmx rename to tests/perftest-scripts/taosadapter_perftest_old/opentsdb_telnet_jmeter_csv_import.jmx diff --git a/tests/develop-test/0-management/3-tag/.gitkeep b/tests/pytest/fulltest-cluster.sh old mode 100644 new mode 100755 similarity index 100% rename from tests/develop-test/0-management/3-tag/.gitkeep rename to tests/pytest/fulltest-cluster.sh diff --git a/tests/pytest/fulltest-connector.sh b/tests/pytest/fulltest-connector.sh index 701c316040970b9077e6c730c1346dcf8759f673..1e456503989f47a20a4595c86a1df0b4c3a32946 100755 --- a/tests/pytest/fulltest-connector.sh +++ b/tests/pytest/fulltest-connector.sh @@ -1,13 +1,2 @@ #!/bin/bash -ulimit -c unlimited -#======================p1-start=============== - -# restful test for python -# python3 test.py -f restful/restful_bind_db1.py -# python3 test.py -f restful/restful_bind_db2.py -python3 ./test.py -f client/nettest.py - -python3 ./test.py -f ../system-test/4-taosAdapter/taosAdapter_query.py -python3 ./test.py -f ../system-test/4-taosAdapter/taosAdapter_insert.py - -#======================p1-end=============== +ulimit -c unlimited \ No newline at end of file diff --git a/tests/pytest/fulltest-insert.sh b/tests/pytest/fulltest-insert.sh index 85b36bda29a047c788eb00b991bb890a2c270bac..153bc072dba128fa8f5635e26aba0d30066b9c9a 100755 --- a/tests/pytest/fulltest-insert.sh +++ b/tests/pytest/fulltest-insert.sh @@ -4,7 +4,6 @@ ulimit -c unlimited python3 testCompress.py python3 testNoCompress.py - python3 ./test.py -f import_merge/importBlock1HO.py python3 ./test.py -f import_merge/importBlock1HPO.py python3 ./test.py -f import_merge/importBlock1H.py @@ -23,10 +22,6 @@ python3 ./test.py -f import_merge/importBlock2TPO.py python3 ./test.py -f import_merge/importBlock2T.py python3 ./test.py -f import_merge/importBlockbetween.py python3 ./test.py -f import_merge/importCacheFileHO.py - -#======================p1-end=============== -#======================p2-start=============== - python3 ./test.py -f import_merge/importCacheFileHPO.py python3 ./test.py -f import_merge/importCacheFileH.py python3 ./test.py -f import_merge/importCacheFileS.py @@ -48,10 +43,6 @@ python3 ./test.py -f import_merge/importDataLastTPO.py python3 ./test.py -f import_merge/importDataLastT.py python3 ./test.py -f import_merge/importDataS.py python3 ./test.py -f import_merge/importDataSub.py - -#======================p2-end=============== -#======================p3-start=============== - python3 ./test.py -f import_merge/importDataTO.py python3 ./test.py -f import_merge/importDataTPO.py python3 ./test.py -f import_merge/importDataT.py @@ -73,10 +64,6 @@ python3 ./test.py -f import_merge/importSpan.py python3 ./test.py -f import_merge/importSRestart.py python3 ./test.py -f import_merge/importSubRestart.py python3 ./test.py -f import_merge/importTailOverlap.py - -#======================p3-end=============== -#======================p4-start=============== - python3 ./test.py -f import_merge/importTailPartOverlap.py python3 ./test.py -f import_merge/importTail.py python3 ./test.py -f import_merge/importToCommit.py @@ -88,7 +75,6 @@ python3 ./test.py -f import_merge/importCSV.py python3 ./test.py -f 
import_merge/import_update_0.py python3 ./test.py -f import_merge/import_update_1.py python3 ./test.py -f import_merge/import_update_2.py - python3 ./test.py -f insert/basic.py python3 ./test.py -f insert/int.py python3 ./test.py -f insert/float.py @@ -98,8 +84,6 @@ python3 ./test.py -f insert/double.py python3 ./test.py -f insert/smallint.py python3 ./test.py -f insert/tinyint.py python3 ./test.py -f insert/date.py - - python3 ./test.py -f insert/binary.py python3 ./test.py -f insert/nchar.py #python3 ./test.py -f insert/nchar-boundary.py @@ -133,41 +117,21 @@ python3 ./test.py -f insert/verifyMemToDiskCrash.py #python3 ./test.py -f insert/schemalessInsert.py #python3 ./test.py -f insert/openTsdbJsonInsert.py python3 ./test.py -f insert/openTsdbTelnetLinesInsert.py - - -# update python3 ./test.py -f update/merge_commit_data.py python3 ./test.py -f update/allow_update.py python3 ./test.py -f update/allow_update-0.py python3 ./test.py -f update/append_commit_data.py python3 ./test.py -f update/append_commit_last-0.py python3 ./test.py -f update/append_commit_last.py - - python3 ./test.py -f update/merge_commit_data2.py python3 ./test.py -f update/merge_commit_data2_update0.py python3 ./test.py -f update/merge_commit_last-0.py python3 ./test.py -f update/merge_commit_last.py python3 ./test.py -f update/update_options.py python3 ./test.py -f update/merge_commit_data-0.py - -# wal python3 ./test.py -f wal/addOldWalTest.py python3 ./test.py -f wal/sdbComp.py -#======================p4-end=============== -#======================p5-start=============== -python3 ./test.py -f ../system-test/1-insert/0-sql/basic.py -python3 ./test.py -f ../develop-test/1-insert/0-sql/basic.py -python3 ./test.py -f ../develop-test/1-insert/0-sql/batchInsert.py - -#======================p5-end=============== - - - - - - diff --git a/tests/develop-test/0-management/4-others/.gitkeep b/tests/pytest/fulltest-taosAdapter.sh old mode 100644 new mode 100755 similarity index 100% rename from tests/develop-test/0-management/4-others/.gitkeep rename to tests/pytest/fulltest-taosAdapter.sh diff --git a/tests/pytest/functions/function_elapsed_case.py b/tests/pytest/functions/function_elapsed_case.py index 98a76ab9a82aaa09bdad86a8bb1fc2030b58043e..50fbb0fe3244ec214e040f43962321a28ed31d9b 100644 --- a/tests/pytest/functions/function_elapsed_case.py +++ b/tests/pytest/functions/function_elapsed_case.py @@ -59,7 +59,7 @@ class ElapsedCase: tdSql.query("select elapsed(ts), elapsed(ts, 10m), elapsed(ts, 100m) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") tdSql.checkEqual(int(tdSql.getData(0, 1)), 99) tdSql.checkEqual(int(tdSql.getData(0, 2)), 9) - # stddev(f), + # stddev(f), tdSql.query("select elapsed(ts), count(*), avg(f), twa(f), irate(f), sum(f), min(f), max(f), first(f), last(f), apercentile(i, 30), last_row(i), spread(i) " "from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") tdSql.checkRows(2) @@ -100,7 +100,7 @@ class ElapsedCase: tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' session(ts, 70s)") tdSql.checkRows(1) - + # It has little to do with the elapsed function, so just simple test. 
def stateWindowTest(self): tdSql.execute("use wxy_db") @@ -110,7 +110,7 @@ class ElapsedCase: tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' state_window(b)") tdSql.checkRows(2) - + def intervalTest(self): tdSql.execute("use wxy_db") @@ -186,7 +186,7 @@ class ElapsedCase: else: subtable[result[i][tbnameCol]].append(result[i][elapsedCol]) return subtable - + def doOrderbyCheck(self, resultAsc, resultdesc): resultRows = len(resultAsc) for i in range(resultRows): @@ -222,6 +222,13 @@ class ElapsedCase: self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(222m) group by tbname", 1, 2) self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(1000m) group by tbname", 1, 2) + #nested query + resAsc = tdSql.getResult("select elapsed(ts) from (select csum(i) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00')") + resDesc = tdSql.getResult("select elapsed(ts) from (select csum(i) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' order by ts desc)") + resRows = len(resAsc) + for i in range(resRows): + tdSql.checkEqual(resAsc[i][0], resDesc[resRows - i - 1][0]) + def slimitCheck(self, sql): tdSql.checkEqual(tdSql.query(sql + " slimit 0"), 0) tdSql.checkEqual(tdSql.query(sql + " slimit 1 soffset 0"), tdSql.query(sql + " slimit 0, 1")) @@ -307,7 +314,7 @@ class ElapsedCase: "select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-22 02:00:00' group by tbname") self.unionAllCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(1m) group by tbname", "select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(222m) group by tbname") - + # It has little to do with the elapsed function, so just simple test. 
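The orderby checks above (doOrderbyCheck and the new nested-query case) all assert the same invariant: elapsed() results must not depend on scan direction. A minimal standalone sketch of that reversal check, with no framework dependencies:

```python
# Invariant asserted by doOrderbyCheck and the nested-query addition above:
# scanning in reverse order yields the same elapsed() results, row for row,
# once one result set is reversed.
def check_order_independent(asc_rows, desc_rows):
    assert len(asc_rows) == len(desc_rows)
    n = len(asc_rows)
    for i in range(n):
        # row i of the ascending scan matches row n-1-i of the descending scan
        assert asc_rows[i][0] == desc_rows[n - i - 1][0]

check_order_independent([(1.0,), (2.5,)], [(2.5,), (1.0,)])  # passes
```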
def continuousQueryTest(self): tdSql.execute("use wxy_db") diff --git a/tests/pytest/query/queryLike.py b/tests/pytest/query/queryLike.py index b3916ed84db1d558e4b95f62c2def19deee75944..44b0ba8756bafaae11977e7b1b282ddc9fd3bf2c 100644 --- a/tests/pytest/query/queryLike.py +++ b/tests/pytest/query/queryLike.py @@ -161,6 +161,17 @@ class TDTestCase: tdSql.query("select * from st where tagg like 'tag\_\__\_';") tdSql.checkData(0,0, "tag__a_") + tdSql.execute("create table stb(ts timestamp, c0 int) tags(t0 nchar(64))") + tdSql.execute("insert into tb1 using stb tags('测试ABCabc') values(now, 1)") + tdSql.query("select * from tb1 where t0 like '%试AB%'") + tdSql.checkRows(1) + + tdSql.query("select * from tb1 where t0 like '测试AB%'") + tdSql.checkRows(1) + + tdSql.query("select * from tb1 where t0 like '%ABCabc'") + tdSql.checkRows(1) + os.system("rm -rf ./*.py.sql") def stop(self): diff --git a/tests/pytest/tools/taosdemoAllTest/insert-chinese.json b/tests/pytest/tools/taosdemoAllTest/insert-chinese.json index b7f3be9546c61fe895979cdc13e39eea5a322400..14a56826744f52a01f55b85f6d84744f6b458b70 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-chinese.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-chinese.json @@ -42,7 +42,7 @@ "batch_create_tbl_num": 20, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 150, + "insert_rows": 100, "childtable_limit": -1, "childtable_offset":0, "multi_thread_write_one_tbl": "no", @@ -58,6 +58,138 @@ "tags_file": "", "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":2}, {"type": "nchar", "len": 32, "count":2}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY","count":1}, {"type": "nchar", "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "rest", + "insert_rows": 100, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":2}, {"type": "nchar", "len": 32, "count":2}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY","count":1}, {"type": "nchar", "count":2}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 30, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 100, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":2}, {"type": "nchar", "len": 32, "count":2}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY","count":1}, {"type": "nchar", "count":2}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 40, + "childtable_prefix": "stb00_", 
+ "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 100, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":2}, {"type": "nchar", "len": 32, "count":2}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY","count":1}, {"type": "nchar", "count":2}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 50, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "insert_rows": 100, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}] + }, + { + "name": "stb5", + "child_table_exists":"no", + "childtable_count": 60, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "insert_rows": 100, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] }] }] } diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py index 06236a1d0175e4f685b29584cc0456e621fb754b..1154beda7846065001093898d617c0292fc8da05 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py @@ -52,319 +52,324 @@ class TDTestCase: os.system("rm -rf ./insert*_res.txt*") os.system("rm -rf tools/taosdemoAllTest/%s.sql" % testcaseFilename ) - # insert: create one or mutiple tables per sql and insert multiple rows per sql - os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-1s1tnt1r.json -y " % binPath) - tdSql.execute("use db") - tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 11) - tdSql.query("select count (tbname) from stb1") - tdSql.checkData(0, 0, 10) - tdSql.query("select count(*) from stb00_0") - tdSql.checkData(0, 0, 100) - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 1100) - tdSql.query("select count(*) from stb01_1") - tdSql.checkData(0, 0, 200) - tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 2000) - - # # restful connector insert data - # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertRestful.json -y " % binPath) + # # insert: create one or mutiple tables per sql and insert multiple rows per sql + # 
os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-1s1tnt1r.json -y " % binPath) # tdSql.execute("use db") # tdSql.query("select count (tbname) from stb0") - # tdSql.checkData(0, 0, 10) + # tdSql.checkData(0, 0, 11) # tdSql.query("select count (tbname) from stb1") # tdSql.checkData(0, 0, 10) # tdSql.query("select count(*) from stb00_0") - # tdSql.checkData(0, 0, 10) - # tdSql.query("select count(*) from stb0") # tdSql.checkData(0, 0, 100) + # tdSql.query("select count(*) from stb0") + # tdSql.checkData(0, 0, 1100) # tdSql.query("select count(*) from stb01_1") + # tdSql.checkData(0, 0, 200) + # tdSql.query("select count(*) from stb1") + # tdSql.checkData(0, 0, 2000) + + # # # restful connector insert data + # # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertRestful.json -y " % binPath) + # # tdSql.execute("use db") + # # tdSql.query("select count (tbname) from stb0") + # # tdSql.checkData(0, 0, 10) + # # tdSql.query("select count (tbname) from stb1") + # # tdSql.checkData(0, 0, 10) + # # tdSql.query("select count(*) from stb00_0") + # # tdSql.checkData(0, 0, 10) + # # tdSql.query("select count(*) from stb0") + # # tdSql.checkData(0, 0, 100) + # # tdSql.query("select count(*) from stb01_1") + # # tdSql.checkData(0, 0, 20) + # # tdSql.query("select count(*) from stb1") + # # tdSql.checkData(0, 0, 200) + + # # default values json files + # tdSql.execute("drop database if exists db") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-default.json -y " % binPath) + # tdSql.query("show databases;") + # for i in range(tdSql.queryRows): + # if tdSql.queryResult[i][0] == 'db': + # tdSql.checkData(i, 2, 100) + # tdSql.checkData(i, 4, 1) + # tdSql.checkData(i, 6, 10) + # tdSql.checkData(i, 16, 'ms') + + # # insert: create mutiple tables per sql and insert one rows per sql . + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-1s1tntmr.json -y " % binPath) + # tdSql.execute("use db") + # tdSql.query("select count (tbname) from stb0") + # tdSql.checkData(0, 0, 10) + # tdSql.query("select count (tbname) from stb1") # tdSql.checkData(0, 0, 20) + # tdSql.query("select count(*) from stb00_0") + # tdSql.checkData(0, 0, 100) + # tdSql.query("select count(*) from stb0") + # tdSql.checkData(0, 0, 1000) + # tdSql.query("select count(*) from stb01_0") + # tdSql.checkData(0, 0, 200) + # tdSql.query("select count(*) from stb1") + # tdSql.checkData(0, 0, 4000) + + # # insert: using parament "insert_interval to controls spped of insert. + # # but We need to have accurate methods to control the speed, such as getting the speed value, checking the count and so on。 + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-interval-speed.json -y" % binPath) + # tdSql.execute("use db") + # tdSql.query("show stables") + # tdSql.checkData(0, 4, 10) + # tdSql.query("select count(*) from stb00_0") + # tdSql.checkData(0, 0, 200) + # tdSql.query("select count(*) from stb0") + # tdSql.checkData(0, 0, 2000) + # tdSql.query("show stables") + # tdSql.checkData(1, 4, 20) + # tdSql.query("select count(*) from stb01_0") + # tdSql.checkData(0, 0, 200) + # tdSql.query("select count(*) from stb1") + # tdSql.checkData(0, 0, 4000) + + # # spend 2min30s for 3 testcases. 
+ # # insert: drop and child_table_exists combination test + # # insert: using parament "childtable_offset and childtable_limit" to control table'offset point and offset + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-nodbnodrop.json -y" % binPath) + # tdSql.error("show dbno.stables") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-newdb.json -y" % binPath) + # tdSql.execute("use db") + # tdSql.query("select count (tbname) from stb0") + # tdSql.checkData(0, 0, 5) + # tdSql.query("select count (tbname) from stb1") + # tdSql.checkData(0, 0, 6) + # tdSql.query("select count (tbname) from stb2") + # tdSql.checkData(0, 0, 7) + # tdSql.query("select count (tbname) from stb3") + # tdSql.checkData(0, 0, 8) + # tdSql.query("select count (tbname) from stb4") + # tdSql.checkData(0, 0, 8) + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-offset.json -y" % binPath) + # tdSql.execute("use db") + # tdSql.query("select count(*) from stb0") + # tdSql.checkData(0, 0, 50) + # tdSql.query("select count(*) from stb1") + # tdSql.checkData(0, 0, 240) + # tdSql.query("select count(*) from stb2") + # tdSql.checkData(0, 0, 220) + # tdSql.query("select count(*) from stb3") + # tdSql.checkData(0, 0, 180) + # tdSql.query("select count(*) from stb4") + # tdSql.checkData(0, 0, 160) + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-newtable.json -y" % binPath) + # tdSql.execute("use db") + # tdSql.query("select count(*) from stb0") + # tdSql.checkData(0, 0, 150) + # tdSql.query("select count(*) from stb1") + # tdSql.checkData(0, 0, 360) + # tdSql.query("select count(*) from stb2") + # tdSql.checkData(0, 0, 360) + # tdSql.query("select count(*) from stb3") + # tdSql.checkData(0, 0, 340) + # tdSql.query("select count(*) from stb4") + # tdSql.checkData(0, 0, 400) + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-renewdb.json -y" % binPath) + # tdSql.execute("use db") + # tdSql.query("select count(*) from stb0") + # tdSql.checkData(0, 0, 50) + # tdSql.query("select count(*) from stb1") + # tdSql.checkData(0, 0, 120) + # tdSql.query("select count(*) from stb2") + # tdSql.checkData(0, 0, 140) + # tdSql.query("select count(*) from stb3") + # tdSql.checkData(0, 0, 160) + # tdSql.query("select count(*) from stb4") + # tdSql.checkData(0, 0, 160) + + + # # insert: let parament in json file is illegal, it'll expect error. 
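The illegal-parameter block above always pairs a deliberately bad config with tdSql.error, which passes only when the statement fails. Sketched under the same assumptions as the previous snippet:

```python
import os

# Negative path used by the illegal-parameter cases: load an invalid JSON
# config, then require the follow-up statement to raise an error.
def expect_failure(binPath, bad_json_cfg, probe_sql="use db"):
    tdSql.execute("drop database if exists db")
    os.system(f"{binPath}taosBenchmark -f {bad_json_cfg} -y")
    tdSql.error(probe_sql)  # framework helper: succeeds only if probe_sql fails
```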
+ # tdSql.execute("drop database if exists db") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json -y " % binPath) + # tdSql.error("use db") + # tdSql.execute("drop database if exists db") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertSigcolumnsNum4096.json -y " % binPath) + # tdSql.error("select * from db.stb0") + # tdSql.execute("drop database if exists db") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertColumnsAndTagNum4096.json -y " % binPath) + # tdSql.query("select count(*) from db.stb0") + # tdSql.checkData(0, 0, 10000) + + # tdSql.execute("drop database if exists db") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json -y " % binPath) + # tdSql.query("select count(*) from db.stb0") + # tdSql.checkRows(0) + # tdSql.execute("drop database if exists db") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertColumnsNum0.json -y " % binPath) + # tdSql.execute("use db") + # tdSql.query("show stables like 'stb0%' ") + # tdSql.checkData(0, 2, 11) + # tdSql.execute("drop database if exists db") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertTagsNumLarge128.json -y " % binPath) + # tdSql.error("use db1") + # tdSql.execute("drop database if exists db") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json -y " % binPath) + # tdSql.query("select count(*) from db.stb0") + # tdSql.checkRows(1) + # tdSql.query("select count(*) from db.stb1") + # tdSql.checkRows(1) + # tdSql.error("select * from db.stb4") + # tdSql.error("select * from db.stb2") + # tdSql.query("select count(*) from db.stb3") + # tdSql.checkRows(1) + # tdSql.execute("drop database if exists db") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151-error.json -y " % binPath) + # tdSql.error("select * from db.stb4") + # tdSql.error("select * from db.stb2") + # tdSql.execute("drop database if exists db") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertNumOfrecordPerReq0.json -y " % binPath) + # tdSql.error("select count(*) from db.stb0") + # tdSql.execute("drop database if exists db") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json -y " % binPath) + # tdSql.error("use db") + # tdSql.execute("drop database if exists db") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertChildTab0.json -y " % binPath) + # tdSql.error("use db") + # tdSql.execute("drop database if exists db") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertChildTabLess0.json -y " % binPath) + # tdSql.error("use db") + # tdSql.execute("drop database if exists blf") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json -y " % binPath) + # tdSql.execute("use blf") + # tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1") + # tdSql.checkData(0, 0, "2020-03-31 12:00:00.000") + # tdSql.query("select first(ts) from blf.p_0_topics_2") + # tdSql.checkData(0, 0, "2019-10-01 00:00:00") + # tdSql.query("select last(ts) from blf.p_0_topics_6 ") + # tdSql.checkData(0, 0, "2020-09-29 23:59:00") + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insertMaxNumPerReq.json -y " % binPath) + # tdSql.execute("use db") + # tdSql.query("select count(*) from stb0") + # tdSql.checkData(0, 0, 5000000) # tdSql.query("select count(*) from stb1") + # tdSql.checkData(0, 0, 5000000) + + + + # # insert: timestamp and step + # os.system("%staosBenchmark 
-f tools/taosdemoAllTest/insert-timestep.json -y " % binPath) + # tdSql.execute("use db") + # tdSql.query("show stables") + # tdSql.query("select count (tbname) from stb0") + # tdSql.checkData(0, 0, 10) + # tdSql.query("select count (tbname) from stb1") + # tdSql.checkData(0, 0, 20) + # tdSql.query("select last(ts) from db.stb00_0") + # tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000") + # tdSql.query("select count(*) from stb0") # tdSql.checkData(0, 0, 200) + # tdSql.query("select last(ts) from db.stb01_0") + # tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000") + # tdSql.query("select count(*) from stb1") + # tdSql.checkData(0, 0, 400) - # default values json files - tdSql.execute("drop database if exists db") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-default.json -y " % binPath) - tdSql.query("show databases;") - for i in range(tdSql.queryRows): - if tdSql.queryResult[i][0] == 'db': - tdSql.checkData(i, 2, 100) - tdSql.checkData(i, 4, 1) - tdSql.checkData(i, 6, 10) - tdSql.checkData(i, 16, 'ms') - - # insert: create mutiple tables per sql and insert one rows per sql . - os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-1s1tntmr.json -y " % binPath) - tdSql.execute("use db") - tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 10) - tdSql.query("select count (tbname) from stb1") - tdSql.checkData(0, 0, 20) - tdSql.query("select count(*) from stb00_0") - tdSql.checkData(0, 0, 100) - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 1000) - tdSql.query("select count(*) from stb01_0") - tdSql.checkData(0, 0, 200) - tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 4000) - - # insert: using parament "insert_interval to controls spped of insert. - # but We need to have accurate methods to control the speed, such as getting the speed value, checking the count and so on。 - os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-interval-speed.json -y" % binPath) - tdSql.execute("use db") - tdSql.query("show stables") - tdSql.checkData(0, 4, 10) - tdSql.query("select count(*) from stb00_0") - tdSql.checkData(0, 0, 200) - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 2000) - tdSql.query("show stables") - tdSql.checkData(1, 4, 20) - tdSql.query("select count(*) from stb01_0") - tdSql.checkData(0, 0, 200) - tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 4000) - - # spend 2min30s for 3 testcases. 
- # insert: drop and child_table_exists combination test - # insert: using parament "childtable_offset and childtable_limit" to control table'offset point and offset - os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-nodbnodrop.json -y" % binPath) - tdSql.error("show dbno.stables") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-newdb.json -y" % binPath) - tdSql.execute("use db") - tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 5) - tdSql.query("select count (tbname) from stb1") - tdSql.checkData(0, 0, 6) - tdSql.query("select count (tbname) from stb2") - tdSql.checkData(0, 0, 7) - tdSql.query("select count (tbname) from stb3") - tdSql.checkData(0, 0, 8) - tdSql.query("select count (tbname) from stb4") - tdSql.checkData(0, 0, 8) - os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-offset.json -y" % binPath) - tdSql.execute("use db") - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 50) - tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 240) - tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 220) - tdSql.query("select count(*) from stb3") - tdSql.checkData(0, 0, 180) - tdSql.query("select count(*) from stb4") - tdSql.checkData(0, 0, 160) - os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-newtable.json -y" % binPath) - tdSql.execute("use db") - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 150) - tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 360) - tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 360) - tdSql.query("select count(*) from stb3") - tdSql.checkData(0, 0, 340) - tdSql.query("select count(*) from stb4") - tdSql.checkData(0, 0, 400) - os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-renewdb.json -y" % binPath) - tdSql.execute("use db") - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 50) - tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 120) - tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 140) - tdSql.query("select count(*) from stb3") - tdSql.checkData(0, 0, 160) - tdSql.query("select count(*) from stb4") - tdSql.checkData(0, 0, 160) - - - # insert: let parament in json file is illegal, it'll expect error. 
- tdSql.execute("drop database if exists db") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json -y " % binPath) - tdSql.error("use db") - tdSql.execute("drop database if exists db") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insertSigcolumnsNum4096.json -y " % binPath) - tdSql.error("select * from db.stb0") - tdSql.execute("drop database if exists db") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insertColumnsAndTagNum4096.json -y " % binPath) - tdSql.query("select count(*) from db.stb0") - tdSql.checkData(0, 0, 10000) - - tdSql.execute("drop database if exists db") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json -y " % binPath) - tdSql.query("select count(*) from db.stb0") - tdSql.checkRows(0) - tdSql.execute("drop database if exists db") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insertColumnsNum0.json -y " % binPath) - tdSql.execute("use db") - tdSql.query("show stables like 'stb0%' ") - tdSql.checkData(0, 2, 11) - tdSql.execute("drop database if exists db") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insertTagsNumLarge128.json -y " % binPath) - tdSql.error("use db1") - tdSql.execute("drop database if exists db") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json -y " % binPath) - tdSql.query("select count(*) from db.stb0") - tdSql.checkRows(1) - tdSql.query("select count(*) from db.stb1") - tdSql.checkRows(1) - tdSql.error("select * from db.stb4") - tdSql.error("select * from db.stb2") - tdSql.query("select count(*) from db.stb3") - tdSql.checkRows(1) - tdSql.execute("drop database if exists db") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151-error.json -y " % binPath) - tdSql.error("select * from db.stb4") - tdSql.error("select * from db.stb2") - tdSql.execute("drop database if exists db") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insertNumOfrecordPerReq0.json -y " % binPath) - tdSql.error("select count(*) from db.stb0") - tdSql.execute("drop database if exists db") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json -y " % binPath) - tdSql.error("use db") - tdSql.execute("drop database if exists db") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insertChildTab0.json -y " % binPath) - tdSql.error("use db") - tdSql.execute("drop database if exists db") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insertChildTabLess0.json -y " % binPath) - tdSql.error("use db") - tdSql.execute("drop database if exists blf") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json -y " % binPath) - tdSql.execute("use blf") - tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1") - tdSql.checkData(0, 0, "2020-03-31 12:00:00.000") - tdSql.query("select first(ts) from blf.p_0_topics_2") - tdSql.checkData(0, 0, "2019-10-01 00:00:00") - tdSql.query("select last(ts) from blf.p_0_topics_6 ") - tdSql.checkData(0, 0, "2020-09-29 23:59:00") - os.system("%staosBenchmark -f tools/taosdemoAllTest/insertMaxNumPerReq.json -y " % binPath) - tdSql.execute("use db") - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 5000000) - tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 5000000) - - - - # insert: timestamp and step - os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-timestep.json -y " % binPath) - tdSql.execute("use db") - tdSql.query("show stables") - 
tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 10) - tdSql.query("select count (tbname) from stb1") - tdSql.checkData(0, 0, 20) - tdSql.query("select last(ts) from db.stb00_0") - tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000") - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 200) - tdSql.query("select last(ts) from db.stb01_0") - tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000") - tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 400) - - # # insert: disorder_ratio - os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-disorder.json -g 2>&1 -y " % binPath) - tdSql.execute("use db") - tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 1) - tdSql.query("select count (tbname) from stb1") - tdSql.checkData(0, 0, 1) - tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 10) - tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 10) - - # insert: sample json - os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-sample-ts.json -y " % binPath) - tdSql.execute("use dbtest123") - tdSql.query("select c2 from stb0") - tdSql.checkData(0, 0, 2147483647) - tdSql.query("select c0 from stb0_0 order by ts") - tdSql.checkData(3, 0, 4) - tdSql.query("select count(*) from stb0 order by ts") - tdSql.checkData(0, 0, 40) - tdSql.query("select * from stb0_1 order by ts") - tdSql.checkData(0, 0, '2021-10-28 15:34:44.735') - tdSql.checkData(3, 0, '2021-10-31 15:34:44.735') - tdSql.query("select * from stb1 where t1=-127") - tdSql.checkRows(20) - tdSql.query("select * from stb1 where t2=127") - tdSql.checkRows(10) - tdSql.query("select * from stb1 where t2=126") - tdSql.checkRows(10) - - # insert: sample json - os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-sample.json -y " % binPath) - tdSql.execute("use dbtest123") - tdSql.query("select c2 from stb0") - tdSql.checkData(0, 0, 2147483647) - tdSql.query("select * from stb1 where t1=-127") - tdSql.checkRows(20) - tdSql.query("select * from stb1 where t2=127") - tdSql.checkRows(10) - tdSql.query("select * from stb1 where t2=126") - tdSql.checkRows(10) - - - # insert: test interlace parament - os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-interlace-row.json -y " % binPath) - tdSql.execute("use db") - tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 100) - tdSql.query("select count (*) from stb0") - tdSql.checkData(0, 0, 15000) - - - # # insert: auto_create - - tdSql.execute('drop database if exists db') - tdSql.execute('create database db') - tdSql.execute('use db') - os.system("%staosBenchmark -y -f tools/taosdemoAllTest/insert-drop-exist-auto-N00.json " % binPath) # drop = no, child_table_exists, auto_create_table varies - tdSql.execute('use db') - tdSql.query('show tables like \'NN123%\'') #child_table_exists = no, auto_create_table varies = 123 - tdSql.checkRows(20) - tdSql.query('show tables like \'NNN%\'') #child_table_exists = no, auto_create_table varies = no - tdSql.checkRows(20) - tdSql.query('show tables like \'NNY%\'') #child_table_exists = no, auto_create_table varies = yes - tdSql.checkRows(20) - tdSql.query('show tables like \'NYN%\'') #child_table_exists = yes, auto_create_table varies = no - tdSql.checkRows(0) - tdSql.query('show tables like \'NY123%\'') #child_table_exists = yes, auto_create_table varies = 123 - tdSql.checkRows(0) - tdSql.query('show tables like \'NYY%\'') #child_table_exists = yes, auto_create_table varies = yes - tdSql.checkRows(0) - - tdSql.execute('drop database if 
exists db') - os.system("%staosBenchmark -y -f tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json " % binPath) # drop = yes, child_table_exists, auto_create_table varies - tdSql.execute('use db') - tdSql.query('show tables like \'YN123%\'') #child_table_exists = no, auto_create_table varies = 123 - tdSql.checkRows(20) - tdSql.query('show tables like \'YNN%\'') #child_table_exists = no, auto_create_table varies = no - tdSql.checkRows(20) - tdSql.query('show tables like \'YNY%\'') #child_table_exists = no, auto_create_table varies = yes - tdSql.checkRows(20) - tdSql.query('show tables like \'YYN%\'') #child_table_exists = yes, auto_create_table varies = no - tdSql.checkRows(20) - tdSql.query('show tables like \'YY123%\'') #child_table_exists = yes, auto_create_table varies = 123 - tdSql.checkRows(20) - tdSql.query('show tables like \'YYY%\'') #child_table_exists = yes, auto_create_table varies = yes - tdSql.checkRows(20) - - # insert: test chinese encoding - os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-chinese.json -y " % binPath) - tdSql.execute("use db") - tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 10) - tdSql.query("select count (*) from stb0") - tdSql.checkData(0, 0, 1500) + # # # insert: disorder_ratio + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-disorder.json -g 2>&1 -y " % binPath) + # tdSql.execute("use db") + # tdSql.query("select count (tbname) from stb0") + # tdSql.checkData(0, 0, 1) + # tdSql.query("select count (tbname) from stb1") + # tdSql.checkData(0, 0, 1) + # tdSql.query("select count(*) from stb0") + # tdSql.checkData(0, 0, 10) + # tdSql.query("select count(*) from stb1") + # tdSql.checkData(0, 0, 10) + # # insert: sample json + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-sample-ts.json -y " % binPath) + # tdSql.execute("use dbtest123") + # tdSql.query("select c2 from stb0") + # tdSql.checkData(0, 0, 2147483647) + # tdSql.query("select c0 from stb0_0 order by ts") + # tdSql.checkData(3, 0, 4) + # tdSql.query("select count(*) from stb0 order by ts") + # tdSql.checkData(0, 0, 40) + # tdSql.query("select * from stb0_1 order by ts") + # tdSql.checkData(0, 0, '2021-10-28 15:34:44.735') + # tdSql.checkData(3, 0, '2021-10-31 15:34:44.735') + # tdSql.query("select * from stb1 where t1=-127") + # tdSql.checkRows(20) + # tdSql.query("select * from stb1 where t2=127") + # tdSql.checkRows(10) + # tdSql.query("select * from stb1 where t2=126") + # tdSql.checkRows(10) + + # # insert: sample json + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-sample.json -y " % binPath) + # tdSql.execute("use dbtest123") + # tdSql.query("select c2 from stb0") + # tdSql.checkData(0, 0, 2147483647) + # tdSql.query("select * from stb1 where t1=-127") + # tdSql.checkRows(20) + # tdSql.query("select * from stb1 where t2=127") + # tdSql.checkRows(10) + # tdSql.query("select * from stb1 where t2=126") + # tdSql.checkRows(10) + + + # # insert: test interlace parament + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-interlace-row.json -y " % binPath) + # tdSql.execute("use db") + # tdSql.query("select count (tbname) from stb0") + # tdSql.checkData(0, 0, 100) + # tdSql.query("select count (*) from stb0") + # tdSql.checkData(0, 0, 15000) + + + # # # insert: auto_create + + # tdSql.execute('drop database if exists db') + # tdSql.execute('create database db') + # tdSql.execute('use db') + # os.system("%staosBenchmark -y -f tools/taosdemoAllTest/insert-drop-exist-auto-N00.json " % binPath) # drop = no, 
child_table_exists, auto_create_table varies + # tdSql.execute('use db') + # tdSql.query('show tables like \'NN123%\'') #child_table_exists = no, auto_create_table varies = 123 + # tdSql.checkRows(20) + # tdSql.query('show tables like \'NNN%\'') #child_table_exists = no, auto_create_table varies = no + # tdSql.checkRows(20) + # tdSql.query('show tables like \'NNY%\'') #child_table_exists = no, auto_create_table varies = yes + # tdSql.checkRows(20) + # tdSql.query('show tables like \'NYN%\'') #child_table_exists = yes, auto_create_table varies = no + # tdSql.checkRows(0) + # tdSql.query('show tables like \'NY123%\'') #child_table_exists = yes, auto_create_table varies = 123 + # tdSql.checkRows(0) + # tdSql.query('show tables like \'NYY%\'') #child_table_exists = yes, auto_create_table varies = yes + # tdSql.checkRows(0) + + # tdSql.execute('drop database if exists db') + # os.system("%staosBenchmark -y -f tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json " % binPath) # drop = yes, child_table_exists, auto_create_table varies + # tdSql.execute('use db') + # tdSql.query('show tables like \'YN123%\'') #child_table_exists = no, auto_create_table varies = 123 + # tdSql.checkRows(20) + # tdSql.query('show tables like \'YNN%\'') #child_table_exists = no, auto_create_table varies = no + # tdSql.checkRows(20) + # tdSql.query('show tables like \'YNY%\'') #child_table_exists = no, auto_create_table varies = yes + # tdSql.checkRows(20) + # tdSql.query('show tables like \'YYN%\'') #child_table_exists = yes, auto_create_table varies = no + # tdSql.checkRows(20) + # tdSql.query('show tables like \'YY123%\'') #child_table_exists = yes, auto_create_table varies = 123 + # tdSql.checkRows(20) + # tdSql.query('show tables like \'YYY%\'') #child_table_exists = yes, auto_create_table varies = yes + # tdSql.checkRows(20) + + # # insert: test chinese encoding + # # TD-11399、TD-10819 + # os.system("%staosBenchmark -f tools/taosdemoAllTest/insert-chinese.json -y " % binPath) + # tdSql.execute("use db") + # tdSql.query("show stables") + # for i in range(6): + # for j in range(6): + # if tdSql.queryResult[i][0] == 'stb%d'%j: + # # print(i,"stb%d"%j) + # tdSql.checkData(i, 4, (j+1)*10) + # for i in range(13): + # tdSql.query("select count(*) from stb%d"%i) + # tdSql.checkData(0, 0, (i+1)*100) # rm useless files os.system("rm -rf ./insert*_res.txt*") diff --git a/tests/system-test/0-management/0-database/.gitkeep b/tests/system-test/0-management/0-database/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/0-management/1-stable/.gitkeep b/tests/system-test/0-management/1-stable/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/0-management/2-table/.gitkeep b/tests/system-test/0-management/2-table/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/0-management/3-tag/.gitkeep b/tests/system-test/0-management/3-tag/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/0-management/4-others/.gitkeep b/tests/system-test/0-management/4-others/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/develop-test/1-insert/0-sql/.gitkeep 
b/tests/system-test/0-others/.gitkeep similarity index 100% rename from tests/develop-test/1-insert/0-sql/.gitkeep rename to tests/system-test/0-others/.gitkeep diff --git a/tests/system-test/0-management/1-stable/create_col_tag.py b/tests/system-test/0-others/create_col_tag.py similarity index 100% rename from tests/system-test/0-management/1-stable/create_col_tag.py rename to tests/system-test/0-others/create_col_tag.py diff --git a/tests/develop-test/1-insert/1-stmt/.gitkeep b/tests/system-test/1-insert/.gitkeep similarity index 100% rename from tests/develop-test/1-insert/1-stmt/.gitkeep rename to tests/system-test/1-insert/.gitkeep diff --git a/tests/system-test/1-insert/0-sql/.gitkeep b/tests/system-test/1-insert/0-sql/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/1-insert/1-stmt/.gitkeep b/tests/system-test/1-insert/1-stmt/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/1-insert/2-schemaless/json/.gitkeep b/tests/system-test/1-insert/2-schemaless/json/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/1-insert/2-schemaless/line/.gitkeep b/tests/system-test/1-insert/2-schemaless/line/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/1-insert/2-schemaless/telnet/.gitkeep b/tests/system-test/1-insert/2-schemaless/telnet/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/1-insert/TD-11970.py b/tests/system-test/1-insert/TD-11970.py new file mode 100644 index 0000000000000000000000000000000000000000..ec80434b377158a4b9f5202aeda8c7fee600e867 --- /dev/null +++ b/tests/system-test/1-insert/TD-11970.py @@ -0,0 +1,84 @@ +################################################################### +# Copyright (c) 2020 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import taos +import time + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def __init__(self): + self.err_case = 0 + self.curret_case = 0 + + def caseDescription(self): + + ''' + case1 : [TD-11970] : there is no err return when create table using now+Ntimes. + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def check_td11970(self): + # this case expect all create table sql with now+Ntime is success. 
+ tdSql.prepare() + tdSql.execute(f"create stable stb1(ts timestamp, c1 int) tags (tag1 int, tag2 timestamp)") + + try: + tdSql.execute(f"create table t1 using stb1 tags(1, now-100b)") + tdSql.execute(f"create table t2 using stb1 tags(2, now-100u)") + tdSql.execute(f"create table t3 using stb1 tags(3, now-100a)") + tdSql.execute(f"create table t4 using stb1 tags(4, now-100s)") + tdSql.execute(f"create table t5 using stb1 tags(5, now-100m)") + tdSql.execute(f"create table t6 using stb1 tags(6, now-100h)") + tdSql.execute(f"create table t7 using stb1 tags(7, now-100d)") + tdSql.execute(f"create table t8 using stb1 tags(8, now-100w)") + + tdSql.execute(f"create table t9 using stb1 tags(9, now+10b)") + tdSql.execute(f"create table t10 using stb1 tags(10, now+10u)") + tdSql.execute(f"create table t11 using stb1 tags(11, now+10a)") + tdSql.execute(f"create table t12 using stb1 tags(12, now+10s)") + tdSql.execute(f"create table t13 using stb1 tags(13, now+10m)") + tdSql.execute(f"create table t14 using stb1 tags(14, now+10h)") + tdSql.execute(f"create table t15 using stb1 tags(15, now+10d)") + tdSql.execute(f"create table t16 using stb1 tags(16, now+10w)") + self.curret_case += 1 + tdLog.printNoPrefix("the case for td-11970 run passed") + except: + self.err_case += 1 + tdLog.printNoPrefix("the case for td-11970 run failed") + + pass + + def run(self): + self.check_td11970() + + if self.err_case > 0: + tdLog.exit(f"{self.err_case} case run failed") + else: + tdLog.success("all case run passed") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/1-insert/2-schemaless/json/.gitkeep b/tests/system-test/2-query/.gitkeep similarity index 100% rename from tests/develop-test/1-insert/2-schemaless/json/.gitkeep rename to tests/system-test/2-query/.gitkeep diff --git a/tests/system-test/2-query/0-aggregate/.gitkeep b/tests/system-test/2-query/0-aggregate/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/2-query/1-select/.gitkeep b/tests/system-test/2-query/1-select/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/2-query/2-compute/.gitkeep b/tests/system-test/2-query/2-compute/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/2-query/3-join/.gitkeep b/tests/system-test/2-query/3-join/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/2-query/4-union/.gitkeep b/tests/system-test/2-query/4-union/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/2-query/5-session/.gitkeep b/tests/system-test/2-query/5-session/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/2-query/6-state_window/.gitkeep b/tests/system-test/2-query/6-state_window/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/2-query/7-nest/.gitkeep 
b/tests/system-test/2-query/7-nest/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/2-query/8-udf/.gitkeep b/tests/system-test/2-query/8-udf/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/2-query/9-others/.gitkeep b/tests/system-test/2-query/9-others/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/2-query/TD-11256.py b/tests/system-test/2-query/TD-11256.py new file mode 100644 index 0000000000000000000000000000000000000000..f101ad211377310d01ad44504e2e1c60a405e826 --- /dev/null +++ b/tests/system-test/2-query/TD-11256.py @@ -0,0 +1,78 @@ +################################################################### +# Copyright (c) 2020 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import taos +import time + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def __init__(self): + self.err_case = 0 + self.curret_case = 0 + + def caseDescription(self): + + ''' + case1 : [TD-11256] query the super table in a mixed way of expression + tbname while using group by tbname + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def create_stb(self): + basetime = int(round(time.time() * 1000)) + tdSql.prepare() + tdSql.execute(f"create stable stb1(ts timestamp, c1 int) tags (tag1 int)") + for i in range(10): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"insert into t{i} values ({basetime}, {i})") + + pass + + def check_td11256(self): + # this case expects the connection to remain usable after running the group by sql + tdSql.query("select count(*) from stb1 group by ts") + try: + tdSql.error("select c1/2, tbname from stb1 group by tbname") + tdSql.query("show databases") + self.curret_case += 1 + tdLog.printNoPrefix("the case1: td-11256 run passed") + except: + self.err_case += 1 + tdLog.printNoPrefix("the case1: td-11256 run failed") + pass + + def run(self): + self.create_stb() + + self.check_td11256() + + if self.err_case > 0: + tdLog.exit(f"{self.err_case} case for TD-11256 run failed") + else: + tdLog.success("case for TD-11256 run passed") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/2-query/9-others/TD-11389.py b/tests/system-test/2-query/TD-11389.py similarity index 100% rename from tests/system-test/2-query/9-others/TD-11389.py rename to tests/system-test/2-query/TD-11389.py diff --git a/tests/system-test/2-query/9-others/TD-11945_crash.py b/tests/system-test/2-query/TD-11945_crash.py similarity index 100% rename from tests/system-test/2-query/9-others/TD-11945_crash.py rename to tests/system-test/2-query/TD-11945_crash.py diff --git
a/tests/system-test/2-query/0-aggregate/TD-12340-12342.py b/tests/system-test/2-query/TD-12340-12342.py similarity index 83% rename from tests/system-test/2-query/0-aggregate/TD-12340-12342.py rename to tests/system-test/2-query/TD-12340-12342.py index 360734f5979f194b417b00e1fa022e1d3147ecb1..b190e94a9e5b0380bcd906c3791a2432ba92403b 100644 --- a/tests/system-test/2-query/0-aggregate/TD-12340-12342.py +++ b/tests/system-test/2-query/TD-12340-12342.py @@ -47,18 +47,6 @@ class TDTestCase: pass - def check_td12340(self): - # this case expect return two column when using "group by ts" - tdSql.query("select count(*) from stb1 group by ts") - try: - tdSql.checkCols(2) - self.curret_case += 1 - tdLog.printNoPrefix("the case1: td-12340 run passed") - except: - self.err_case += 1 - tdLog.printNoPrefix("the case1: td-12340 run failed") - pass - def check_td12342(self): # this case expect return err when using "group by ts order by first-tag" try: @@ -73,7 +61,6 @@ class TDTestCase: def run(self): self.create_stb() - self.check_td12340() self.check_td12342() if self.err_case > 0: diff --git a/tests/system-test/2-query/TD-12344.py b/tests/system-test/2-query/TD-12344.py new file mode 100644 index 0000000000000000000000000000000000000000..871356d49bc738fc6290e79b13d4ea41013282ef --- /dev/null +++ b/tests/system-test/2-query/TD-12344.py @@ -0,0 +1,110 @@ +################################################################### +# Copyright (c) 2020 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from posixpath import split +import sys +import os + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1420041600000 # 2015-01-01 00:00:00 this is begin time for first record + self.num = 10 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + + def caseDescription(self): + + ''' + case1 : [TD-12344] : + this test case is an test case for unexpectd crash for session function , it will coredump taoshell ; + + ''' + return + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def getcfgPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + print(selfPath) + if ("community" in selfPath): + projPath 
= selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + cfgPath = projPath + "/sim/dnode1/cfg " + return cfgPath + + def run(self): + tdSql.prepare() + tdSql.execute("create database if not exists testdb keep 36500;") + tdSql.execute("use testdb;") + tdSql.execute("create stable st (ts timestamp , id int , value double) tags(hostname binary(10) ,ind int);") + for i in range(self.num): + tdSql.execute("insert into sub_%s using st tags('host_%s' , %d) values (%d , %d , %f );"%(str(i),str(i),i*10,self.ts+100*i,i*2,i+10.00)) + tdSql.execute("insert into sub_%s using st tags('host_%s' , %d) values (%d , %d , %f );"%(str(i),str(i),i*10,self.ts+200*i,i*2,i+10.00)) + tdSql.execute("insert into sub_%s using st tags('host_%s' , %d) values (%d , %d , %f );"%(str(i),str(i),i*10,self.ts+300*i,i*2,i+10.00)) + tdSql.execute("insert into sub_%s using st tags('host_%s' , %d) values (%d , %d , %f );"%(str(i),str(i),i*10,self.ts+10000*i,i*2,i+10.00)) + + cfg_path = self.getcfgPath() + print(cfg_path) + tdSql.execute('select elapsed(ts,10s) from testdb.st where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" session(ts,1d) group by tbname;') # session not support super table + taos_cmd1= "taos -c %s -s 'select elapsed(ts,10s) from testdb.st where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" session(ts,1d) group by tbname;' " % (cfg_path) + _ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + + diff --git a/tests/system-test/3-connectors/c#/test.sh b/tests/system-test/3-connectors/c#/test.sh new file mode 100755 index 0000000000000000000000000000000000000000..2d4f18b668263d40bb18ef46f34b7299b3f7cdd3 --- /dev/null +++ b/tests/system-test/3-connectors/c#/test.sh @@ -0,0 +1,32 @@ +#!/bin/bash +function stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} +stopTaosd +rm -rf /var/lib/taos/* +rm -rf /var/log/taos/* +nohup taosd -c /etc/taos/ > /dev/null 2>&1 & +sleep 10 +cd ../../ +WKC=`pwd` +cd ${WKC}/src/connector/C# +dotnet test +dotnet run --project src/test/Cases/Cases.csproj + +cd ${WKC}/tests/examples/C# +dotnet run --project C#checker/C#checker.csproj +dotnet run --project TDengineTest/TDengineTest.csproj +dotnet run --project schemaless/schemaless.csproj + +cd ${WKC}/tests/examples/C#/taosdemo +dotnet build -c Release +tree | true +./bin/Release/net5.0/taosdemo -c /etc/taos -y diff --git a/tests/system-test/3-connectors/go/test.sh b/tests/system-test/3-connectors/go/test.sh new file mode 100755 index 0000000000000000000000000000000000000000..097723ad461b69c75e18bc8018c025f0e9f7a3e3 --- /dev/null +++ b/tests/system-test/3-connectors/go/test.sh @@ -0,0 +1,20 @@ +#!/bin/bash +function stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} +stopTaosd +rm -rf /var/lib/taos/* +rm -rf /var/log/taos/* +nohup taosd -c /etc/taos/ > /dev/null 2>&1 & +sleep 
10 +cd ../../ +WKC=`pwd` + diff --git a/tests/system-test/3-connectors/java/test.sh b/tests/system-test/3-connectors/java/test.sh new file mode 100755 index 0000000000000000000000000000000000000000..15f7b84955b793e0fb6acaa434fba83c6ff0c710 --- /dev/null +++ b/tests/system-test/3-connectors/java/test.sh @@ -0,0 +1,17 @@ +#!/bin/bash +function stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} +stopTaosd +rm -rf /var/lib/taos/* +rm -rf /var/log/taos/* +nohup taosd -c /etc/taos/ > /dev/null 2>&1 & +sleep 10 diff --git a/tests/system-test/3-connectors/nodejs/test.sh b/tests/system-test/3-connectors/nodejs/test.sh new file mode 100755 index 0000000000000000000000000000000000000000..3b1d8bb4790d6273e32a42ce50979e98e1ce5a92 --- /dev/null +++ b/tests/system-test/3-connectors/nodejs/test.sh @@ -0,0 +1,29 @@ +#!/bin/bash +function stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} +stopTaosd +rm -rf /var/lib/taos/* +rm -rf /var/log/taos/* +nohup taosd -c /etc/taos/ > /dev/null 2>&1 & +sleep 10 +cd ../../ +WKC=`pwd` +cd ${WKC}/src/connector/nodejs +npm install +npm run test +cd ${WKC}/tests/examples/nodejs +npm install td2.0-connector > /dev/null 2>&1 +node nodejsChecker.js host=localhost +node test1970.js +cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport +npm install td2.0-connector > /dev/null 2>&1 +node nanosecondTest.js diff --git a/tests/system-test/3-connectors/python/test.sh b/tests/system-test/3-connectors/python/test.sh new file mode 100755 index 0000000000000000000000000000000000000000..fe0dfbdac99f6938c8a57d13666f609c2c7c5d33 --- /dev/null +++ b/tests/system-test/3-connectors/python/test.sh @@ -0,0 +1,30 @@ +#!/bin/bash +function stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} +stopTaosd +rm -rf /var/lib/taos/* +rm -rf /var/log/taos/* +nohup taosd -c /etc/taos/ > /dev/null 2>&1 & +sleep 10 +cd ../../src/connector/python +pip3 install pytest +pytest tests/ + +python3 examples/bind-multi.py +python3 examples/bind-row.py +python3 examples/demo.py +python3 examples/insert-lines.py +python3 examples/pep-249.py +python3 examples/query-async.py +python3 examples/query-objectively.py +python3 examples/subscribe-sync.py +python3 examples/subscribe-async.py diff --git a/tests/system-test/3-connectors/restful/test.sh b/tests/system-test/3-connectors/restful/test.sh new file mode 100755 index 0000000000000000000000000000000000000000..1c6d8fbc2c5da6633d749054a19a5bde7772faf7 --- /dev/null +++ b/tests/system-test/3-connectors/restful/test.sh @@ -0,0 +1,19 @@ +#!/bin/bash +function stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print 
$2}'` + done +} +stopTaosd +rm -rf /var/lib/taos/* +rm -rf /var/log/taos/* +nohup taosd -c /etc/taos/ > /dev/null 2>&1 & +sleep 10 +cd ../../ +WKC=`pwd` diff --git a/tests/system-test/3-connectors/rust/test.sh b/tests/system-test/3-connectors/rust/test.sh new file mode 100755 index 0000000000000000000000000000000000000000..4bf6394b27cf43674ed38a1e4de46342ee3b1ae4 --- /dev/null +++ b/tests/system-test/3-connectors/rust/test.sh @@ -0,0 +1,19 @@ +#!/bin/bash +function stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} +stopTaosd +rm -rf /var/lib/taos/* +rm -rf /var/log/taos/* +nohup taosd -c /etc/taos/ > /dev/null 2>&1 & +sleep 10 +cd ../../ +WKC=`pwd` \ No newline at end of file diff --git a/tests/system-test/4-taosAdapter/TD-12163.py b/tests/system-test/4-taosAdapter/TD-12163.py new file mode 100644 index 0000000000000000000000000000000000000000..aafc218611c1c51e3d6f82d802c95fd8e2c16cf5 --- /dev/null +++ b/tests/system-test/4-taosAdapter/TD-12163.py @@ -0,0 +1,118 @@ +################################################################### +# Copyright (c) 2020 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from taosdata +# +################################################################### + +# -*- coding: utf-8 -*- + +import taos +import time +import requests + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def __init__(self): + self.err_case = 0 + self.curret_case = 0 + self.url = "http://127.0.0.1:6041/rest/sql" + self.header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='} + + def caseDescription(self): + + ''' + case1 : [TD-12163] alter table-schema using restful interface + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def check_td12163(self): + # basetime = int(round(time.time() * 1000)) + tdSql.prepare() + tdSql.execute(f"create stable stb1(ts timestamp, c1 int) tags (tag1 int)") + tdSql.execute(f"create table nt1 (nts timestamp, nc1 int)") + + add_column_stb = "alter table db.stb1 add column c2 float" + drop_column_stb = "alter table db.stb1 drop column c2 " + add_column_ntb = "alter table db.nt1 add column nc2 float" + drop_column_ntb = "alter table db.nt1 drop column nc2 " + + conn_add_stb = requests.post(url=self.url, headers=self.header, data=add_column_stb) + resp_code_stb_add = conn_add_stb.status_code + resp_add_stb = conn_add_stb.json() + try: + assert resp_code_stb_add//200 == 1 + assert resp_add_stb["status"] == "succ" + self.curret_case += 1 + tdLog.printNoPrefix("the case add column to stable successful") + except: + self.err_case += 1 + tdLog.printNoPrefix("the case add column to stable failed") + + + conn_add_ntb = requests.post(url=self.url, headers=self.header, data=add_column_ntb) + resp_code_ntb_add = conn_add_ntb.status_code + resp_add_ntb = conn_add_ntb.json() + try: + assert resp_code_ntb_add//200 == 1 + assert resp_add_ntb["status"] == "succ" + self.curret_case += 1 + tdLog.printNoPrefix("the case add column 
to normal table successful") + except: + self.err_case += 1 + tdLog.printNoPrefix("the case add column to normal table failed") + + conn_drop_stb = requests.post(url=self.url, headers=self.header, data=drop_column_stb) + resp_code_stb_drop = conn_drop_stb.status_code + resp_drop_stb = conn_drop_stb.json() + try: + assert resp_code_stb_drop // 200 == 1 + assert resp_drop_stb["status"] == "succ" + self.curret_case += 1 + tdLog.printNoPrefix("the case drop column to stable successful") + except: + self.err_case += 1 + tdLog.printNoPrefix("the case drop column to stable failed") + + conn_drop_ntb = requests.post(url=self.url, headers=self.header, data=drop_column_ntb) + resp_code_ntb_drop = conn_drop_ntb.status_code + resp_drop_ntb = conn_drop_ntb.json() + try: + assert resp_code_ntb_drop // 200 == 1 + assert resp_drop_ntb["status"] == "succ" + self.curret_case += 1 + tdLog.printNoPrefix("the case drop column to normal table successful") + except: + self.err_case += 1 + tdLog.printNoPrefix("the case drop column to normal table failed") + + pass + + def run(self): + self.check_td12163() + + if self.err_case > 0: + tdLog.exit(f"{self.err_case} case for TD-12163 run failed") + else: + tdLog.success("case for TD-12163 run passed") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/develop-test/1-insert/2-schemaless/line/.gitkeep b/tests/system-test/5-taos-tools/.gitkeep similarity index 100% rename from tests/develop-test/1-insert/2-schemaless/line/.gitkeep rename to tests/system-test/5-taos-tools/.gitkeep diff --git a/tests/system-test/5-taos-tools/taosdump/basic.py b/tests/system-test/5-taos-tools/basic.py similarity index 100% rename from tests/system-test/5-taos-tools/taosdump/basic.py rename to tests/system-test/5-taos-tools/basic.py diff --git a/tests/system-test/5-taos-tools/taosbenchmark/.gitkeep b/tests/system-test/5-taos-tools/taosbenchmark/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/5-taos-tools/taosdump/.gitkeep b/tests/system-test/5-taos-tools/taosdump/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/system-test/7-customer/.gitkeep b/tests/system-test/7-customer/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/develop-test/1-insert/2-schemaless/telnet/.gitkeep b/tests/system-test/fulltest-cluster.sh old mode 100644 new mode 100755 similarity index 100% rename from tests/develop-test/1-insert/2-schemaless/telnet/.gitkeep rename to tests/system-test/fulltest-cluster.sh diff --git a/tests/system-test/fulltest-connector.sh b/tests/system-test/fulltest-connector.sh new file mode 100755 index 0000000000000000000000000000000000000000..dbb77b2ce07d8c34c549a22a3218ebcb6894d2a3 --- /dev/null +++ b/tests/system-test/fulltest-connector.sh @@ -0,0 +1,7 @@ +bash 3-connectors/c#/test.sh +bash 3-connectors/go/test.sh +bash 3-connectors/java/test.sh +bash 3-connectors/nodejs/test.sh +bash 3-connectors/python/test.sh +bash 3-connectors/restful/test.sh +bash 3-connectors/rust/test.sh diff --git a/tests/system-test/fulltest-insert.sh b/tests/system-test/fulltest-insert.sh new file mode 100755 index
0000000000000000000000000000000000000000..709fab8791b37169a236887d57109a93cb38b585 --- /dev/null +++ b/tests/system-test/fulltest-insert.sh @@ -0,0 +1,5 @@ + +python3 ./test.py -f 1-insert/batchInsert.py +python3 test.py -f 1-insert/TD-11970.py + + diff --git a/tests/system-test/fulltest-others.sh b/tests/system-test/fulltest-others.sh new file mode 100755 index 0000000000000000000000000000000000000000..26e6aee2b90a619329e23bb8418b95cc0466a78f --- /dev/null +++ b/tests/system-test/fulltest-others.sh @@ -0,0 +1 @@ +python3 ./test.py -f 0-others/create_col_tag.py \ No newline at end of file diff --git a/tests/system-test/fulltest-query.sh b/tests/system-test/fulltest-query.sh new file mode 100755 index 0000000000000000000000000000000000000000..efdbbe4047791dfa865d2897c63681fb6b41b9c6 --- /dev/null +++ b/tests/system-test/fulltest-query.sh @@ -0,0 +1,5 @@ +python3 ./test.py -f 2-query/TD-11256.py +# python3 ./test.py -f 2-query/TD-11389.py +python3 ./test.py -f 2-query/TD-11945_crash.py +python3 ./test.py -f 2-query/TD-12340-12342.py +python3 ./test.py -f 2-query/TD-12344.py diff --git a/tests/system-test/fulltest-taosAdapter.sh b/tests/system-test/fulltest-taosAdapter.sh new file mode 100755 index 0000000000000000000000000000000000000000..f7ca439fd4407438d3ffaf11a6dcd2c839508404 --- /dev/null +++ b/tests/system-test/fulltest-taosAdapter.sh @@ -0,0 +1,3 @@ +python3 test.py -f 4-taosAdapter/TD-12163.py +python3 ./test.py -f 4-taosAdapter/taosAdapter_insert.py +python3 ./test.py -f 4-taosAdapter/taosAdapter_query.py \ No newline at end of file diff --git a/tests/system-test/fulltest-tools.sh b/tests/system-test/fulltest-tools.sh new file mode 100755 index 0000000000000000000000000000000000000000..382374efc38a1976cfc2de0c989129c03e157acf --- /dev/null +++ b/tests/system-test/fulltest-tools.sh @@ -0,0 +1 @@ +python3 ./test.py -f 5-taos-tools/basic.py diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh deleted file mode 100755 index 4087918668baa792c24331569a916c243eae9c76..0000000000000000000000000000000000000000 --- a/tests/system-test/fulltest.sh +++ /dev/null @@ -1,7 +0,0 @@ -python3 test.py -f 0-management/1-stable/create_col_tag.py -# python3 test.py -f 2-query/9-others/TD-11945_crash.py # this test case must need TD-6140 merge into develop -#python3 test.py -f 2-query/9-others/TD-11389.py # this case will run when this bug fix TD-11389 -python3 test.py -f 4-taosAdapter/taosAdapter_query.py -python3 test.py -f 4-taosAdapter/taosAdapter_insert.py -python3 test.py -f 5-taos-tools/taosdump/basic.py - diff --git a/tests/test-CI.sh b/tests/test-CI.sh new file mode 100755 index 0000000000000000000000000000000000000000..c458be0aa184d6d0a3831554d4974a4b98662cfe --- /dev/null +++ b/tests/test-CI.sh @@ -0,0 +1,222 @@ +#!/bin/bash + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +tests_dir=`pwd` +IN_TDINTERNAL="community" + +function stopTaosd { + echo "Stop taosd" + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} + +function dohavecore(){ + corefile=`find $corepath -mmin 1` + if [ -n "$corefile" ];then + core_file=`echo $corefile|cut -d " " -f2` + proc=`file $core_file|awk -F "execfn:" '/execfn:/{print $2}'|tr -d \' |awk '{print $1}'|tr -d \,` + echo 'taosd or taos has generated core' + 
rm case.log + if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]] && [[ $1 == 1 ]]; then + cd ../../../ + tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz debug/build/bin/taosd debug/build/bin/tsim debug/build/lib/libtaos*so* + if [[ $2 == 1 ]];then + cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S"` + else + cd community + cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" ` + fi + else + cd ../../ + if [[ $1 == 1 ]];then + tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz debug/build/bin/taosd debug/build/bin/tsim debug/build/lib/libtaos*so* + cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" ` + fi + fi + if [[ $1 == 1 ]];then + echo '\n'|gdb $proc $core_file -ex "bt 10" -ex quit + exit 8 + fi + fi +} + + +function runPyCaseOneByOne { + while read -r line; do + if [[ $line =~ ^python.* ]]; then + if [[ $line != *sleep* ]]; then + + if [[ $line =~ '-r' ]];then + case=`echo $line|awk '{print $4}'` + else + case=`echo $line|awk '{print $NF}'` + fi + start_time=`date +%s` + date +%F\ %T | tee -a pytest-out.log + echo -n $case + $line > /dev/null 2>&1 && \ + echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \ + echo -e "${RED} failed${NC}" | tee -a pytest-out.log + end_time=`date +%s` + out_log=`tail -1 pytest-out.log ` + # if [[ $out_log =~ 'failed' ]];then + # exit 8 + # fi + echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log + else + $line > /dev/null 2>&1 + fi + fi + done < $1 +} + +function runPyCaseOneByOnefq() { + end=`sed -n '$=' $1` + for ((i=1;i<=$end;i++)) ; do + if [[ $(($i%$2)) -eq $4 ]];then + line=`sed -n "$i"p $1` + if [[ $line =~ ^python.* ]]; then + if [[ $line != *sleep* ]]; then + + if [[ $line =~ '-r' ]];then + case=`echo $line|awk '{print $4}'` + else + case=`echo $line|awk '{print $NF}'` + fi + start_time=`date +%s` + date +%F\ %T | tee -a pytest-out.log + echo -n $case + if [[ $1 =~ full ]] ; then + line=$line" -s" + fi + $line > case.log 2>&1 && \ + echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \ + echo -e "${RED} failed${NC}" | tee -a pytest-out.log + end_time=`date +%s` + out_log=`tail -1 pytest-out.log ` + if [[ $out_log =~ 'failed' ]];then + cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" ` + echo '=====================log===================== ' + cat case.log + rm -rf case.log + dohavecore $3 2 + if [[ $3 == 1 ]];then + exit 8 + fi + fi + echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log + else + $line > /dev/null 2>&1 + fi + dohavecore $3 2 + else + echo $line + if [[ $line =~ ^bash.* ]]; then + # $line > case.log 2>&1 || cat case.log && exit 8 + # cat case.log + $line > case.log 2>&1 + if [ $? 
-ne 0 ];then + cat case.log + exit 8 + fi + fi + fi + fi + done + rm -rf ../../sim/case.log +} + +###################### +# main entry +###################### + +unameOut="$(uname -s)" +case "${unameOut}" in + Linux*) OS=Linux;; + Darwin*) OS=Darwin;; + CYGWIN*) OS=Windows;; + *) OS=Unknown;; +esac + +case "${OS}" in + Linux*) TAOSLIB=libtaos.so;; + Darwin*) TAOSLIB=libtaos.dylib;; + Windows*) TAOSLIB=taos.dll;; + Unknown) TAOSLIB="UNKNOWN:${unameOut}";; +esac + +echo TAOSLIB is ${TAOSLIB} + +totalFailed=0 +totalPyFailed=0 +totalJDBCFailed=0 +totalUnitFailed=0 +totalExampleFailed=0 +totalApiFailed=0 + +if [ "${OS}" == "Linux" ]; then + corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern` + if [ -z "$corepath" ];then + echo "/coredump/core_%e_%p_%t" > /proc/sys/kernel/core_pattern || echo "Permission denied" + corepath="/coredump/" + fi +fi + + +echo "### run Python test case ###" + +cd $tests_dir + +if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then + cd ../.. +else + cd ../ +fi + +TOP_DIR=`pwd` +TAOSLIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1` +if [[ "$TAOSLIB_DIR" == *"$IN_TDINTERNAL"* ]]; then + LIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1|cut -d '/' -f 2,3,4,5` +else + LIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1|cut -d '/' -f 2,3,4` +fi + +export LD_LIBRARY_PATH=$TOP_DIR/$LIB_DIR:$LD_LIBRARY_PATH + +cd $tests_dir/pytest + +[ -f pytest-out.log ] && rm -f pytest-out.log + +if [ "$1" == "full" ]; then + echo "### run Python full test ###" + runPyCaseOneByOne fulltest-tools.sh + runPyCaseOneByOne fulltest-query.sh + runPyCaseOneByOne fulltest-other.sh + runPyCaseOneByOne fulltest-insert.sh + runPyCaseOneByOne fulltest-connector.sh +else + echo "### run $1 $2 test ###" + if [ "$1" != "query" ] && [ "$1" != "taosAdapter" ] && [ "$1" != "other" ] && [ "$1" != "tools" ] && [ "$1" != "insert" ] && [ "$1" != "connector" ] ;then + echo " wrong option:$1 must one of [query,other,tools,insert,connector,taosAdapter]" + exit 8 + fi + cd $tests_dir/pytest + runPyCaseOneByOnefq fulltest-$1.sh $2 1 $3 + cd $tests_dir/develop-test + runPyCaseOneByOnefq fulltest-$1.sh $2 1 $3 + cd $tests_dir/system-test + runPyCaseOneByOnefq fulltest-$1.sh $2 1 $3 +fi +
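
Editor's note: the following is a minimal usage sketch inferred from the argument handling in the new tests/test-CI.sh above (the "full" branch and the runPyCaseOneByOnefq call `runPyCaseOneByOnefq fulltest-$1.sh $2 1 $3`). The group name and shard numbers shown are illustrative assumptions, not values taken from the actual CI configuration.

#!/bin/bash
# Run every test group sequentially (the "full" branch of the script).
./test-CI.sh full

# Run only the "query" group, splitting its case list into 5 shards and
# executing shard 2: runPyCaseOneByOnefq picks the cases whose line index i
# in fulltest-query.sh satisfies i % 5 == 2 (here $2=5 is the divisor and
# $3=2 is the remainder passed through as its fourth argument).
./test-CI.sh query 5 2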