diff --git a/Jenkinsfile b/Jenkinsfile
index 9d131d0ca500ef248740b5ce31fd92ce197b32f8..f2e3c1c4f6c3754f33f56575c4f6b89170e36948 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -450,7 +450,7 @@ pipeline {
     stage('test_b1_s2') {
       agent{label " slave2 || slave12 "}
       steps {
-        timeout(time: 55, unit: 'MINUTES'){
+        timeout(time: 105, unit: 'MINUTES'){
           pre_test()
           sh '''
           rm -rf /var/lib/taos/*
diff --git a/documentation20/cn/09.connections/docs.md b/documentation20/cn/09.connections/docs.md
index bc3259365d0b658184318e994ffd31a9e4ffee90..e2f921cc973c28f16f491705800012e1f6a6f074 100644
--- a/documentation20/cn/09.connections/docs.md
+++ b/documentation20/cn/09.connections/docs.md
@@ -14,7 +14,7 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/
 TDengine 的 Grafana 插件请从 下载。
 
 ```bash
-GF_VERSION=3.1.1
+GF_VERSION=3.1.3
 wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
 ```
@@ -75,15 +75,7 @@ allow_loading_unsigned_plugins = tdengine-datasource
 
 #### 导入 Dashboard
 
-我们提供一个 TDengine Dashboard 可以作为 TDengine 集群的监控可视化工具使用,见 [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146)。
-
-点击左侧 `Import` 按钮,选择 **Grafana.com Dashboard**,j将id `15146` 填入并加载:
-
-![img](../images/connections/import_dashboard1.jpg)
-
-导入完成之后可看到如下效果:
-
-![img](../images/connections/dashboard-15146.png)
+在 2.3.3.0 及以上版本,您可以导入 TDinsight Dashboard (Grafana Dashboard ID: [15167](https://grafana.com/grafana/dashboards/15167)) 作为 TDengine 集群的监控可视化工具。安装和使用说明请见 [TDinsight 用户手册](https://www.taosdata.com/cn/documentation/tools/insight)。
 
 ## MATLAB
diff --git a/documentation20/en/09.connections/docs.md b/documentation20/en/09.connections/docs.md
index b56458d351d23a2b61f88cfdf7dc64dc8043a295..f5af01d9b189d20facdd3c0702d72f256a2b4d8e 100644
--- a/documentation20/en/09.connections/docs.md
+++ b/documentation20/en/09.connections/docs.md
@@ -15,7 +15,7 @@ https://grafana.com/grafana/download.
 Download grafana plugin from .
 
 ```bash
-GF_VERSION=3.1.1
+GF_VERSION=3.1.3
 wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
 ```
@@ -69,15 +69,7 @@ According to the default prompt, query the average system memory usage at the sp
 
 #### Import Dashboard
 
-We provide an example dashboard [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146)。
-
-Click the `Import` button on the left panel and load the grafana id:
-
-![img](../images/connections/import_dashboard1.jpg)
-
-You can see as follows after Dashboard imported.
-
-![img](../images/connections/dashboard-15146.png)
+Since TDengine 2.3.3.0, we provide a TDinsight dashboard (Grafana Dashboard ID: [15167](https://grafana.com/grafana/dashboards/15167)) for TDengine cluster monitoring. Please refer to the [TDinsight User Manual](https://www.taosdata.com/en/documentation/tools/insight) for details.
## MATLAB diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4a35d5b7192891999b665d97c737edaa4e1287b0..6af83d0cb3c8ac285a47a86798d7e6ffaf8975e0 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1099,7 +1099,7 @@ static int32_t addPrimaryTsColumnForTimeWindowQuery(SQueryInfo* pQueryInfo, SSql tstrncpy(s.name, aAggs[TSDB_FUNC_TS].name, sizeof(s.name)); SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL, getNewResColId(pCmd)); + tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL, 0); return TSDB_CODE_SUCCESS; } @@ -2198,7 +2198,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11); } - if(pItem->aliasName != NULL && validateColumnName(pItem->aliasName) != TSDB_CODE_SUCCESS){ + if(pItem->aliasName != NULL && strcasecmp(pItem->aliasName, DEFAULT_PRIMARY_TIMESTAMP_COL_NAME) == 0){ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11); } @@ -2781,12 +2781,17 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + SSchema* pColumnSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); + // elapsed only can be applied to primary key - if (functionId == TSDB_FUNC_ELAPSED && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "elapsed only can be applied to primary key"); + if (functionId == TSDB_FUNC_ELAPSED) { + if ( index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX || pColumnSchema->colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "elapsed only can be applied to primary key"); + } } - pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta); // functions can not be applied to tags @@ -2816,7 +2821,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE || functionId == TSDB_FUNC_CSUM) { SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0}; SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, - TSDB_KEYSIZE, getNewResColId(pCmd), TSDB_KEYSIZE, false); + TSDB_KEYSIZE, 0, TSDB_KEYSIZE, false); tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS_DUMMY].name, sizeof(pExpr->base.aliasName)); SColumnList ids = createColumnList(1, 0, 0); @@ -3137,7 +3142,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // set the first column ts for top/bottom query int32_t tsFuncId = (functionId == TSDB_FUNC_MAVG) ? 
TSDB_FUNC_TS_DUMMY : TSDB_FUNC_TS; SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - pExpr = tscExprAppend(pQueryInfo, tsFuncId, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pCmd), + pExpr = tscExprAppend(pQueryInfo, tsFuncId, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, 0, 0, false); tstrncpy(pExpr->base.aliasName, aAggs[tsFuncId].name, sizeof(pExpr->base.aliasName)); @@ -3163,7 +3168,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // todo REFACTOR // set the first column ts for top/bottom query SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pCmd), + pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, 0, 0, false); tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS].name, sizeof(pExpr->base.aliasName)); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 4879576bc447b0f483a641b749dbc4b9fc5218e2..c72dfd105e410a290e19d4c20db9339fdb09d1f3 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -837,7 +837,7 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, return TSDB_CODE_TSC_INVALID_OPERATION; } - if (pExpr->resColId >= 0) { + if (pExpr->resColId > 0) { tscError("result column id underflowed: %d", pExpr->resColId); return TSDB_CODE_TSC_RES_TOO_MANY; } diff --git a/src/kit/taos-tools b/src/kit/taos-tools index dca4059d87c3f5c678a5e946978d40daec204e27..beca4813316f254624d8dbecf54d45a5a232c61d 160000 --- a/src/kit/taos-tools +++ b/src/kit/taos-tools @@ -1 +1 @@ -Subproject commit dca4059d87c3f5c678a5e946978d40daec204e27 +Subproject commit beca4813316f254624d8dbecf54d45a5a232c61d diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index 6a19ca5c8d1e0c7f30da30c37c24c008ea34b2ee..389f188258bbb471addf21c08562bba3bb56da40 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -590,7 +590,8 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { } // outer query order by support int32_t orderColId = pQueryAttr->order.orderColId; - if (pQueryAttr->vgId == 0 && orderColId != PRIMARYKEY_TIMESTAMP_COL_INDEX && orderColId != INT32_MIN) { + + if (pQueryAttr->vgId == 0 && orderColId != INT32_MIN) { op = OP_Order; taosArrayPush(plan, &op); } @@ -664,7 +665,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { // outer query order by support int32_t orderColId = pQueryAttr->order.orderColId; - if (pQueryAttr->vgId == 0 && orderColId != PRIMARYKEY_TIMESTAMP_COL_INDEX && orderColId != INT32_MIN) { + if (pQueryAttr->vgId == 0 && orderColId != INT32_MIN) { op = OP_Order; taosArrayPush(plan, &op); } diff --git a/tests/develop-test/2-query/7-nest/ts_hidden_column.py b/tests/develop-test/2-query/7-nest/ts_hidden_column.py new file mode 100644 index 0000000000000000000000000000000000000000..2e882bb892271037afeb91250f49b359e115ab4e --- /dev/null +++ b/tests/develop-test/2-query/7-nest/ts_hidden_column.py @@ -0,0 +1,59 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+    def caseDescription(self):
+        '''
+        case1: [TD-12145] a column generated by a function/clause in the inner query will be used as ts in the outer query
+        case2: [TD-12164] the elapsed function can only take the primary timestamp as its first parameter
+        case3: [TD-12165] _c0 can not be used as an alias name
+        '''
+        return
+
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+        self._conn = conn
+
+    def run(self):
+        print("running {}".format(__file__))
+        tdSql.execute("drop database if exists td12145")
+        tdSql.execute("create database if not exists td12145")
+        tdSql.execute('use td12145')
+
+        tdSql.execute('create stable st(ts timestamp , value int ) tags (ind int)')
+        tdSql.execute('insert into tb1 using st tags(1) values(now ,1)')
+        tdSql.execute('insert into tb1 using st tags(1) values(now+1s ,2)')
+        tdSql.execute('insert into tb1 using st tags(1) values(now+2s ,3)')
+        tdSql.error('select elapsed(ts00 ,1s) from (select elapsed(ts,1s) ts00 from tb1)')
+        tdSql.error('select elapsed(ts00 ,1s) from (select value ts00 from tb1)')
+        tdSql.error('select _c0 from (select value as _c0 , _c0 from st)')
+        tdSql.error('select ts from (select value as _c0 , ts from st)')
+        tdSql.query('select ts, max(nestvalue) from (select csum(value) nestvalue from tb1)')
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 1, 6)
+
+        tdSql.execute('drop database td12145')
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/develop-test/fulltest.sh b/tests/develop-test/fulltest.sh
index 510bf09f868f58fc245a3d3b9b68b3c30fa6808b..016c75e03eb62430d6e48a5ebb775e05dc1a26d0 100755
--- a/tests/develop-test/fulltest.sh
+++ b/tests/develop-test/fulltest.sh
@@ -1,3 +1,4 @@
 python3 test.py -f 0-management/3-tag/json_tag.py
 python3 test.py -f 1-insert/0-sql/batchInsert.py
 python3 test.py -f 2-query/4-union/union-order.py
+python3 test.py -f 2-query/7-nest/ts_hidden_column.py
diff --git a/tests/pytest/fulltest-connector.sh b/tests/pytest/fulltest-connector.sh
index 89361e5ea1917e877373dc856f926a1a9d32dde9..701c316040970b9077e6c730c1346dcf8759f673 100755
--- a/tests/pytest/fulltest-connector.sh
+++ b/tests/pytest/fulltest-connector.sh
@@ -7,4 +7,7 @@ ulimit -c unlimited
 # python3 test.py -f restful/restful_bind_db2.py
 python3 ./test.py -f client/nettest.py
+python3 ./test.py -f ../system-test/4-taosAdapter/taosAdapter_query.py
+python3 ./test.py -f ../system-test/4-taosAdapter/taosAdapter_insert.py
+
 #======================p1-end===============
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index b14be1cc110caea6fc42dd6ca66934d5b5333f3f..9160d34a8aa38c1c41be9cb54accc2cb76bcd80c 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -167,6 +167,7 @@ python3 ./test.py -f update/merge_commit_data.py
 # tools
 python3 test.py -f tools/taosdumpTest.py
 python3 test.py -f tools/taosdumpTest2.py
+python3 test.py -f tools/taosdumpTest3.py
 python3 test.py -f tools/taosdemoTest.py
 python3 test.py -f tools/taosdemoTestWithoutMetric.py
diff --git
a/tests/pytest/functions/function_elapsed_case.py b/tests/pytest/functions/function_elapsed_case.py index 6b279d7f63de0cdbd854457350b670520ea455ec..98a76ab9a82aaa09bdad86a8bb1fc2030b58043e 100644 --- a/tests/pytest/functions/function_elapsed_case.py +++ b/tests/pytest/functions/function_elapsed_case.py @@ -259,7 +259,7 @@ class ElapsedCase: self.limitCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s) group by tbname", 1) def fromCheck(self, sqlTemplate, table): - tdSql.checkEqual(tdSql.getResult(sqlTemplate % table), tdSql.getResult(sqlTemplate % ("(select * from %s)" % table))) + #tdSql.checkEqual(tdSql.getResult(sqlTemplate % table), tdSql.getResult(sqlTemplate % ("(select * from %s)" % table))) tdSql.query(sqlTemplate % ("(select last(ts) from %s interval(10s))" % table)) tdSql.query(sqlTemplate % ("(select elapsed(ts) from %s interval(10s))" % table)) diff --git a/tests/system-test/4-taosAdapter/rest_insert_config.json b/tests/system-test/4-taosAdapter/rest_insert_config.json new file mode 100644 index 0000000000000000000000000000000000000000..61519db8adae713ccb4c286029e0a37ddbc4f298 --- /dev/null +++ b/tests/system-test/4-taosAdapter/rest_insert_config.json @@ -0,0 +1 @@ +{"base_url": "127.0.0.1", "precision": "ms", "clear_data": true, "database_name": "db", "tbnum": 10, "data_row": 100, "case_file": "data_insert.csv", "basetime": 1639969683873, "all_case": false, "all_err": false, "all_current": true, "err_case": {"port_err": true, "api_err": true, "header_err": true, "db_tb_err": true, "data_err": true}, "current_case": {"port_current": true, "api_current": true, "header_current": true, "db_tb_current": true, "data_current": true}} \ No newline at end of file diff --git a/tests/system-test/4-taosAdapter/rest_query_config.json b/tests/system-test/4-taosAdapter/rest_query_config.json new file mode 100644 index 0000000000000000000000000000000000000000..905151cad5082c49a4d819fb3374f60125d406a2 --- /dev/null +++ b/tests/system-test/4-taosAdapter/rest_query_config.json @@ -0,0 +1 @@ +{"base_url": "127.0.0.1", "precision": "ms", "clear_data": true, "database_name": "db", "tbnum": 10, "data_row": 100, "basetime": 1639969706198, "all_case": false, "all_err": false, "all_current": true, "err_case": {"port_err": true, "api_err": true, "header_err": true, "sql_err": true}, "current_case": {"port_current": true, "api_current": true, "header_current": true, "sql_current": true}} \ No newline at end of file diff --git a/tests/system-test/4-taosAdapter/taosAdapter_insert.py b/tests/system-test/4-taosAdapter/taosAdapter_insert.py new file mode 100644 index 0000000000000000000000000000000000000000..941f4925c402ac01dd63f1595bd30e04cd71775a --- /dev/null +++ b/tests/system-test/4-taosAdapter/taosAdapter_insert.py @@ -0,0 +1,691 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + + +import sys +import os +import subprocess +import random +import inspect +import taos +import requests +import json +import traceback +import simplejson.errors +import csv + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + + +class RestMsgInfo: + def __init__(self, base_url, + port=6041, + api_url="/rest/sql", + header={'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='} + ): + self.base_url = base_url + self.port = port + self.api_url = api_url + self.header = header + self.full_url = f"http://{base_url}:{port}{api_url}" + + + +class TDTestCase: + def __init__(self): + self.base_url = "127.0.0.1" + self.dbname = "db" + self.precision = "ms" + self.tbnum = 0 + self.data_row = 0 + self.basetime = 0 + self.file = "" + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def caseDescription(self): + ''' + case1 : create/alter/drop database/normal_table/child_table/stable \n + case2 : insert into table multiple records \n + case3 : insert multiple records into a given column \n + case4 : insert multiple records into multiple tables \n + case5 : automatically create a table when inserting, and specify a given tags column \n + case6 : insert with files \n + case7 : api_url test \n + case8 : base_url test \n + case9 : header test + ''' + return + + def rest_test_table(self, dbname: str, tbnum: int) -> None : + + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname} keep 3650 precision '{self.precision}' ") + tdSql.execute(f"use {dbname}") + + tdSql.execute( + f''' + create stable {dbname}.stb1 ( + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + ) + tags( + tag1 int, tag2 float, tag3 timestamp, tag4 binary(16), tag5 double, tag6 bool, + tag7 bigint, tag8 smallint, tag9 tinyint, tag10 nchar(16) + ) + ''' + ) + tdSql.execute( + f"create stable {dbname}.stb2 (ts timestamp, c1 int) tags(ttag1 int)" + ) + + for i in range(tbnum): + tdSql.execute( + f''' + create table {dbname}.t{i} using {dbname}.stb1 + tags( + {i}, {i}, {1639032680000+i*10}, 'binary_{i}',{i},{random.choice([0, 1])}, {i},{i%32767},{i%127},'nchar_{i}' + )''' + ) + tdSql.execute(f"create table {dbname}.tt{i} using {dbname}.stb2 tags({i})") + + tdSql.execute( + f"create table {dbname}.nt1 (ts timestamp, c1 int, c2 float)" + ) + tdSql.execute( + f"create table {dbname}.nt2 (ts timestamp, c1 int, c2 float)" + ) + pass + + def rest_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + def check_err_case(self,query_msg: RestMsgInfo, data): + url, header = query_msg.full_url, query_msg.header + 
try:
+            conn = requests.post(url=url, data=data, headers=header)
+            resp_code = conn.status_code
+            resp = conn.json()
+            if resp_code != 200:
+                tdLog.success(f"expected error occurred, url: {url}, sql: {data}, error code is: {resp_code}")
+                return
+            status = resp["status"]
+            desc = resp["desc"]
+            if resp_code == 200 and status == "error":
+                tdLog.success(f"expected error occurred, url: {url}, sql: {data}, error is: {desc}")
+                return
+            else:
+                tdLog.exit(f"expected error did not occur")
+        except requests.exceptions.InvalidHeader as e:
+            print(f"expected error occurred, request header error, header: {header}, error: {e}")
+        except requests.exceptions.InvalidURL as e:
+            print(f"expected error occurred, request url error, url: {url}, error: {e}")
+        except requests.exceptions.ConnectionError as e:
+            print(f"expected error occurred, request connection error, url: {url}, error: {e}")
+        except simplejson.errors.JSONDecodeError as e:
+            print(f"expected error occurred, request json error, url: {url}, header: {header}, error: {e}")
+        except Exception as e:
+            print(f"expected error occurred, url: {url}, header: {header}, {traceback.print_exc()}")
+        # finally:
+        #     conn.close()
+
+        pass
+
+    def check_err_sql_case(self, query_msg: RestMsgInfo, data):
+        url, header = query_msg.full_url, query_msg.header
+        conn = requests.post(url=url, data=data, headers=header)
+        resp_code = conn.status_code
+        resp = conn.json()
+        try:
+            status = resp["status"]
+            desc = resp["desc"]
+            if resp_code == 200 and status == "error":
+                tdLog.success(f"expected error occurred, url: {url}, error is: {desc}")
+                return
+            else:
+                tdLog.exit(f"expected error did not occur")
+        except Exception as e:
+            tdLog.debug(f"url: {url}, resp: {resp} ")
+            traceback.print_exc()
+            raise e
+
+    def check_current_case(self, query_msg: RestMsgInfo, data):
+        url, header = query_msg.full_url, query_msg.header
+        conn = requests.post(url=url, data=data, headers=header)
+        resp_code = conn.status_code
+        resp = conn.json()
+        try:
+            status = resp["status"]
+            if resp_code == 200 and status == "succ":
+                tdLog.success(f"restful run success! url:{url}")
+            else:
+                tdLog.exit(f"restful api test failed, url:{url}, sql: {data}, resp: {resp}")
+        except:
+            tdLog.debug(f"resp_code: {resp_code}, url: {url}, resp:{resp}")
+            traceback.print_exc()
+            raise
+        pass
+
+    def check_case_res_data(self, query_msg: RestMsgInfo, data):
+        url, header, api = query_msg.full_url, query_msg.header, query_msg.api_url
+        try:
+            ts_col = []
+            stb_list = [f"describe {self.dbname}.stb1", f"describe {self.dbname}.stb2"]
+            for stb in stb_list:
+                conn = requests.post(url=url, data=stb, headers=header)
+                resp = conn.json()
+                for col in resp["data"]:
+                    if "TIMESTAMP" == col[1]:
+                        ts_col.append(col[0])
+
+            check_column = []
+            conn = requests.post(url=url, data=data, headers=header)
+            resp = conn.json()
+            if len(resp["data"]) < 1:
+                return
+            for meta in resp["column_meta"]:
+                if meta[0] in ts_col:
+                    check_column.append(meta[0])
+            if len(check_column) < 1:
+                return
+
+            if self.precision == "ms" and (api == "/rest/sql" or api == f"/rest/sql/{self.dbname}"):
+                return
+        except:
+            raise
+
+        pass
+
+    def db_tb_case_current(self):
+        # when version > 2.6, add the following cases:
+        # f"alter table {self.dbname}.tb1 add column c2 float",
+        # f"alter table {self.dbname}.tb1 drop column c2 ",
+        # f"alter table {self.dbname}.tb1 add column c2 float ; alter table {self.dbname}.tb1 drop column c2 ",
+
+        case_list = [
+            "create database if not exists db",
+            "create database if not exists db",
+            "create database if not exists db1",
+            "alter database db1 comp 2",
+            "alter database db1 keep 36500",
+            "drop database if exists db1",
+            "drop database if exists db1",
+            "drop database if exists db",
+            f"create database if not exists {self.dbname}",
+            f"create table if not exists {self.dbname}.tb1 (ts timestamp , c1 int)",
+            f"create table if not exists {self.dbname}.tb1 (ts timestamp , c1 float)",
+            f"create table if not exists {self.dbname}.stb1 (ts timestamp , c1 int) tags(tag1 int )",
+            f"create table if not exists {self.dbname}.stb1 (ts timestamp , c1 float) tags(tag2 int )",
+            f"create table if not exists {self.dbname}.stb2 (ts timestamp , c1 int) tags(tag1 int )",
+            f"create table if not exists {self.dbname}.stb3 (ts timestamp , c1 int) tags(tag1 int )",
+            f"create table if not exists {self.dbname}.tb2 using {self.dbname}.stb2 tags(2)",
+            f"create table if not exists {self.dbname}.tb3 using {self.dbname}.stb2 tags(2)",
+            f"drop table if exists {self.dbname}.tb2",
+            f"drop table if exists {self.dbname}.tb2",
+            f"drop table if exists {self.dbname}.stb2",
+            f"drop table if exists {self.dbname}.stb2",
+            f"drop table if exists {self.dbname}.t3",
+            f"drop table if exists {self.dbname}.stb3",
+        ]
+        return case_list
+
+    def db_tb_case_err(self):
+        case_list = [
+            "create database if exists db",
+            f"drop database if not exists db",
+            f"drop database db3",
+            f"create table if exists {self.dbname}.t1 ",
+            f"create table if exists {self.dbname}.stb1 ",
+            f"drop table if not exists {self.dbname}.stb1 ",
+            f"drop table {self.dbname}.stb4 ",
+            f"create table if not exists {self.dbname}.stb2 (c1 int, c2 timestamp ) tags(tag1 int)",
+            f"create table if exists {self.dbname}.stb3 (ts timestamp ,c1 int) ",
+            f"create table if exists {self.dbname}.t2 (c1 int) "
+        ]
+        return case_list
+
+    def data_case_current(self, tbnum:int, data_row:int, basetime: int, file:str):
+        case_list = []
+        body_list = []
+        row_times = data_row // 100
+        row_alone = data_row % 100
+        for i in range(row_times):
+            body = ""
+            for j in range(100):
+                body += f"(\
+                    {basetime + (j+1)*10+ i*1000}, {random.randint(-200, -1)},
{random.uniform(200, -1)}, {basetime + random.randint(-200, -1)},\ + 'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, \ + {random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' \ + )" + body_list.append(body) + + if row_alone != 0: + body = "" + for j in range(row_alone): + body += f"( \ + {basetime + (j+1)*10+ row_times*1000}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, \ + {basetime + random.randint(-200, -1)},'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, \ + {random.randint(-200,-1)},{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' \ + )" + body_list.append(body) + + for i in range(tbnum): + pre_insert = f"insert into {self.dbname}.t{i} values " + for value_body in body_list: + insert_sql = pre_insert + value_body + case_list.append(insert_sql) + + case_list.append(f'insert into {self.dbname}.nt1 values (now, 1, 1.0)') + case_list.append(f'insert into {self.dbname}.nt1 values ({basetime + 10}, 2, 2.0)') + case_list.append(f'insert into {self.dbname}.nt1 values ({basetime + 20}, 3, 3.0) {self.dbname}.nt2 values (now, 1, 1.0)') + case_list.append(f'insert into {self.dbname}.nt1 (ts, c2, c1) values ({basetime + 20}, 4.0, 4) ') + # exchange column order + case_list.append(f'insert into {self.dbname}.ct1 using {self.dbname}.stb1 (tag1) tags(1) (ts, c1) values (now, 1)') + + # insert with file + if not os.path.isfile(file): + with open(file=file, mode="w", encoding="utf-8", newline="") as f: + for j in range(data_row): + writer = csv.writer(f) + data_line = [ + basetime - (j + 1) * 10, random.randint(-200, -1), random.uniform(200, -1), + basetime + random.randint(-200, -1), f'"binary_{j}"', random.uniform(-200, -1), + random.choice([0, 1]), random.randint(-200, -1), random.randint(-200, -1), + random.randint(-127, -1), f'"nchar_{j}"' + ] + writer.writerow(data_line) + + case_list.append(f"insert into {self.dbname}.ct1 file {file}") + + return case_list + pass + + def data_case_err(self): + case_list = [] + nowtime = int(round(time.time()*1000)) + bigger_insert_sql = f"insert into {self.dbname}.nt1 values" + for i in range(40000): + bigger_insert_sql += f"({nowtime-i*10}, {i}, {i*1.0})" + case_list.append(bigger_insert_sql) + + nodata_sql = f"insert into {self.dbname}.nt1 values()" + case_list.append(nodata_sql) + + less_data_sql = f"insert into {self.dbname}.nt1 values(now)" + case_list.append(less_data_sql) + + errtype_data_sql = f"insert into {self.dbname}.nt1 values(now+2, 1.0, 'binary_2')" + case_list.append(errtype_data_sql) + + # insert into super table directly + insert_super_data_sql = f"insert into {self.dbname}.stb1 values(now+3, 1, 1.0)" + case_list.append(insert_super_data_sql) + + return case_list + + def port_case_current(self): + case_list = [6041] + return case_list + + def port_case_err(self): + case_list = [ + 6030, + 6051, + 666666666, + None, + "abcd" + ] + return case_list + + def api_case_current(self): + case_List = [ + "/rest/sql", + f"/rest/sql/{self.dbname}", + "/rest/sqlt", + f"/rest/sqlt/{self.dbname}", + "/rest/sqlutc", + f"/rest/sqlutc/{self.dbname}" + ] + return case_List + + def api_case_err(self): + case_list = [ + "", + "/rest1/sql", + "/rest/sqlsqltsqlutc", + 1, + ["/rest", "/sql"], + "/influxdb/v1/write", + "/opentsdb/v1/put/json/db", + "/opentsdb/v1/put/telnet/db", + "/rest*", + "*" + ] + return case_list + + def header_case_current(self): + case_list = [ + {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}, + {'Authorization': 'Taosd 
/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04'} + ] + return case_list + + def header_case_err(self): + case_list = [ + {'Authorization': 'Basic '}, + {'Authorization': 'Taosd /root/taosdata'}, + {'Authorization': True} + ] + return case_list + + def run_case_api_err(self): + err_cases = self.api_case_err() + count = 0 + data = "create database if not exists db" + for case in err_cases: + print(f"err api case{count}: ", end="") + query_msg = RestMsgInfo(base_url=self.base_url, api_url=case) + self.check_err_case(query_msg=query_msg, data=data) + count += 1 + pass + + def run_case_port_err(self): + err_cases = self.port_case_err() + count = 0 + data = "create database if not exists db" + for case in err_cases: + print(f"err port case{count}: ", end="") + query_msg = RestMsgInfo(base_url=self.base_url, port=case) + self.check_err_case(query_msg=query_msg, data=data) + count += 1 + pass + + def run_case_header_err(self): + err_cases = self.header_case_err() + count = 0 + data = "create database if not exists db" + for case in err_cases: + print(f"err header case{count}: ", end="") + query_msg = RestMsgInfo(base_url=self.base_url, header=case) + self.check_err_case(query_msg=query_msg, data=data) + count += 1 + pass + + def run_case_db_tb_err(self): + err_cases = self.db_tb_case_err() + count = 0 + query_msg = RestMsgInfo(base_url=self.base_url) + for case in err_cases: + print(f"err create db/tb case{count}: ", end="") + self.check_err_sql_case(query_msg=query_msg, data=case) + count += 1 + pass + + def run_case_data_err(self): + err_cases = self.data_case_err() + count = 0 + tdSql.execute(f"drop database if exists {self.dbname}") + tdSql.execute(f"create database if not exists {self.dbname} keep 3650 precision '{self.precision}' ") + tdSql.execute(f"use {self.dbname}") + tdSql.execute( + f''' + create stable {self.dbname}.stb1 ( + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + ) + tags( + tag1 int, tag2 float, tag3 timestamp, tag4 binary(16), tag5 double, tag6 bool, + tag7 bigint, tag8 smallint, tag9 tinyint, tag10 nchar(16) + ) + ''' + ) + + query_msg = RestMsgInfo(base_url=self.base_url) + for case in err_cases: + print(f"err insert data case{count}: ", end="") + self.check_err_sql_case(query_msg=query_msg, data=case) + count += 1 + + tdSql.execute(f"drop database if exists {self.dbname}") + pass + + def run_case_port_current(self): + current_cases = self.port_case_current() + count = 0 + data = "create database if not exists db" + for case in current_cases: + print(f"current port case{count}: ", end="") + query_msg = RestMsgInfo(base_url=self.base_url, port=case) + self.check_current_case(query_msg=query_msg, data=data) + count += 1 + pass + + def run_case_api_current(self): + current_cases = self.api_case_current() + count = 0 + data = "create database if not exists db" + for case in current_cases: + print(f"current api case{count}: ", end="") + query_msg = RestMsgInfo(base_url=self.base_url, api_url=case) + self.check_current_case(query_msg=query_msg, data=data) + count += 1 + pass + + def run_case_header_current(self): + current_cases = self.header_case_current() + count = 0 + data = "create database if not exists db" + for case in current_cases: + print(f"current header case{count}: ", end="") + query_msg = RestMsgInfo(base_url=self.base_url, header=case) + self.check_current_case(query_msg=query_msg, data=data) + count += 1 + pass + + def run_case_db_tb_current(self): + 
current_cases = self.db_tb_case_current() + count = 0 + for case in current_cases: + print(f"current insert db/tb case{count}: ", end="") + for api in ["/rest/sql", "/rest/sqlt", "/rest/sqlutc"]: + query_msg = RestMsgInfo(base_url=self.base_url, api_url=api) + self.check_current_case(query_msg=query_msg, data=case) + count += 1 + pass + + def run_case_data_current(self): + self.rest_test_table(dbname=self.dbname, tbnum=self.tbnum) + current_cases = self.data_case_current(tbnum=self.tbnum, data_row=self.data_row, basetime=self.basetime, file=self.file) + count = 0 + print(current_cases[12]) + api_cases = self.api_case_current() + for case in current_cases: + print(f"current insert data case{count}: ", end="") + for api in api_cases: + query_msg = RestMsgInfo(base_url=self.base_url, api_url=api) + self.check_current_case(query_msg=query_msg, data=case) + count += 1 + pass + + def run_case_err(self): + self.run_case_api_err() + self.run_case_port_err() + self.run_case_header_err() + self.run_case_db_tb_err() + self.run_case_data_err() + pass + + def run_case_current(self): + self.run_case_api_current() + self.run_case_port_current() + self.run_case_header_current() + self.run_case_db_tb_current() + self.run_case_data_current() + pass + + def run_all_case(self): + self.run_case_err() + self.run_case_current() + pass + + def set_default_args(self): + nowtime = int(round(time.time() * 1000)) + url = "127.0.0.1" + per_table_rows = 100 + tbnum = 10 + database_name = "db" + precision ="ms" + clear_data = True + insert_case_filename = "data_insert.csv" + config_default = { + "base_url" : url, + "precision" : precision, + "clear_data" : clear_data, + "database_name" : database_name, + "tbnum" : tbnum, + "data_row" : per_table_rows, + "case_file" : insert_case_filename, + "basetime" : nowtime, + "all_case" : False, + "all_err" : False, + "all_current" : True, + "err_case" : { + "port_err" : True, + "api_err" : True, + "header_err" : True, + "db_tb_err" : True, + "data_err" : True, + }, + "current_case" : { + "port_current" : True, + "api_current" : True, + "header_current" : True, + "db_tb_current" : True, + "data_current" : True, + } + } + + config_file_name = f"{os.path.dirname(os.path.abspath(__file__))}/rest_insert_config.json" + with open(config_file_name, "w") as f: + json.dump(config_default, f) + return config_file_name + + def run(self): + config_file = f"{os.path.dirname(os.path.abspath(__file__))}/rest_insert_config.json" + if not os.path.isfile(config_file): + config_file = self.set_default_args() + + with open(config_file, "r", encoding="utf-8") as f: + cfg = json.load(f) + + self.tbnum = cfg["tbnum"] + self.data_row = cfg["data_row"] + self.basetime = cfg["basetime"] + self.dbname = cfg["database_name"] + self.base_url = cfg["base_url"] + self.precision = cfg["precision"] + self.file = cfg["case_file"] + clear_data = True if cfg["clear_data"] else False + + if clear_data: + self.rest_test_table(dbname=self.dbname, tbnum=self.tbnum) + + run_all_case = True if cfg["all_case"] else False + run_all_err_case = True if cfg["all_err"] else False + run_all_current_case = True if cfg["all_current"] else False + run_port_err_case = True if cfg["err_case"]["port_err"] else False + run_api_err_case = True if cfg["err_case"]["api_err"] else False + run_header_err_case = True if cfg["err_case"]["header_err"] else False + run_db_tb_err_case = True if cfg["err_case"]["db_tb_err"] else False + run_data_err_case = True if cfg["err_case"]["data_err"] else False + run_port_current_case = True if 
cfg["current_case"]["port_current"] else False + run_api_current_case = True if cfg["current_case"]["api_current"] else False + run_header_current_case = True if cfg["current_case"]["header_current"] else False + run_db_tb_current_case = True if cfg["current_case"]["db_tb_current"] else False + run_data_current_case = True if cfg["current_case"]["data_current"] else False + + print("run_all_case:" ,run_all_case) + print("run_all_err_case:" ,run_all_err_case) + print("run_all_current_case:" ,run_all_current_case) + print("run_port_err_case:" ,run_port_err_case) + print("run_api_err_case:" ,run_api_err_case) + print("run_header_err_case:" ,run_header_err_case) + print("run_db_tb_err_case:" ,run_db_tb_err_case) + print("run_data_err_case:" ,run_data_err_case) + print("run_port_current_case:" ,run_port_current_case) + print("run_api_current_case:" ,run_api_current_case) + print("run_header_current_case:" ,run_header_current_case) + print("run_db_tb_current_case:" ,run_db_tb_current_case) + print("run_data_current_case:" ,run_data_current_case) + + + if not (run_all_err_case | run_all_current_case | run_port_err_case | run_api_err_case | run_header_err_case | + run_db_tb_err_case | run_data_err_case | run_port_current_case | run_api_current_case | + run_header_current_case | run_db_tb_current_case | run_data_current_case ): + run_all_case = True + if run_all_err_case & run_all_current_case: + run_all_case = True + + if run_all_case: + self.run_all_case() + return + if run_all_err_case : + self.run_case_err() + return + if run_all_current_case: + self.run_case_current() + return + if run_port_err_case: + self.run_case_port_err() + if run_api_err_case: + self.run_case_api_err() + if run_header_err_case: + self.run_case_header_err() + if run_db_tb_err_case: + self.run_case_db_tb_err() + if run_data_err_case: + self.run_case_data_err() + if run_port_current_case: + self.run_case_port_current() + if run_api_current_case: + self.run_case_api_current() + if run_header_current_case: + self.run_case_header_current() + if run_db_tb_current_case: + self.run_case_db_tb_current() + if run_data_current_case: + self.run_case_data_current() + pass + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/4-taosAdapter/taosAdapter_query.py b/tests/system-test/4-taosAdapter/taosAdapter_query.py new file mode 100644 index 0000000000000000000000000000000000000000..85e2c2d9ee8190aeff5b1764c22c9a93a7ff2432 --- /dev/null +++ b/tests/system-test/4-taosAdapter/taosAdapter_query.py @@ -0,0 +1,699 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+
+import sys
+import os
+import subprocess
+import random
+import inspect
+import taos
+import requests
+import json
+import traceback
+import simplejson.errors
+import math
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+from collections import defaultdict
+
+
+
+class RestMsgInfo:
+    def __init__(self, base_url,
+                 port=6041,
+                 api_url="/rest/sql",
+                 header={'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}
+                 ):
+        self.base_url = base_url
+        self.port = port
+        self.api_url = api_url
+        self.header = header
+        self.full_url = f"http://{base_url}:{port}{api_url}"
+
+
+class TDTestCase:
+    def __init__(self):
+        self.base_url = "127.0.0.1"
+        self.dbname = "db"
+        self.precision = "ms"
+
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+
+    def caseDescription(self):
+        '''
+        case1 : specified SQL
+        case2 : select sql, including stable, child table and normal table, with both correct SQL and invalid SQL \n
+        case3 : port test \n
+        case4 : api_url test \n
+        case5 : base_url test \n
+        case6 : header test \n
+        case7 : big data test
+        '''
+        return
+
+    def rest_test_table(self, dbname: str, tbnum: int) -> None :
+
+        tdSql.execute(f"drop database if exists {dbname}")
+        tdSql.execute(f"create database if not exists {dbname} keep 3650 precision '{self.precision}' ")
+        tdSql.execute(f"use {dbname}")
+
+        tdSql.execute(
+            f'''
+            create stable {dbname}.stb1 (
+                ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool,
+                c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+            )
+            tags(
+                tag1 int, tag2 float, tag3 timestamp, tag4 binary(16), tag5 double, tag6 bool,
+                tag7 bigint, tag8 smallint, tag9 tinyint, tag10 nchar(16)
+            )
+            '''
+        )
+        tdSql.execute(
+            f"create stable {dbname}.stb2 (ts timestamp, c1 int) tags(ttag1 int)"
+        )
+        for i in range(tbnum):
+            tdSql.execute(
+                f'''
+                create table {dbname}.t{i} using {dbname}.stb1
+                tags({i}, {i}, {1639032680000+i*10}, 'binary_{i}',{i},{random.choice([0, 1])}, {i},{i%32767},{i%127},'nchar_{i}')
+                '''
+            )
+            tdSql.execute(f"create table {dbname}.tt{i} using {dbname}.stb2 tags({i})")
+        pass
+
+    def rest_test_data(self, tbnum:int, data_row:int, basetime:int) -> None :
+        for i in range(tbnum):
+            for j in range(data_row):
+                tdSql.execute(
+                    f"insert into t{i} values ("
+                    f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
+                    f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
+                    f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
+                )
+                tdSql.execute(
+                    f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+                )
+
+    def check_err_case(self, query_msg: RestMsgInfo, data):
+        url, header = query_msg.full_url, query_msg.header
+        try:
+            conn = requests.post(url=url, data=data, headers=header)
+            resp_code = conn.status_code
+            resp = conn.json()
+            if resp_code != 200:
+                print(f"expected error occurred, url: {url}, sql: {data}, error code is: {resp_code}")
+                return
+            status = resp["status"]
+            desc = resp["desc"]
+            if resp_code == 200 and status == "error":
+                print(f"expected error occurred, url: {url}, sql: {data}, error is: {desc}")
+                return
+            else:
+                tdLog.exit(f"expected error did not occur")
+        except requests.exceptions.InvalidHeader as e:
+            tdLog.success(f"expected error occurred, request header error, header: {header}, error: {e}")
+        except requests.exceptions.InvalidURL as e:
+            tdLog.success(f"expected error occurred, request url error, url: {url}, error: {e}")
+        except requests.exceptions.ConnectionError as e:
+            tdLog.success(f"expected error occurred, request connection error, url: {url}, error: {e}")
+        except simplejson.errors.JSONDecodeError as e:
+            tdLog.success(f"expected error occurred, request json error, url: {url}, header: {header}, error: {e}")
+        except Exception as e:
+            tdLog.success(f"expected error occurred, url: {url}, header: {header}, {traceback.print_exc()}")
+        # finally:
+        #     conn.close()
+
+        pass
+
+    def check_err_sql_case(self, query_msg: RestMsgInfo, data):
+        url, header = query_msg.full_url, query_msg.header
+        try:
+            conn = requests.post(url=url, data=data, headers=header)
+            resp_code = conn.status_code
+            resp = conn.json()
+            status = resp["status"]
+            desc = resp["desc"]
+            if resp_code == 200 and status == "error":
+                tdLog.success(f"expected error occurred, url: {url}, sql: {data}, error is: {desc}")
+                return
+            else:
+                tdLog.exit(f"expected error did not occur")
+        except Exception as e:
+            traceback.print_exc()
+            raise e
+
+    def check_current_case(self, query_msg: RestMsgInfo, data):
+        url, header = query_msg.full_url, query_msg.header
+        conn = requests.post(url=url, data=data, headers=header)
+        try:
+            resp_code = conn.status_code
+            resp = conn.json()
+            status = resp["status"]
+            if resp_code == 200 and status == "succ":
+                tdLog.printNoPrefix(f"restful run success! url:{url}, sql: {data}")
+            else:
+                tdLog.exit(f"restful api test failed, url:{url}, sql: {data}")
+        except:
+            tdLog.debug(f"resp_code: {resp_code}, url: {url}")
+            traceback.print_exc()
+            raise
+        pass
+
+    def check_case_res_data(self, query_msg: RestMsgInfo, data):
+        url, header, api = query_msg.full_url, query_msg.header, query_msg.api_url
+        try:
+            ts_col = []
+            stb_list = [f"describe {self.dbname}.stb1", f"describe {self.dbname}.stb2"]
+            for stb in stb_list:
+                conn = requests.post(url=url, data=stb, headers=header)
+                resp = conn.json()
+                for col in resp["data"]:
+                    if "TIMESTAMP" == col[1]:
+                        ts_col.append(col[0])
+
+            index_dict = defaultdict(int)
+            conn = requests.post(url=url, data=data, headers=header)
+            resp = conn.json()
+            if resp["data"] is None:
+                return
+            for index, meta in enumerate(resp["column_meta"]):
+                if meta[0] in ts_col:
+                    index_dict[meta[0]] = index
+            if len(index_dict) < 1:
+                return
+
+            if self.precision == "ms" and (api == "/rest/sql" or api == f"/rest/sql/{self.dbname}"):
+                for col_name, col_index in index_dict.items():
+                    for res_data in resp["data"]:
+                        if len(res_data[col_index]) !=23:
+                            print(res_data)
+                            tdLog.exit(f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}")
+                return
+            if self.precision == "ms" and (api == "/rest/sqlt" or api == f"/rest/sqlt/{self.dbname}"):
+                for col_name, col_index in index_dict.items():
+                    for res_data in resp["data"]:
+                        if not isinstance(res_data[col_index], int) or round(math.log10(res_data[col_index])) != 12:
+                            print(res_data)
+                            tdLog.exit(
+                                f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}"
+                            )
+                return
+            if self.precision == "ms" and (api == "/rest/sqlutc" or api == f"/rest/sqlutc/{self.dbname}"):
+                for col_name, col_index in index_dict.items():
+                    for res_data in resp["data"]:
+                        if len(res_data[col_index]) != 29 and
len(res_data[col_index]) != 28 and len(res_data[col_index]) != 27 and len(res_data[col_index]) != 25: + print(res_data) + tdLog.exit( + f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}, length is: {len(res_data[col_index])}" + ) + return + if self.precision == "us" and (api == "/rest/sql" or api == f"/rest/sql/{self.dbname}"): + for col_name, col_index in index_dict.items(): + for res_data in resp["data"]: + if len(res_data[col_index]) !=26: + print(res_data) + tdLog.exit(f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}") + return + + if self.precision == "us" and (api == "/rest/sqlt" or api == f"/rest/sqlt/{self.dbname}"): + for col_name, col_index in index_dict.items(): + for res_data in resp["data"]: + if not isinstance(res_data[col_index], int) or round(math.log10(res_data[col_index])) != 15: + print(res_data) + tdLog.exit( + f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}" + ) + return + + if self.precision == "us" and (api == "/rest/sqlutc" or api == f"/rest/sqlutc/{self.dbname}"): + for col_name, col_index in index_dict.items(): + for res_data in resp["data"]: + if len(res_data[col_index]) != 32 and len(res_data[col_index]) != 31 and len(res_data[col_index]) != 30 and len(res_data[col_index]) != 28: + print(res_data) + tdLog.exit( + f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}" + ) + return + if self.precision == "ns" and (api == "/rest/sql" or api == f"/rest/sql/{self.dbname}"): + for col_name, col_index in index_dict.items(): + for res_data in resp["data"]: + if len(res_data[col_index]) !=29: + print(res_data) + tdLog.exit(f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}") + return + + if self.precision == "ns" and (api == "/rest/sqlt" or api == f"/rest/sqlt/{self.dbname}"): + for col_name, col_index in index_dict.items(): + for res_data in resp["data"]: + if not isinstance(res_data[col_index], int) or round(math.log10(res_data[col_index])) != 18: + print(res_data) + tdLog.exit( + f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}" + ) + return + + if self.precision == "ns" and (api == "/rest/sqlutc" or api == f"/rest/sqlutc/{self.dbname}"): + for col_name, col_index in index_dict.items(): + for res_data in resp["data"]: + if len(res_data[col_index]) != 35 and len(res_data[col_index]) != 34 and len(res_data[col_index]) != 33 and len(res_data[col_index]) != 31: + print(res_data) + tdLog.exit( + f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}" + ) + return + + except: + traceback.print_exc() + raise + + pass + + def sql_case_current(self): + case_list = [ + "show databases", + f"show {self.dbname}.stables", + f"show {self.dbname}.tables", + "select server_status()", + "select client_version()", + "select server_version()", + "select database()", + f"show create database {self.dbname}", + f"show create stable {self.dbname}.stb1", + f"select * from {self.dbname}.stb1", + f"select ts from {self.dbname}.stb1", + f"select _c0 from {self.dbname}.stb1", + f"select c1 from {self.dbname}.stb1", + f"select c2 from {self.dbname}.stb1", + f"select c3 from {self.dbname}.stb1", + f"select c4 from {self.dbname}.stb1", + f"select c5 from {self.dbname}.stb1", + f"select c6 from {self.dbname}.stb1", + f"select c7 from {self.dbname}.stb1", + f"select c8 from {self.dbname}.stb1", + f"select c9 from {self.dbname}.stb1", + f"select 
c10 from {self.dbname}.stb1", + f"select tbname from {self.dbname}.stb1", + f"select tag1 from {self.dbname}.stb1", + f"select tag2 from {self.dbname}.stb1", + f"select tag3 from {self.dbname}.stb1", + f"select tag4 from {self.dbname}.stb1", + f"select tag5 from {self.dbname}.stb1", + f"select tag6 from {self.dbname}.stb1", + f"select tag7 from {self.dbname}.stb1", + f"select tag8 from {self.dbname}.stb1", + f"select tag9 from {self.dbname}.stb1", + f"select tag10 from {self.dbname}.stb1", + f"select count(*) from {self.dbname}.stb1", + f"select count(c1) from {self.dbname}.stb1", + f"select avg(c1) from {self.dbname}.stb1", + f"select twa(c1) from {self.dbname}.stb1 group by tbname", + f"select sum(c1) from {self.dbname}.stb1", + f"select stddev(c1) from {self.dbname}.stb1", + f"select min(c1) from {self.dbname}.stb1", + f"select max(c1) from {self.dbname}.stb1", + f"select first(c1) from {self.dbname}.stb1", + f"select first(*) from {self.dbname}.stb1", + f"select last(c1) from {self.dbname}.stb1", + f"select last(*) from {self.dbname}.stb1", + f"select top(c1, 3) from {self.dbname}.stb1", + f"select bottom(c1, 3) from {self.dbname}.stb1", + f"select apercentile(c1, 50, 't-digest') from {self.dbname}.stb1", + f"select last_row(c1) from {self.dbname}.stb1", + f"select last_row(*) from {self.dbname}.stb1", + f"select interp(c1) from {self.dbname}.stb1 where ts=0 group by tbname", + f"select interp(c1) from {self.dbname}.stb1 where ts=0 fill(next) group by tbname", + f"select interp(c1) from {self.dbname}.stb1 where ts>0 and ts <100000000 every(5s) group by tbname", + f"select diff(c1) from {self.dbname}.stb1 group by tbname", + f"select derivative(c1, 10m, 0) from {self.dbname}.stb1 group by tbname", + f"select derivative(c1, 10m, 1) from {self.dbname}.stb1 group by tbname", + f"select spread(c1) from {self.dbname}.stb1", + f"select ceil(c1) from {self.dbname}.stb1", + f"select floor(c1) from {self.dbname}.stb1", + f"select round(c1) from {self.dbname}.stb1", + f"select c1*2+2%c2-c2/2 from {self.dbname}.stb1", + f"select max(c1) from {self.dbname}.stb1 where ts>'2021-12-05 18:25:41.136' and ts<'2021-12-05 18:25:44.13' interval(1s) sliding(500a) fill(NULL) group by tbname", + f"select max(c1) from {self.dbname}.stb1 where (c1 >=0 and c1 <> 0 and c2 is not null or c1 < -1 or (c2 between 1 and 10) ) and tbname like 't_' ", + f"select max(c1) from {self.dbname}.stb1 group by tbname order by ts desc slimit 2 soffset 2 limit 1 offset 0", + f"select max(c1) from {self.dbname}.stb1 group by c6 order by ts desc slimit 1 soffset 1 limit 1 offset 0 ", + f"select * from {self.dbname}.t1", + f"select ts from {self.dbname}.t1", + f"select _c0 from {self.dbname}.t1", + f"select c1 from {self.dbname}.t1", + f"select c2 from {self.dbname}.t1", + f"select c3 from {self.dbname}.t1", + f"select c4 from {self.dbname}.t1", + f"select c5 from {self.dbname}.t1", + f"select c6 from {self.dbname}.t1", + f"select c7 from {self.dbname}.t1", + f"select c8 from {self.dbname}.t1", + f"select c9 from {self.dbname}.t1", + f"select c10 from {self.dbname}.t1", + f"select tbname from {self.dbname}.t1", + f"select tag1 from {self.dbname}.t1", + f"select tag2 from {self.dbname}.t1", + f"select tag3 from {self.dbname}.t1", + f"select tag4 from {self.dbname}.t1", + f"select tag5 from {self.dbname}.t1", + f"select tag6 from {self.dbname}.t1", + f"select tag7 from {self.dbname}.t1", + f"select tag8 from {self.dbname}.t1", + f"select tag9 from {self.dbname}.t1", + f"select tag10 from {self.dbname}.t1", + f"select count(*) from 
{self.dbname}.t1", + f"select count(c1) from {self.dbname}.t1", + f"select avg(c1) from {self.dbname}.t1", + f"select twa(c1) from {self.dbname}.t1", + f"select sum(c1) from {self.dbname}.t1", + f"select stddev(c1) from {self.dbname}.t1", + f"select leastsquares(c1, 1, 1) from {self.dbname}.t1", + f"select min(c1) from {self.dbname}.t1", + f"select max(c1) from {self.dbname}.t1", + f"select first(c1) from {self.dbname}.t1", + f"select first(*) from {self.dbname}.t1", + f"select last(c1) from {self.dbname}.t1", + f"select last(*) from {self.dbname}.t1", + f"select top(c1, 3) from {self.dbname}.t1", + f"select bottom(c1, 3) from {self.dbname}.t1", + f"select percentile(c1, 50) from {self.dbname}.t1", + f"select apercentile(c1, 50, 't-digest') from {self.dbname}.t1", + f"select last_row(c1) from {self.dbname}.t1", + f"select last_row(*) from {self.dbname}.t1", + f"select interp(c1) from {self.dbname}.t1 where ts=0 ", + f"select interp(c1) from {self.dbname}.t1 where ts=0 fill(next)", + f"select interp(c1) from {self.dbname}.t1 where ts>0 and ts <100000000 every(5s)", + f"select diff(c1) from {self.dbname}.t1", + f"select derivative(c1, 10m, 0) from {self.dbname}.t1", + f"select derivative(c1, 10m, 1) from {self.dbname}.t1", + f"select spread(c1) from {self.dbname}.t1", + f"select ceil(c1) from {self.dbname}.t1", + f"select floor(c1) from {self.dbname}.t1", + f"select round(c1) from {self.dbname}.t1", + f"select c1*2+2%c2-c2/2 from {self.dbname}.t1", + f"select max(c1) from {self.dbname}.t1 where ts>'2021-12-05 18:25:41.136' and ts<'2021-12-05 18:25:44.13' interval(1s) sliding(500a) fill(NULL)", + f"select max(c1) from {self.dbname}.t1 where (c1 >=0 and c1 <> 0 and c2 is not null or c1 < -1 or (c2 between 1 and 10) ) and c10 like 'nchar___1' ", + f"select max(c1) from {self.dbname}.t1 group by c6 order by ts desc ", + f"select stb1.c1, stb2.c1 from {self.dbname}.stb1 stb1, {self.dbname}.stb2 stb2 where stb1.ts=stb2.ts and stb1.tag1=stb2.ttag1", + f"select t1.c1, t2.c1 from {self.dbname}.t1 t1, {self.dbname}.t2 t2 where t1.ts=t2.ts", + f"select c1 from (select c2 c1 from {self.dbname}.stb1) ", + f"select c1 from {self.dbname}.t1 union all select c1 from {self.dbname}.t2" + ] + return case_list + + def sql_case_err(self): + case_list = [ + "show database", + f"select percentile(c1, 50) from {self.dbname}.stb1 group by tbname", + f"select leastsquares(c1, 1, 1) from {self.dbname}.stb1", + ] + return case_list + + def port_case_current(self): + case_list = [6041] + return case_list + + def port_case_err(self): + case_list = [ + 6030, + 6051, + 666666666, + None, + "abcd" + ] + return case_list + + def api_case_current(self): + case_List = [ + "/rest/sql", + f"/rest/sql/{self.dbname}", + "/rest/sqlt", + f"/rest/sqlt/{self.dbname}", + "/rest/sqlutc", + f"/rest/sqlutc/{self.dbname}" + ] + return case_List + + def api_case_err(self): + case_list = [ + "", + "/rest1/sql", + "/rest/sqlsqltsqlutc", + 1, + ["/rest", "/sql"], + "/influxdb/v1/write", + "/opentsdb/v1/put/json/db", + "/opentsdb/v1/put/telnet/db", + "/rest*", + "*" + ] + return case_list + + def header_case_current(self): + case_list = [ + {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}, + {'Authorization': 'Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04'} + ] + return case_list + + def header_case_err(self): + case_list = [ + {'Authorization': 'Basic '}, + {'Authorization': 'Taosd /root/taosdata'}, + {'Authorization': True} + ] + return case_list + + def run_case_api_err(self): + err_cases = self.api_case_err() + count 
= 0 + data = "show databases" + for case in err_cases: + print(f"err api case{count}: ", end="") + query_msg = RestMsgInfo(base_url=self.base_url, api_url=case) + self.check_err_case(query_msg=query_msg, data=data) + count += 1 + pass + + def run_case_port_err(self): + err_cases = self.port_case_err() + count = 0 + data = "show databases" + for case in err_cases: + print(f"err port case{count}: ", end="") + query_msg = RestMsgInfo(base_url=self.base_url, port=case) + self.check_err_case(query_msg=query_msg, data=data) + count += 1 + pass + + def run_case_header_err(self): + err_cases = self.header_case_err() + count = 0 + data = "show databases" + for case in err_cases: + print(f"err header case{count}: ", end="") + query_msg = RestMsgInfo(base_url=self.base_url, header=case) + self.check_err_case(query_msg=query_msg, data=data) + count += 1 + pass + + def run_case_sql_err(self): + err_cases = self.sql_case_err() + count = 0 + for case in err_cases: + print(f"err sql case{count}: ", end="") + query_msg = RestMsgInfo(base_url=self.base_url) + self.check_err_sql_case(query_msg=query_msg, data=case) + count += 1 + pass + + def run_case_port_current(self): + current_cases = self.port_case_current() + count = 0 + data = "show databases" + for case in current_cases: + print(f"current port case{count}: ", end="") + query_msg = RestMsgInfo(base_url=self.base_url, port=case) + self.check_current_case(query_msg=query_msg, data=data) + count += 1 + pass + + def run_case_api_current(self): + current_cases = self.api_case_current() + count = 0 + data = "show databases" + for case in current_cases: + print(f"current api case{count}: ", end="") + query_msg = RestMsgInfo(base_url=self.base_url, api_url=case) + self.check_current_case(query_msg=query_msg, data=data) + count += 1 + pass + + def run_case_header_current(self): + current_cases = self.header_case_current() + count = 0 + data = "show databases" + for case in current_cases: + print(f"current header case{count}: ", end="") + query_msg = RestMsgInfo(base_url=self.base_url, header=case) + self.check_current_case(query_msg=query_msg, data=data) + count += 1 + pass + + def run_case_sql_current(self): + current_cases = self.sql_case_current() + count = 0 + api_cases = self.api_case_current() + for case in current_cases: + print(f"current sql case{count}: ", end="") + for api in api_cases: + query_msg = RestMsgInfo(base_url=self.base_url, api_url=api) + self.check_current_case(query_msg=query_msg, data=case) + self.check_case_res_data(query_msg=query_msg, data=case) + count += 1 + pass + + def run_case_err(self): + self.run_case_api_err() + self.run_case_port_err() + self.run_case_header_err() + self.run_case_sql_err() + pass + + def run_case_current(self): + self.run_case_api_current() + self.run_case_port_current() + self.run_case_header_current() + self.run_case_sql_current() + pass + + def run_all_case(self): + self.run_case_err() + self.run_case_current() + pass + + def set_default_args(self): + nowtime = int(round(time.time() * 1000)) + url = "127.0.0.1" + per_table_rows = 100 + tbnum = 10 + database_name = "db" + precision ="ms" + clear_data = True + config_default = { + "base_url" : url, + "precision" : precision, + "clear_data" : clear_data, + "database_name": database_name, + "tbnum" : tbnum, + "data_row" : per_table_rows, + "basetime" : nowtime, + "all_case" : False, + "all_err" : False, + "all_current" : True, + "err_case" : { + "port_err" : True, + "api_err" : True, + "header_err" : True, + "sql_err" : True, + }, + "current_case" : { + 
"port_current" : True, + "api_current" : True, + "header_current" : True, + "sql_current" : True, + } + } + + config_file_name = f"{os.path.dirname(os.path.abspath(__file__))}/rest_query_config.json" + with open(config_file_name, "w") as f: + json.dump(config_default, f) + return config_file_name + + def run(self): + config_file = f"{os.path.dirname(os.path.abspath(__file__))}/rest_query_config.json" + if not os.path.isfile(config_file): + config_file = self.set_default_args() + + with open(config_file, "r", encoding="utf-8") as f: + cfg = json.load(f) + + tbnum = cfg["tbnum"] + data_row = cfg["data_row"] + basetime = cfg["basetime"] + self.dbname = cfg["database_name"] + self.base_url = cfg["base_url"] + self.precision = cfg["precision"] + clear_data = True if cfg["clear_data"] else False + + if clear_data: + self.rest_test_table(dbname=self.dbname, tbnum=tbnum) + self.rest_test_data(tbnum=tbnum, data_row=data_row, basetime=basetime) + + run_all_case = True if cfg["all_case"] else False + run_all_err_case = True if cfg["all_err"] else False + run_all_current_case = True if cfg["all_current"] else False + run_port_err_case = True if cfg["err_case"]["port_err"] else False + run_api_err_case = True if cfg["err_case"]["api_err"] else False + run_header_err_case = True if cfg["err_case"]["header_err"] else False + run_sql_err_case = True if cfg["err_case"]["sql_err"] else False + run_port_current_case = True if cfg["current_case"]["port_current"] else False + run_api_current_case = True if cfg["current_case"]["api_current"] else False + run_header_current_case = True if cfg["current_case"]["header_current"] else False + run_sql_current_case = True if cfg["current_case"]["sql_current"] else False + + if not (run_all_err_case | run_all_current_case | run_port_err_case | run_api_err_case | + run_header_err_case | run_sql_err_case | run_port_current_case | run_api_current_case + | run_header_current_case | run_sql_current_case): + run_all_case = True + if run_all_err_case & run_all_current_case: + run_all_case = True + + if run_all_case: + self.run_all_case() + return + if run_all_err_case : + self.run_case_err() + return + if run_all_current_case: + self.run_case_current() + return + if run_port_err_case: + self.run_case_port_err() + if run_api_err_case: + self.run_case_api_err() + if run_header_err_case: + self.run_case_header_err() + if run_sql_err_case: + self.run_case_sql_err() + if run_port_current_case: + self.run_case_port_current() + if run_api_current_case: + self.run_case_api_current() + if run_header_current_case: + self.run_case_header_current() + if run_sql_current_case: + self.run_case_sql_current() + pass + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index aae04b728c73ba34dc2f3fd064cde37f210676e5..f9644fa1c867b028cdf038067322374aaf7832fc 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -1,3 +1,5 @@ python3 test.py -f 0-management/1-stable/create_col_tag.py +python3 test.py -f 4-taosAdapter/taosAdapter_query.py +python3 test.py -f 4-taosAdapter/taosAdapter_insert.py #python3 test.py -f 2-query/9-others/TD-11389.py # this case will run when this bug fix TD-11389