提交 71917854 编写于 作者: W wenzhouwww

Merge branch 'develop' of github.com:taosdata/TDengine into develop

Subproject commit ea6b3e973b477b8061e0076bb257dbd7f3faa756
Subproject commit 9015e129bd7de389afa4196495451669700904d0
......@@ -83,9 +83,10 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
* [Windows客户端](https://www.taosdata.com/blog/2019/07/26/514.html):自行编译windows客户端,Windows环境的各种连接器都需要它
* [Rust Connector](/connector/rust): Rust语言下通过libtaos客户端或RESTful接口,连接TDengine服务器。
## [TDengine 组件与工具](/tools/adapter)
## [TDengine 组件与工具](/cn/documentation/)
* [taosAdapter用户手册](/tools/adapter)
* [taosAdapter 用户手册](/tools/adapter)
* [TDinsight 用户手册](/tools/insight)
## [与其他工具的连接](/connections)
......
......@@ -14,7 +14,7 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/
TDengine 的 Grafana 插件请从 <https://github.com/taosdata/grafanaplugin/releases/latest> 下载。
```bash
GF_VERSION=3.1.1
GF_VERSION=3.1.3
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
```
......@@ -75,15 +75,7 @@ allow_loading_unsigned_plugins = tdengine-datasource
#### 导入 Dashboard
我们提供一个 TDengine Dashboard 可以作为 TDengine 集群的监控可视化工具使用,见 [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146)
点击左侧 `Import` 按钮,选择 **Grafana.com Dashboard**,将 id `15146` 填入并加载:
![img](../images/connections/import_dashboard1.jpg)
导入完成之后可看到如下效果:
![img](../images/connections/dashboard-15146.png)
在 2.3.3.0 及以上版本,您可以导入 TDinsight Dashboard (Grafana Dashboard ID: [15167](https://grafana.com/grafana/dashboards/15167)) 作为 TDengine 集群的监控可视化工具。安装和使用说明请见 [TDinsight 用户手册](https://www.taosdata.com/cn/documentation/tools/insight)
## <a class="anchor" id="matlab"></a>MATLAB
......
......@@ -79,9 +79,10 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [Windows Client](https://www.taosdata.com/blog/2019/07/26/514.html): compile your own Windows client, which is required by various connectors on the Windows environment
- [Rust Connector](/connector/rust): A taosc/RESTful API based TDengine client for Rust
## [Components and Tools](/tools/adapter)
## [Components and Tools](/cn/documentation/)
* [taosAdapter](/tools/adapter)
* [taosAdapter User Manual](/tools/adapter)
* [TDinsight User Manual](/tools/insight)
## [Connections with Other Tools](/connections)
......
......@@ -15,7 +15,7 @@ https://grafana.com/grafana/download.
Download grafana plugin from <https://github.com/taosdata/grafanaplugin/releases/latest> .
```bash
GF_VERSION=3.1.1
GF_VERSION=3.1.3
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
```
......@@ -69,15 +69,7 @@ According to the default prompt, query the average system memory usage at the sp
#### Import Dashboard
We provide an example dashboard [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146)
Click the `Import` button on the left panel, select **Grafana.com Dashboard**, then enter the Grafana dashboard id `15146` and load it:
![img](../images/connections/import_dashboard1.jpg)
After the Dashboard is imported, you can see the following effect:
![img](../images/connections/dashboard-15146.png)
We provide a TDinsight dashboard (via Grafana dashboard id: [15167](https://grafana.com/grafana/dashboards/15167)) for TDengine cluster monitoring since TDengine 2.3.3.x . Please refer to [TDinsight User Manual](https://www.taosdata.com/en/documentation/tools/insight) for the details.
## <a class="anchor" id="matlab"></a> MATLAB
......
......@@ -1705,7 +1705,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
SSqlObj* pSql = pStmt->pSql;
SSqlCmd* pCmd = &pSql->cmd;
uint32_t nameLen = (uint32_t)strlen(name);
int32_t nameLen = (int32_t)strlen(name);
if (name == NULL || nameLen <= 0) {
tscError("0x%"PRIx64" tbname is NULL", pSql->self);
......@@ -1727,18 +1727,22 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
SStrToken tname = {0};
tname.type = TK_STRING;
tname.z = (char *)strdup(name);
tname.n = (uint32_t)strlen(name);
if (!tname.z) {
tscError("0x%" PRIx64 " out of memory", pSql->self);
STMT_RET(TSDB_CODE_TSC_OUT_OF_MEMORY);
}
tname.n = (uint32_t)strlen(tname.z);
bool dbIncluded = false;
// Check if the table name available or not
if (tscValidateName(&tname, true, &dbIncluded) != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" tbname[%s] is invalid", pSql->self, name);
tscError("0x%" PRIx64 " tbname[%s] is invalid", pSql->self, tname.z);
free(tname.z);
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "name is invalid"));
}
uint64_t* uid = (uint64_t*)taosHashGet(pStmt->mtb.pTableHash, name, strlen(name));
uint64_t* uid = (uint64_t*)taosHashGet(pStmt->mtb.pTableHash, tname.z, tname.n);
if (uid != NULL) {
pStmt->mtb.currentUid = *uid;
......@@ -1752,6 +1756,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
if ((*t1)->pData == NULL) {
code = tscCreateDataBlockData(*t1, TSDB_PAYLOAD_SIZE, (*t1)->pTableMeta->tableInfo.rowSize, sizeof(SSubmitBlk));
if (code != TSDB_CODE_SUCCESS) {
free(tname.z);
STMT_RET(code);
}
}
......@@ -1766,7 +1771,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
taosHashPut(pCmd->insertParam.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)t1, POINTER_BYTES);
tscDebug("0x%"PRIx64" table:%s is already prepared, uid:%" PRIu64, pSql->self, name, pStmt->mtb.currentUid);
tscDebug("0x%" PRIx64 " table:%s is already prepared, uid:%" PRIu64, pSql->self, tname.z, pStmt->mtb.currentUid);
free(tname.z);
STMT_RET(TSDB_CODE_SUCCESS);
}
......@@ -1779,18 +1784,19 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
SName fullname = {0};
tscSetTableFullName(&fullname, &tname, pSql, dbIncluded);
free(tname.z);
memcpy(&pTableMetaInfo->name, &fullname, sizeof(fullname));
code = tscGetTableMetaEx(pSql, pTableMetaInfo, false, true);
if (code != TSDB_CODE_SUCCESS) {
free(tname.z);
STMT_RET(code);
}
pTableMeta = pTableMetaInfo->pTableMeta;
if (strcmp(sTableName, pTableMeta->sTableName)) {
free(tname.z);
tscError("0x%"PRIx64" only tables belongs to one stable is allowed", pSql->self);
STMT_RET(TSDB_CODE_TSC_APP_ERROR);
}
......@@ -1806,25 +1812,26 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
taosHashPut(pCmd->insertParam.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES);
taosHashPut(pStmt->mtb.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES);
taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid));
taosHashPut(pStmt->mtb.pTableHash, tname.z, tname.n, (char*)&pTableMeta->id.uid, sizeof(pTableMeta->id.uid));
tscDebug("0x%"PRIx64" table:%s is prepared, uid:%" PRIx64, pSql->self, name, pStmt->mtb.currentUid);
tscDebug("0x%" PRIx64 " table:%s is prepared, uid:%" PRIx64, pSql->self, tname.z, pStmt->mtb.currentUid);
free(tname.z);
STMT_RET(TSDB_CODE_SUCCESS);
}
free(tname.z);
if (pStmt->mtb.tagSet) {
pStmt->mtb.tbname = tscReplaceStrToken(&pSql->sqlstr, &pStmt->mtb.tbname, name);
pStmt->mtb.tbname = tscReplaceStrToken(&pSql->sqlstr, &pStmt->mtb.tbname, tname.z);
} else {
if (tags == NULL) {
tscError("No tags set");
free(tname.z);
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "no tags set"));
}
int32_t ret = stmtGenInsertStatement(pSql, pStmt, name, tags);
int32_t ret = stmtGenInsertStatement(pSql, pStmt, tname.z, tags);
if (ret != TSDB_CODE_SUCCESS) {
free(tname.z);
STMT_RET(ret);
}
}
......@@ -1859,6 +1866,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
code = tscGetDataBlockFromList(pCmd->insertParam.pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
if (code != TSDB_CODE_SUCCESS) {
free(tname.z);
STMT_RET(code);
}
......@@ -1869,15 +1877,16 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
pStmt->mtb.tbNum++;
taosHashPut(pStmt->mtb.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES);
taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid));
taosHashPut(pStmt->mtb.pTableHash, tname.z, tname.n, (char*)&pTableMeta->id.uid, sizeof(pTableMeta->id.uid));
if (pStmt->mtb.lastBlock == NULL) {
insertStmtGenLastBlock(&pStmt->mtb.lastBlock, pBlock);
}
tscDebug("0x%"PRIx64" table:%s is prepared, uid:%" PRIx64, pSql->self, name, pStmt->mtb.currentUid);
tscDebug("0x%" PRIx64 " table:%s is prepared, uid:%" PRIx64, pSql->self, tname.z, pStmt->mtb.currentUid);
}
free(tname.z);
STMT_RET(code);
}
......
......@@ -132,8 +132,7 @@ int32_t converToStr(char *str, int type, void *buf, int32_t bufSize, int32_t *le
return TSDB_CODE_SUCCESS;
}
static void tscStrToLower(char *str, int32_t n) {
UNUSED_FUNC static void tscStrToLower(char* str, int32_t n) {
if (str == NULL || n <= 0) { return;}
for (int32_t i = 0; i < n; i++) {
if (str[i] >= 'A' && str[i] <= 'Z') {
......@@ -3029,7 +3028,8 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded)
if (pToken->type == TK_STRING) {
tscDequoteAndTrimToken(pToken);
tscStrToLower(pToken->z, pToken->n);
// tscStrToLower(pToken->z, pToken->n);
strntolower(pToken->z, pToken->z, pToken->n);
//pToken->n = (uint32_t)strtrim(pToken->z);
int len = tGetToken(pToken->z, &pToken->type);
......@@ -3083,7 +3083,8 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded)
if (validateQuoteToken(pToken, escapeEnabled, NULL) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
} else {
tscStrToLower(pToken->z,pToken->n);
// tscStrToLower(pToken->z,pToken->n);
strntolower(pToken->z, pToken->z, pToken->n);
firstPartQuote = true;
}
}
......@@ -3101,7 +3102,8 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded)
if (validateQuoteToken(pToken, escapeEnabled, NULL) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
} else {
tscStrToLower(pToken->z,pToken->n);
// tscStrToLower(pToken->z,pToken->n);
strntolower(pToken->z, pToken->z, pToken->n);
}
}
......
......@@ -37,7 +37,7 @@
#define HTTP_BUFFER_SIZE 8388608
#define HTTP_STEP_SIZE 4096 //http message get process step by step
#define HTTP_METHOD_SCANNER_SIZE 7 //http method fp size
#define HTTP_GC_TARGET_SIZE 512
#define HTTP_GC_TARGET_SIZE 16384
#define HTTP_WRITE_RETRY_TIMES 500
#define HTTP_WRITE_WAIT_TIME_MS 5
#define HTTP_PASSWORD_LEN TSDB_UNI_LEN
......
......@@ -130,14 +130,34 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
// for group by
if (groupFields != -1) {
char target[HTTP_GC_TARGET_SIZE] = {0};
int32_t len;
len = snprintf(target, HTTP_GC_TARGET_SIZE, "%s{", aliasBuffer);
int32_t len = 0, cur = 0;
cur = snprintf(target, HTTP_GC_TARGET_SIZE, "%s{", aliasBuffer);
if (cur < 0 || cur >= HTTP_GC_TARGET_SIZE) {
httpError("context:%p, fd:%d, too long alias: %s", pContext, pContext->fd, aliasBuffer);
return false;
}
len += cur;
for (int32_t i = dataFields + 1; i < num_fields; i++) {
// -2 means the last '}' and '\0'
#define HTTP_GC_CHECK_SIZE(name) if (cur < 0 || cur >= HTTP_GC_TARGET_SIZE - len - 2) { \
if (cur < 0) { \
httpError("context:%p, fd:%d, failed to snprintf for: %s", pContext, pContext->fd, name); \
} else { \
httpError("context:%p, fd:%d, snprintf overflow for: %s", pContext, pContext->fd, name); \
target[len] = '\0'; \
} \
break; \
} else { \
len += cur; \
}
if (row[i] == NULL) {
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:nil", fields[i].name);
cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:nil", fields[i].name);
HTTP_GC_CHECK_SIZE(fields[i].name)
if (i < num_fields - 1) {
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, ", ");
cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, ", ");
HTTP_GC_CHECK_SIZE(fields[i].name)
}
continue;
......@@ -146,40 +166,49 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
switch (fields[i].type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%d", fields[i].name, *((int8_t *)row[i]));
cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:%d", fields[i].name, *((int8_t *)row[i]));
HTTP_GC_CHECK_SIZE(fields[i].name)
break;
case TSDB_DATA_TYPE_SMALLINT:
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%d", fields[i].name, *((int16_t *)row[i]));
cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:%d", fields[i].name, *((int16_t *)row[i]));
HTTP_GC_CHECK_SIZE(fields[i].name)
break;
case TSDB_DATA_TYPE_INT:
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%d,", fields[i].name, *((int32_t *)row[i]));
cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:%d,", fields[i].name, *((int32_t *)row[i]));
HTTP_GC_CHECK_SIZE(fields[i].name)
break;
case TSDB_DATA_TYPE_BIGINT:
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%" PRId64, fields[i].name, *((int64_t *)row[i]));
cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:%" PRId64, fields[i].name, *((int64_t *)row[i]));
HTTP_GC_CHECK_SIZE(fields[i].name)
break;
case TSDB_DATA_TYPE_FLOAT:
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%.5f", fields[i].name, GET_FLOAT_VAL(row[i]));
cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:%.5f", fields[i].name, GET_FLOAT_VAL(row[i]));
HTTP_GC_CHECK_SIZE(fields[i].name)
break;
case TSDB_DATA_TYPE_DOUBLE:
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%.9f", fields[i].name, GET_DOUBLE_VAL(row[i]));
cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:%.9f", fields[i].name, GET_DOUBLE_VAL(row[i]));
HTTP_GC_CHECK_SIZE(fields[i].name)
break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
if (row[i] != NULL) {
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:", fields[i].name);
memcpy(target + len, (char *)row[i], length[i]);
cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:", fields[i].name);
HTTP_GC_CHECK_SIZE(fields[i].name)
memcpy(target + len, (char *)row[i], MIN(length[i], HTTP_GC_TARGET_SIZE - len - 3));
len = (int32_t)strlen(target);
}
break;
default:
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%s", fields[i].name, "-");
cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, "%s:%s", fields[i].name, "-");
HTTP_GC_CHECK_SIZE(fields[i].name)
break;
}
if (i < num_fields - 1) {
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, ", ");
cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 2, ", ");
HTTP_GC_CHECK_SIZE(fields[i].name)
}
}
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "}");
cur = snprintf(target + len, HTTP_GC_TARGET_SIZE - len - 1, "}");
if (strcmp(target, targetBuffer) != 0) {
// first target not write this section
......
python3 test.py -f 0-management/3-tag/json_tag.py
python3 test.py -f 1-insert/0-sql/basic.py
python3 test.py -f 1-insert/0-sql/batchInsert.py
\ No newline at end of file
python3 test.py -f 1-insert/0-sql/batchInsert.py
......@@ -17,7 +17,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.35</version>
<version>2.0.36</version>
</dependency>
</dependencies>
......
package com.taosdata.example;
import com.taosdata.jdbc.TSDBDriver;
import java.sql.*;
import java.util.Properties;
public class ClientParameterSetting {
private static final String host = "127.0.0.1";
public static void main(String[] args) throws SQLException {
setParameterInJdbcUrl();
setParameterInProperties();
}
private static void setParameterInJdbcUrl() throws SQLException {
String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?debugFlag=135&asyncLog=0";
Connection connection = DriverManager.getConnection(jdbcUrl, "root", "taosdata");
printDatabase(connection);
connection.close();
}
private static void setParameterInProperties() throws SQLException {
String jdbcUrl = "jdbc:TAOS://" + host + ":6030/";
Properties properties = new Properties();
properties.setProperty("user", "root");
properties.setProperty("password", "taosdata");
properties.setProperty("debugFlag", "135");
properties.setProperty("asyncLog", "0");
properties.setProperty("maxSQLLength", "1048576");
try (Connection conn = DriverManager.getConnection(jdbcUrl, properties)) {
printDatabase(conn);
}
}
private static void printDatabase(Connection connection) throws SQLException {
try (Statement stmt = connection.createStatement()) {
ResultSet rs = stmt.executeQuery("show databases");
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
}
System.out.println();
}
}
}
}
package com.taosdata.example;
import com.taosdata.jdbc.TSDBPreparedStatement;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Random;
public class ParameterBindingDemo {
private static final String host = "127.0.0.1";
private static final Random random = new Random(System.currentTimeMillis());
private static final int BINARY_COLUMN_SIZE = 20;
private static final String[] schemaList = {
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
};
private static final int numOfSubTable = 10, numOfRow = 10;
public static void main(String[] args) throws SQLException {
String jdbcUrl = "jdbc:TAOS://" + host + ":6030/";
Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");
init(conn);
bindInteger(conn);
bindFloat(conn);
bindBoolean(conn);
bindBytes(conn);
bindString(conn);
conn.close();
}
private static void init(Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists test_parabind");
stmt.execute("create database if not exists test_parabind");
stmt.execute("use test_parabind");
for (int i = 0; i < schemaList.length; i++) {
stmt.execute(schemaList[i]);
}
}
}
private static void bindInteger(Connection conn) throws SQLException {
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t1_" + i);
// set tags
pstmt.setTagByte(0, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
pstmt.setTagShort(1, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
pstmt.setTagInt(2, random.nextInt(Integer.MAX_VALUE));
pstmt.setTagLong(3, random.nextLong());
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<Byte> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
f1List.add(Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
pstmt.setByte(1, f1List);
ArrayList<Short> f2List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
f2List.add(Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
pstmt.setShort(2, f2List);
ArrayList<Integer> f3List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
f3List.add(random.nextInt(Integer.MAX_VALUE));
pstmt.setInt(3, f3List);
ArrayList<Long> f4List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
f4List.add(random.nextLong());
pstmt.setLong(4, f4List);
// add column
pstmt.columnDataAddBatch();
}
// execute column
pstmt.columnDataExecuteBatch();
}
}
private static void bindFloat(Connection conn) throws SQLException {
String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";
TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class);
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t2_" + i);
// set tags
pstmt.setTagFloat(0, random.nextFloat());
pstmt.setTagDouble(1, random.nextDouble());
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<Float> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
f1List.add(random.nextFloat());
pstmt.setFloat(1, f1List);
ArrayList<Double> f2List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
f2List.add(random.nextDouble());
pstmt.setDouble(2, f2List);
// add column
pstmt.columnDataAddBatch();
}
// execute
pstmt.columnDataExecuteBatch();
// close if no try-with-catch statement is used
pstmt.close();
}
private static void bindBoolean(Connection conn) throws SQLException {
String sql = "insert into ? using stable3 tags(?) values(?,?)";
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t3_" + i);
// set tags
pstmt.setTagBoolean(0, random.nextBoolean());
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<Boolean> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
f1List.add(random.nextBoolean());
pstmt.setBoolean(1, f1List);
// add column
pstmt.columnDataAddBatch();
}
// execute
pstmt.columnDataExecuteBatch();
}
}
private static void bindBytes(Connection conn) throws SQLException {
String sql = "insert into ? using stable4 tags(?) values(?,?)";
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t4_" + i);
// set tags
pstmt.setTagString(0, new String("abc"));
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<String> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++) {
f1List.add(new String("abc"));
}
pstmt.setString(1, f1List, BINARY_COLUMN_SIZE);
// add column
pstmt.columnDataAddBatch();
}
// execute
pstmt.columnDataExecuteBatch();
}
}
private static void bindString(Connection conn) throws SQLException {
String sql = "insert into ? using stable5 tags(?) values(?,?)";
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t5_" + i);
// set tags
pstmt.setTagNString(0, "北京-abc");
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<String> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++) {
f1List.add("北京-abc");
}
pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE);
// add column
pstmt.columnDataAddBatch();
}
// execute
pstmt.columnDataExecuteBatch();
}
}
}
......@@ -25,7 +25,6 @@ clean:
rm $(ROOT)asyncdemo
rm $(ROOT)demo
rm $(ROOT)prepare
rm $(ROOT)batchprepare
rm $(ROOT)stream
rm $(ROOT)subscribe
rm $(ROOT)apitest
......@@ -857,6 +857,542 @@ void verify_prepare3(TAOS* taos) {
}
/**
* @brief Verify the upper/lower case of tableName for create(by setTableName)/query/show/describe/drop.
* https://jira.taosdata.com:18080/browse/TS-904
* https://jira.taosdata.com:18090/pages/viewpage.action?pageId=129140555
* @param taos
*/
void verify_prepare4(TAOS* taos) {
printf("Verify the upper/lower case of tableName for create(by setTableName)/query/show/describe/drop etc.\n");
TAOS_RES* result = taos_query(taos, "drop database if exists test;");
taos_free_result(result);
usleep(100000);
result = taos_query(taos, "create database test;");
int code = taos_errno(result);
if (code != 0) {
printf("\033[31mfailed to create database, reason:%s\033[0m\n", taos_errstr(result));
taos_free_result(result);
exit(EXIT_FAILURE);
}
taos_free_result(result);
usleep(100000);
taos_select_db(taos, "test");
// create table
const char* sql =
"create stable st1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin "
"binary(40), blob nchar(10), u1 tinyint unsigned, u2 smallint unsigned, u4 int unsigned, u8 bigint unsigned) "
"tags "
"(b_tag bool, v1_tag tinyint, v2_tag smallint, v4_tag int, v8_tag bigint, f4_tag float, f8_tag double, bin_tag "
"binary(40), blob_tag nchar(10), u1_tag tinyint unsigned, u2_tag smallint unsigned, u4_tag int unsigned, u8_tag "
"bigint "
"unsigned)";
result = taos_query(taos, sql);
code = taos_errno(result);
if (code != 0) {
printf("\033[31mfailed to create table, reason:%s\033[0m\n", taos_errstr(result));
taos_free_result(result);
exit(EXIT_FAILURE);
}
taos_free_result(result);
TAOS_BIND tags[13];
struct {
int8_t b;
int8_t v1;
int16_t v2;
int32_t v4;
int64_t v8;
float f4;
double f8;
char bin[40];
char blob[80];
uint8_t u1;
uint16_t u2;
uint32_t u4;
uint64_t u8;
} id = {0};
id.b = (int8_t)1;
id.v1 = (int8_t)1;
id.v2 = (int16_t)2;
id.v4 = (int32_t)4;
id.v8 = (int64_t)8;
id.f4 = (float)40;
id.f8 = (double)80;
for (int j = 0; j < sizeof(id.bin); ++j) {
id.bin[j] = (char)('1' + '0');
}
strcpy(id.blob, "一二三四五六七八九十");
id.u1 = (uint8_t)1;
id.u2 = (uint16_t)2;
id.u4 = (uint32_t)4;
id.u8 = (uint64_t)8;
tags[0].buffer_type = TSDB_DATA_TYPE_BOOL;
tags[0].buffer_length = sizeof(id.b);
tags[0].buffer = &id.b;
tags[0].length = &tags[0].buffer_length;
tags[0].is_null = NULL;
tags[1].buffer_type = TSDB_DATA_TYPE_TINYINT;
tags[1].buffer_length = sizeof(id.v1);
tags[1].buffer = &id.v1;
tags[1].length = &tags[1].buffer_length;
tags[1].is_null = NULL;
tags[2].buffer_type = TSDB_DATA_TYPE_SMALLINT;
tags[2].buffer_length = sizeof(id.v2);
tags[2].buffer = &id.v2;
tags[2].length = &tags[2].buffer_length;
tags[2].is_null = NULL;
tags[3].buffer_type = TSDB_DATA_TYPE_INT;
tags[3].buffer_length = sizeof(id.v4);
tags[3].buffer = &id.v4;
tags[3].length = &tags[3].buffer_length;
tags[3].is_null = NULL;
tags[4].buffer_type = TSDB_DATA_TYPE_BIGINT;
tags[4].buffer_length = sizeof(id.v8);
tags[4].buffer = &id.v8;
tags[4].length = &tags[4].buffer_length;
tags[4].is_null = NULL;
tags[5].buffer_type = TSDB_DATA_TYPE_FLOAT;
tags[5].buffer_length = sizeof(id.f4);
tags[5].buffer = &id.f4;
tags[5].length = &tags[5].buffer_length;
tags[5].is_null = NULL;
tags[6].buffer_type = TSDB_DATA_TYPE_DOUBLE;
tags[6].buffer_length = sizeof(id.f8);
tags[6].buffer = &id.f8;
tags[6].length = &tags[6].buffer_length;
tags[6].is_null = NULL;
tags[7].buffer_type = TSDB_DATA_TYPE_BINARY;
tags[7].buffer_length = sizeof(id.bin);
tags[7].buffer = &id.bin;
tags[7].length = &tags[7].buffer_length;
tags[7].is_null = NULL;
tags[8].buffer_type = TSDB_DATA_TYPE_NCHAR;
tags[8].buffer_length = strlen(id.blob);
tags[8].buffer = &id.blob;
tags[8].length = &tags[8].buffer_length;
tags[8].is_null = NULL;
tags[9].buffer_type = TSDB_DATA_TYPE_UTINYINT;
tags[9].buffer_length = sizeof(id.u1);
tags[9].buffer = &id.u1;
tags[9].length = &tags[9].buffer_length;
tags[9].is_null = NULL;
tags[10].buffer_type = TSDB_DATA_TYPE_USMALLINT;
tags[10].buffer_length = sizeof(id.u2);
tags[10].buffer = &id.u2;
tags[10].length = &tags[10].buffer_length;
tags[10].is_null = NULL;
tags[11].buffer_type = TSDB_DATA_TYPE_UINT;
tags[11].buffer_length = sizeof(id.u4);
tags[11].buffer = &id.u4;
tags[11].length = &tags[11].buffer_length;
tags[11].is_null = NULL;
tags[12].buffer_type = TSDB_DATA_TYPE_UBIGINT;
tags[12].buffer_length = sizeof(id.u8);
tags[12].buffer = &id.u8;
tags[12].length = &tags[12].buffer_length;
tags[12].is_null = NULL;
// insert 10 records
struct {
int64_t ts[10];
int8_t b[10];
int8_t v1[10];
int16_t v2[10];
int32_t v4[10];
int64_t v8[10];
float f4[10];
double f8[10];
char bin[10][40];
char blob[10][80];
uint8_t u1[10];
uint16_t u2[10];
uint32_t u4[10];
uint64_t u8[10];
} v;
int32_t* t8_len = malloc(sizeof(int32_t) * 10);
int32_t* t16_len = malloc(sizeof(int32_t) * 10);
int32_t* t32_len = malloc(sizeof(int32_t) * 10);
int32_t* t64_len = malloc(sizeof(int32_t) * 10);
int32_t* float_len = malloc(sizeof(int32_t) * 10);
int32_t* double_len = malloc(sizeof(int32_t) * 10);
int32_t* bin_len = malloc(sizeof(int32_t) * 10);
int32_t* blob_len = malloc(sizeof(int32_t) * 10);
int32_t* u8_len = malloc(sizeof(int32_t) * 10);
int32_t* u16_len = malloc(sizeof(int32_t) * 10);
int32_t* u32_len = malloc(sizeof(int32_t) * 10);
int32_t* u64_len = malloc(sizeof(int32_t) * 10);
TAOS_MULTI_BIND params[14];
char is_null[10] = {0};
params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
params[0].buffer_length = sizeof(v.ts[0]);
params[0].buffer = v.ts;
params[0].length = t64_len;
params[0].is_null = is_null;
params[0].num = 10;
params[1].buffer_type = TSDB_DATA_TYPE_BOOL;
params[1].buffer_length = sizeof(v.b[0]);
params[1].buffer = v.b;
params[1].length = t8_len;
params[1].is_null = is_null;
params[1].num = 10;
params[2].buffer_type = TSDB_DATA_TYPE_TINYINT;
params[2].buffer_length = sizeof(v.v1[0]);
params[2].buffer = v.v1;
params[2].length = t8_len;
params[2].is_null = is_null;
params[2].num = 10;
params[3].buffer_type = TSDB_DATA_TYPE_SMALLINT;
params[3].buffer_length = sizeof(v.v2[0]);
params[3].buffer = v.v2;
params[3].length = t16_len;
params[3].is_null = is_null;
params[3].num = 10;
params[4].buffer_type = TSDB_DATA_TYPE_INT;
params[4].buffer_length = sizeof(v.v4[0]);
params[4].buffer = v.v4;
params[4].length = t32_len;
params[4].is_null = is_null;
params[4].num = 10;
params[5].buffer_type = TSDB_DATA_TYPE_BIGINT;
params[5].buffer_length = sizeof(v.v8[0]);
params[5].buffer = v.v8;
params[5].length = t64_len;
params[5].is_null = is_null;
params[5].num = 10;
params[6].buffer_type = TSDB_DATA_TYPE_FLOAT;
params[6].buffer_length = sizeof(v.f4[0]);
params[6].buffer = v.f4;
params[6].length = float_len;
params[6].is_null = is_null;
params[6].num = 10;
params[7].buffer_type = TSDB_DATA_TYPE_DOUBLE;
params[7].buffer_length = sizeof(v.f8[0]);
params[7].buffer = v.f8;
params[7].length = double_len;
params[7].is_null = is_null;
params[7].num = 10;
params[8].buffer_type = TSDB_DATA_TYPE_BINARY;
params[8].buffer_length = sizeof(v.bin[0]);
params[8].buffer = v.bin;
params[8].length = bin_len;
params[8].is_null = is_null;
params[8].num = 10;
params[9].buffer_type = TSDB_DATA_TYPE_NCHAR;
params[9].buffer_length = sizeof(v.blob[0]);
params[9].buffer = v.blob;
params[9].length = blob_len;
params[9].is_null = is_null;
params[9].num = 10;
params[10].buffer_type = TSDB_DATA_TYPE_UTINYINT;
params[10].buffer_length = sizeof(v.u1[0]);
params[10].buffer = v.u1;
params[10].length = u8_len;
params[10].is_null = is_null;
params[10].num = 10;
params[11].buffer_type = TSDB_DATA_TYPE_USMALLINT;
params[11].buffer_length = sizeof(v.u2[0]);
params[11].buffer = v.u2;
params[11].length = u16_len;
params[11].is_null = is_null;
params[11].num = 10;
params[12].buffer_type = TSDB_DATA_TYPE_UINT;
params[12].buffer_length = sizeof(v.u4[0]);
params[12].buffer = v.u4;
params[12].length = u32_len;
params[12].is_null = is_null;
params[12].num = 10;
params[13].buffer_type = TSDB_DATA_TYPE_UBIGINT;
params[13].buffer_length = sizeof(v.u8[0]);
params[13].buffer = v.u8;
params[13].length = u64_len;
params[13].is_null = is_null;
params[13].num = 10;
// verify table names for upper/lower case
#define VERIFY_CNT 5

// One row of the table-name case-sensitivity matrix: the same logical table
// referenced five different ways (raw / quoted / backquoted) for each kind of
// statement exercised below.
typedef struct {
  char setTbName[20];    // name passed to taos_stmt_set_tbname_tags()
  char showName[20];     // pattern used in "show tables like ..."
  char describeName[20]; // name used in "describe ..."
  char queryName[20];    // name used in "SELECT * FROM ..."
  char dropName[20];     // name used in "drop table ..."
} STbNames;

/**
 * @brief
 * 0 - success expected
 * NonZero - fail expected
 */
typedef struct {
  int32_t setTbName;
  int32_t showName;
  int32_t describeName;
  int32_t queryName;
  int32_t dropName;
} STbNamesResult;
STbNames tbName[VERIFY_CNT] = {0};
STbNamesResult tbNameResult[VERIFY_CNT] = {0};
STbNames* pTbName = NULL;
STbNamesResult* pTbNameResult = NULL;
pTbName = &tbName[0];
pTbNameResult = &tbNameResult[0];
strcpy(pTbName->setTbName, "Mn1");
strcpy(pTbName->showName, "mn1");
strcpy(pTbName->describeName, "mn1");
strcpy(pTbName->queryName, "mn1");
strcpy(pTbName->dropName, "mn1");
pTbName = &tbName[1];
pTbNameResult = &tbNameResult[1];
strcpy(pTbName->setTbName, "'Mn1'");
strcpy(pTbName->showName, "'mn1'");
strcpy(pTbName->describeName, "'mn1'");
strcpy(pTbName->queryName, "'mn1'");
strcpy(pTbName->dropName, "'mn1'");
pTbName = &tbName[2];
pTbNameResult = &tbNameResult[2];
strcpy(pTbName->setTbName, "\"Mn1\"");
strcpy(pTbName->showName, "\"mn1\"");
strcpy(pTbName->describeName, "\"mn1\"");
strcpy(pTbName->queryName, "\"mn1\"");
strcpy(pTbName->dropName, "\"mn1\"");
pTbName = &tbName[3];
pTbNameResult = &tbNameResult[3];
strcpy(pTbName->setTbName, "\"Mn1\"");
strcpy(pTbName->showName, "'mn1'");
strcpy(pTbName->describeName, "'mn1'");
strcpy(pTbName->queryName, "mn1");
strcpy(pTbName->dropName, "\"mn1\"");
pTbName = &tbName[4];
pTbNameResult = &tbNameResult[4];
strcpy(pTbName->setTbName, "`Mn1`");
strcpy(pTbName->showName, "Mn1"); // TODO support uniform of ``
strcpy(pTbName->describeName, "`Mn1`");
strcpy(pTbName->queryName, "`Mn1`");
strcpy(pTbName->dropName, "`Mn1`");
TAOS_STMT* stmt = NULL;
for (int n = 0; n < VERIFY_CNT; ++n) {
printf("\033[31m[%d] ===================================\033[0m\n", n);
pTbName = &tbName[n];
pTbNameResult = &tbNameResult[n];
char tmpStr[256] = {0};
// set table name
stmt = taos_stmt_init(taos);
if (!stmt) {
  // Bug fix: the format string has an "error:%s" conversion but no matching
  // argument was passed, which is undefined behavior and prints garbage.
  // taos_stmt_init() returned NULL, so there is no stmt handle to ask for an
  // error string; use the connection-level taos_errstr(NULL) instead.
  printf("\033[31m[%d] failed to execute taos_stmt_init. error:%s\033[0m\n", n, taos_errstr(NULL));
  exit(EXIT_FAILURE);
}
sql = "insert into ? using st1 tags(?,?,?,?,?,?,?,?,?,?,?,?,?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)";
code = taos_stmt_prepare(stmt, sql, 0);
if (code != 0) {
printf("\033[31mfailed to execute taos_stmt_prepare. error:%s\033[0m\n", taos_stmt_errstr(stmt));
taos_stmt_close(stmt);
exit(EXIT_FAILURE);
}
printf("[%d] taos_stmt_set_tbname_tags, tbname=%s\n", n, pTbName->setTbName);
code = taos_stmt_set_tbname_tags(stmt, pTbName->setTbName, tags);
if ((!pTbNameResult->setTbName && (0 != code)) || (pTbNameResult->setTbName && (0 == code))) {
printf("\033[31m[%d] failed to execute taos_stmt_set_tbname_tags. error:%s\033[0m\n", n, taos_stmt_errstr(stmt));
taos_stmt_close(stmt);
exit(EXIT_FAILURE);
}
if (code == 0) {
int64_t ts = 1591060628000 + 1000 * n;
for (int i = 0; i < 10; ++i) {
v.ts[i] = ts++;
is_null[i] = 0;
v.b[i] = (int8_t)i % 2;
v.v1[i] = (int8_t)i;
v.v2[i] = (int16_t)(i * 2);
v.v4[i] = (int32_t)(i * 4);
v.v8[i] = (int64_t)(i * 8);
v.f4[i] = (float)(i * 40);
v.f8[i] = (double)(i * 80);
for (int j = 0; j < sizeof(v.bin[0]); ++j) {
v.bin[i][j] = (char)(i + '0');
}
strcpy(v.blob[i], "一二三四五六七八九十");
v.u1[i] = (uint8_t)i;
v.u2[i] = (uint16_t)(i * 2);
v.u4[i] = (uint32_t)(i * 4);
v.u8[i] = (uint64_t)(i * 8);
t8_len[i] = sizeof(int8_t);
t16_len[i] = sizeof(int16_t);
t32_len[i] = sizeof(int32_t);
t64_len[i] = sizeof(int64_t);
float_len[i] = sizeof(float);
double_len[i] = sizeof(double);
bin_len[i] = sizeof(v.bin[0]);
blob_len[i] = (int32_t)strlen(v.blob[i]);
u8_len[i] = sizeof(uint8_t);
u16_len[i] = sizeof(uint16_t);
u32_len[i] = sizeof(uint32_t);
u64_len[i] = sizeof(uint64_t);
}
taos_stmt_bind_param_batch(stmt, params);
taos_stmt_add_batch(stmt);
if (taos_stmt_execute(stmt) != 0) {
printf("\033[31m[%d] failed to execute insert statement.error:%s\033[0m\n", n, taos_stmt_errstr(stmt));
taos_stmt_close(stmt);
exit(EXIT_FAILURE);
}
}
taos_stmt_close(stmt);
// show the table
printf("[%d] show tables, tbName = %s\n", n, pTbName->showName);
stmt = taos_stmt_init(taos);
sprintf(tmpStr, "show tables like %s", pTbName->showName);
taos_stmt_prepare(stmt, tmpStr, 0);
code = taos_stmt_execute(stmt);
if ((!pTbNameResult->showName && (0 != code)) || (pTbNameResult->showName && (0 == code))) {
printf("\033[31m[%d] failed to execute show tables like. error:%s\033[0m\n", n, taos_stmt_errstr(stmt));
taos_stmt_close(stmt);
exit(EXIT_FAILURE);
}
taos_stmt_close(stmt);
// describe the table
printf("[%d] describe tables, tbName = %s\n", n, pTbName->describeName);
stmt = taos_stmt_init(taos);
sprintf(tmpStr, "describe %s", pTbName->describeName);
taos_stmt_prepare(stmt, tmpStr, 0);
code = taos_stmt_execute(stmt);
if ((!pTbNameResult->describeName && (0 != code)) || (pTbNameResult->describeName && (0 == code))) {
printf("\033[31m[%d] failed to execute describe tables. error:%s\033[0m\n", n, taos_stmt_errstr(stmt));
taos_stmt_close(stmt);
exit(EXIT_FAILURE);
}
taos_stmt_close(stmt);
// query the records
printf("[%d] select statement, tbName = %s\n", n, pTbName->queryName);
stmt = taos_stmt_init(taos);
sprintf(tmpStr, "SELECT * FROM %s", pTbName->queryName);
taos_stmt_prepare(stmt, tmpStr, 0);
TAOS_BIND qparams[2];
int8_t v1 = 5;
int16_t v2 = 15;
qparams[0].buffer_type = TSDB_DATA_TYPE_TINYINT;
qparams[0].buffer_length = sizeof(v1);
qparams[0].buffer = &v1;
qparams[0].length = &qparams[0].buffer_length;
qparams[0].is_null = NULL;
qparams[1].buffer_type = TSDB_DATA_TYPE_SMALLINT;
qparams[1].buffer_length = sizeof(v2);
qparams[1].buffer = &v2;
qparams[1].length = &qparams[1].buffer_length;
qparams[1].is_null = NULL;
taos_stmt_bind_param(stmt, qparams);
code = taos_stmt_execute(stmt);
if ((!pTbNameResult->queryName && (0 != code)) || (pTbNameResult->queryName && (0 == code))) {
printf("\033[31m[%d] failed to execute select statement.error:%s\033[0m\n", n, taos_stmt_errstr(stmt));
taos_stmt_close(stmt);
exit(EXIT_FAILURE);
}
result = taos_stmt_use_result(stmt);
TAOS_ROW row;
int rows = 0;
int num_fields = taos_num_fields(result);
TAOS_FIELD* fields = taos_fetch_fields(result);
// fetch the records row by row
while ((row = taos_fetch_row(result))) {
char temp[256] = {0};
rows++;
taos_print_row(temp, row, fields, num_fields);
printf("[%d] row = %s\n", n, temp);
}
taos_free_result(result);
taos_stmt_close(stmt);
// drop table
printf("[%d] drop table, tbName = %s\n", n, pTbName->dropName);
stmt = taos_stmt_init(taos);
sprintf(tmpStr, "drop table %s", pTbName->dropName);
taos_stmt_prepare(stmt, tmpStr, 0);
code = taos_stmt_execute(stmt);
if ((!pTbNameResult->dropName && (0 != code)) || (pTbNameResult->dropName && (0 == code))) {
printf("\033[31m[%d] failed to drop table. error:%s\033[0m\n", n, taos_stmt_errstr(stmt));
taos_stmt_close(stmt);
exit(EXIT_FAILURE);
}
taos_stmt_close(stmt);
}
free(t8_len);
free(t16_len);
free(t32_len);
free(t64_len);
free(float_len);
free(double_len);
free(bin_len);
free(blob_len);
free(u8_len);
free(u16_len);
free(u32_len);
free(u64_len);
}
int main(int argc, char* argv[]) {
const char* host = "127.0.0.1";
const char* user = "root";
......@@ -880,5 +1416,7 @@ int main(int argc, char* argv[]) {
printf("************ verify prepare3 *************\n");
verify_prepare3(taos);
printf("************ verify prepare4 *************\n");
verify_prepare4(taos);
printf("************ verify end *************\n");
exit(EXIT_SUCCESS);
}
......@@ -7,4 +7,7 @@ ulimit -c unlimited
# python3 test.py -f restful/restful_bind_db2.py
python3 ./test.py -f client/nettest.py
python3 ./test.py -f ../system-test/4-taosAdapter/taosAdapter_query.py
python3 ./test.py -f ../system-test/4-taosAdapter/taosAdapter_insert.py
#======================p1-end===============
......@@ -50,6 +50,9 @@ sql insert into t3 values('2017-12-25 21:25:41', 3)
sql insert into t3 values('2017-12-25 21:26:41', 3)
sql insert into t3 values('2017-12-25 21:27:41', 3)
sql create table m3 (ts timestamp, col1 int, col2 float, txt binary(500))
sql insert into m3 values(now, 1, 2.0, 'HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS')
print =============== step2 - login
system_content curl 127.0.0.1:7111/grafana/
......@@ -179,4 +182,10 @@ if $system_content != @[{"refId":"A","target":"{count(v1):3}","datapoints":[[15.
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"refId":"A","alias":"taosd","sql":"select last(col1), last(col2), last(txt) from d1.m3 group by txt"}]' 127.0.0.1:7111/grafana/query
print 20-> $system_content
if $system_content != @[{"refId":"A","target":"taosd{last(col2):2.00000, last(txt):HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS HELLO TAOS}","datapoints":[[1,"-"]]}]@ then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
{"base_url": "127.0.0.1", "precision": "ms", "clear_data": true, "database_name": "db", "tbnum": 10, "data_row": 100, "case_file": "data_insert.csv", "basetime": 1639969683873, "all_case": false, "all_err": false, "all_current": true, "err_case": {"port_err": true, "api_err": true, "header_err": true, "db_tb_err": true, "data_err": true}, "current_case": {"port_current": true, "api_current": true, "header_current": true, "db_tb_current": true, "data_current": true}}
\ No newline at end of file
{"base_url": "127.0.0.1", "precision": "ms", "clear_data": true, "database_name": "db", "tbnum": 10, "data_row": 100, "basetime": 1639969706198, "all_case": false, "all_err": false, "all_current": true, "err_case": {"port_err": true, "api_err": true, "header_err": true, "sql_err": true}, "current_case": {"port_current": true, "api_current": true, "header_current": true, "sql_current": true}}
\ No newline at end of file
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import subprocess
import random
import inspect
import taos
import requests
import json
import traceback
import simplejson.errors
import csv
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class RestMsgInfo:
    """Address and header bundle for one TDengine REST request target.

    full_url is precomputed as ``http://<base_url>:<port><api_url>``.
    Attributes are plain data; error cases deliberately pass invalid ports,
    urls and headers through unchanged.
    """

    def __init__(self, base_url,
                 port=6041,
                 api_url="/rest/sql",
                 header=None):
        # Bug fix: the original used a mutable dict literal as the default for
        # `header`, which Python evaluates once — every instance would share
        # (and could mutate) the same dict. Build a fresh default per call.
        if header is None:
            header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}
        self.base_url = base_url
        self.port = port
        self.api_url = api_url
        self.header = header
        self.full_url = f"http://{base_url}:{port}{api_url}"
class TDTestCase:
    """RESTful insert-interface test.

    Exercises the /rest/sql, /rest/sqlt and /rest/sqlutc endpoints with both
    valid ("current") and deliberately invalid ("err") ports, api urls,
    headers, DDL and insert statements.  Configuration is read from
    rest_insert_config.json next to this file (created with defaults on first
    run).  Requires a running taosd + taosadapter reachable at base_url.
    """

    def __init__(self):
        # Defaults only; all of these are overwritten from the JSON config
        # inside run().
        self.base_url = "127.0.0.1"
        self.dbname = "db"
        self.precision = "ms"
        self.tbnum = 0
        self.data_row = 0
        self.basetime = 0
        self.file = ""

    def init(self, conn, logSql):
        # Framework hook: bind the shared SQL helper to this connection.
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

    def caseDescription(self):
        # Human-readable case list picked up by the test framework.
        '''
        case1 <cpwu>: create/alter/drop database/normal_table/child_table/stable \n
        case2 <cpwu>: insert into table multiple records \n
        case3 <cpwu>: insert multiple records into a given column \n
        case4 <cpwu>: insert multiple records into multiple tables \n
        case5 <cpwu>: automatically create a table when inserting, and specify a given tags column \n
        case6 <cpwu>: insert with files \n
        case7 <cpwu>: api_url test \n
        case8 <cpwu>: base_url test \n
        case9 <cpwu>: header test
        '''
        return

    def rest_test_table(self, dbname: str, tbnum: int) -> None :
        # Recreate the test database from scratch: one wide super table
        # (stb1), one narrow one (stb2), `tbnum` child tables of each, and
        # two normal tables (nt1, nt2) used by the insert cases.
        tdSql.execute(f"drop database if exists {dbname}")
        tdSql.execute(f"create database if not exists {dbname} keep 3650 precision '{self.precision}' ")
        tdSql.execute(f"use {dbname}")
        tdSql.execute(
            f'''
            create stable {dbname}.stb1 (
                ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool,
                c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
            )
            tags(
                tag1 int, tag2 float, tag3 timestamp, tag4 binary(16), tag5 double, tag6 bool,
                tag7 bigint, tag8 smallint, tag9 tinyint, tag10 nchar(16)
            )
            '''
        )
        tdSql.execute(
            f"create stable {dbname}.stb2 (ts timestamp, c1 int) tags(ttag1 int)"
        )
        for i in range(tbnum):
            tdSql.execute(
                f'''
                create table {dbname}.t{i} using {dbname}.stb1
                tags(
                    {i}, {i}, {1639032680000+i*10}, 'binary_{i}',{i},{random.choice([0, 1])}, {i},{i%32767},{i%127},'nchar_{i}'
                )'''
            )
            tdSql.execute(f"create table {dbname}.tt{i} using {dbname}.stb2 tags({i})")
        tdSql.execute(
            f"create table {dbname}.nt1 (ts timestamp, c1 int, c2 float)"
        )
        tdSql.execute(
            f"create table {dbname}.nt2 (ts timestamp, c1 int, c2 float)"
        )
        pass

    def rest_test_data(self, tbnum:int, data_row:int, basetime:int) -> None :
        # Seed every child table with `data_row` random rows via the native
        # connection (not REST); tt* tables get timestamps going backwards.
        for i in range(tbnum):
            for j in range(data_row):
                tdSql.execute(
                    f"insert into t{i} values ("
                    f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
                    f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
                    f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
                )
                tdSql.execute(
                    f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
                )

    def check_err_case(self,query_msg: RestMsgInfo, data):
        # POST `data` to an endpoint that is EXPECTED to fail, either at the
        # HTTP layer (bad port/url/header raises or returns non-200) or at
        # the server layer (status == "error").  An unexpected success is
        # fatal (tdLog.exit).
        url, header = query_msg.full_url, query_msg.header
        try:
            conn = requests.post(url=url, data=data, headers=header)
            resp_code = conn.status_code
            resp = conn.json()
            if resp_code != 200:
                tdLog.success(f"expect error occured, usrl: {url}, sql: {data}, error code is :{resp_code}")
                return
            status = resp["status"]
            desc = resp["desc"]
            if resp_code == 200 and status == "error":
                tdLog.success(f"expect error occured, usrl: {url}, sql: {data}, error is :{desc}")
                return
            else:
                tdLog.exit(f"expect error not occured")
        except requests.exceptions.InvalidHeader as e:
            print(f"expect error occured, request header error, header: {header}, error: {e}")
        except requests.exceptions.InvalidURL as e:
            print(f"expect error occured, request url error, url: {url}, error: {e}")
        except requests.exceptions.ConnectionError as e:
            print(f"expect error occured, request connection error,url: {url}, error: {e}")
        except simplejson.errors.JSONDecodeError as e:
            print(f"expect error occured, request json error,url: {url}, header: {header}, error: {e}")
        except Exception as e:
            print(f"expect error occured, url: {url}, header: {header}, {traceback.print_exc()}")
        # finally:
        #     conn.close()
        pass

    def check_err_sql_case(self,query_msg: RestMsgInfo, data):
        # The transport is valid here, so the HTTP request itself must
        # succeed; only the SQL is expected to be rejected by the server
        # (status == "error").
        url, header = query_msg.full_url, query_msg.header
        conn = requests.post(url=url, data=data, headers=header)
        resp_code = conn.status_code
        resp = conn.json()
        try:
            status = resp["status"]
            desc = resp["desc"]
            if resp_code == 200 and status == "error":
                tdLog.success(f"expect error occured, url: {url}, error is :{desc}")
                return
            else:
                tdLog.exit(f"expect error not occured")
        except Exception as e:
            tdLog.debug(f"url: {url}, resp: {resp} ")
            traceback.print_exc()
            raise e

    def check_current_case(self,query_msg: RestMsgInfo, data):
        # POST `data` and require full success: HTTP 200 and status "succ".
        url, header = query_msg.full_url, query_msg.header
        conn = requests.post(url=url, data=data, headers=header)
        resp_code = conn.status_code
        resp = conn.json()
        try:
            status = resp["status"]
            if resp_code == 200 and status == "succ":
                tdLog.success(f"restfull run success! url:{url}")
            else:
                tdLog.exit(f"restful api test failed, url:{url}, sql: {data}, resp: {resp}")
        except:
            tdLog.debug(f"resp_code: {resp_code}, url: {url}, resp:{resp}")
            traceback.print_exc()
            raise
        pass

    def check_case_res_data(self, query_msg: RestMsgInfo, data):
        # Find which result columns are TIMESTAMP-typed, then bail out early
        # for ms-precision /rest/sql endpoints.
        # NOTE(review): the actual timestamp-format verification appears
        # unfinished — the sqlt/sqlutc and non-ms paths fall through without
        # checking anything; confirm against the intended coverage.
        url, header, api = query_msg.full_url, query_msg.header, query_msg.api_url
        try:
            ts_col = []
            stb_list = [f"describe {self.dbname}.stb1", f"describe {self.dbname}.stb2"]
            for stb in stb_list:
                conn = requests.post(url=url, data=stb, headers=header)
                resp = conn.json()
                for col in resp["data"]:
                    if "TIMESTAMP" == col[1]:
                        ts_col.append(col[0])
            check_column = []
            conn = requests.post(url=url, data=data, headers=header)
            resp = conn.json()
            if len(resp["data"]) < 1:
                return
            for meta in resp["column_meta"]:
                if meta[0] in ts_col:
                    check_column.append(meta[0])
            if len(check_column) < 1:
                return
            if self.precision == "ms" and (api == "/rest/sql" or api == f"/rest/sql/{self.dbname}"):
                return
        except:
            raise
        pass

    def db_tb_case_current(self):
        # DDL statements that must all succeed (duplicates are intentional:
        # they exercise the "if [not] exists" idempotence).
        # when version > 2.6, add the follow case:
        # f"alter table {self.dbname}.tb1 add column c2 float",
        # f"alter table {self.dbname}.tb1 drop column c2 ",
        # f"alter table {self.dbname}.tb1 add column c2 float ; alter table {self.dbname}.tb1 drop column c2 ",
        case_list = [
            "create database if not exists db",
            "create database if not exists db",
            "create database if not exists db1",
            "alter database db1 comp 2",
            "alter database db1 keep 36500",
            "drop database if exists db1",
            "drop database if exists db1",
            "drop database if exists db",
            f"create database if not exists {self.dbname}",
            f"create table if not exists {self.dbname}.tb1 (ts timestamp , c1 int)",
            f"create table if not exists {self.dbname}.tb1 (ts timestamp , c1 float)",
            f"create table if not exists {self.dbname}.stb1 (ts timestamp , c1 int) tags(tag1 int )",
            f"create table if not exists {self.dbname}.stb1 (ts timestamp , c1 float) tags(tag2 int )",
            f"create table if not exists {self.dbname}.stb2 (ts timestamp , c1 int) tags(tag1 int )",
            f"create table if not exists {self.dbname}.stb3 (ts timestamp , c1 int) tags(tag1 int )",
            f"create table if not exists {self.dbname}.tb2 using {self.dbname}.stb2 tags(2)",
            f"create table if not exists {self.dbname}.tb3 using {self.dbname}.stb2 tags(2)",
            f"drop table if exists {self.dbname}.tb2",
            f"drop table if exists {self.dbname}.tb2",
            f"drop table if exists {self.dbname}.stb2",
            f"drop table if exists {self.dbname}.stb2",
            f"drop table if exists {self.dbname}.t3",
            f"drop table if exists {self.dbname}.stb3",
        ]
        return case_list

    def db_tb_case_err(self):
        # Malformed DDL that the server must reject (bad "if exists" /
        # "if not exists" usage, missing tags, no timestamp column, ...).
        case_list = [
            "create database if exists db",
            f"drop database if not exists db",
            f"drop database db3",
            f"create table if exists {self.dbname}.t1 ",
            f"create table if exists {self.dbname}.stb1 ",
            f"drop table if not exists {self.dbname}.stb1 ",
            f"drop table {self.dbname}.stb4 ",
            f"create table if not exists {self.dbname}.stb2 (c1 int, c2 timestamp ) tags(tag1 int)",
            f"create table if exists {self.dbname}.stb3 (ts timestamp ,c1 int) ",
            f"create table if exists {self.dbname}.t2 (c1 int) "
        ]
        return case_list

    def data_case_current(self, tbnum:int, data_row:int, basetime: int, file:str):
        # Build valid insert statements: batched values (chunks of 100 rows
        # per statement), multi-table inserts, column reordering,
        # auto-create-on-insert, and an "insert ... file" case backed by a
        # CSV written here on first run.
        case_list = []
        body_list = []
        row_times = data_row // 100
        row_alone = data_row % 100
        for i in range(row_times):
            body = ""
            for j in range(100):
                body += f"(\
                    {basetime + (j+1)*10+ i*1000}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)},\
                    'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, \
                    {random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' \
                    )"
            body_list.append(body)
        if row_alone != 0:
            body = ""
            for j in range(row_alone):
                body += f"( \
                    {basetime + (j+1)*10+ row_times*1000}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, \
                    {basetime + random.randint(-200, -1)},'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, \
                    {random.randint(-200,-1)},{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' \
                    )"
            body_list.append(body)
        for i in range(tbnum):
            pre_insert = f"insert into {self.dbname}.t{i} values "
            for value_body in body_list:
                insert_sql = pre_insert + value_body
                case_list.append(insert_sql)
        case_list.append(f'insert into {self.dbname}.nt1 values (now, 1, 1.0)')
        case_list.append(f'insert into {self.dbname}.nt1 values ({basetime + 10}, 2, 2.0)')
        case_list.append(f'insert into {self.dbname}.nt1 values ({basetime + 20}, 3, 3.0) {self.dbname}.nt2 values (now, 1, 1.0)')
        case_list.append(f'insert into {self.dbname}.nt1 (ts, c2, c1) values ({basetime + 20}, 4.0, 4) ')
        # exchange column order
        case_list.append(f'insert into {self.dbname}.ct1 using {self.dbname}.stb1 (tag1) tags(1) (ts, c1) values (now, 1)')
        # insert with file
        if not os.path.isfile(file):
            with open(file=file, mode="w", encoding="utf-8", newline="") as f:
                for j in range(data_row):
                    writer = csv.writer(f)
                    data_line = [
                        basetime - (j + 1) * 10, random.randint(-200, -1), random.uniform(200, -1),
                        basetime + random.randint(-200, -1), f'"binary_{j}"', random.uniform(-200, -1),
                        random.choice([0, 1]), random.randint(-200, -1), random.randint(-200, -1),
                        random.randint(-127, -1), f'"nchar_{j}"'
                    ]
                    writer.writerow(data_line)
        case_list.append(f"insert into {self.dbname}.ct1 file {file}")
        return case_list
        pass

    def data_case_err(self):
        # Insert statements the server must reject: oversized statement,
        # empty/short values, type mismatch, and inserting directly into a
        # super table.
        case_list = []
        nowtime = int(round(time.time()*1000))
        bigger_insert_sql = f"insert into {self.dbname}.nt1 values"
        for i in range(40000):
            bigger_insert_sql += f"({nowtime-i*10}, {i}, {i*1.0})"
        case_list.append(bigger_insert_sql)
        nodata_sql = f"insert into {self.dbname}.nt1 values()"
        case_list.append(nodata_sql)
        less_data_sql = f"insert into {self.dbname}.nt1 values(now)"
        case_list.append(less_data_sql)
        errtype_data_sql = f"insert into {self.dbname}.nt1 values(now+2, 1.0, 'binary_2')"
        case_list.append(errtype_data_sql)
        # insert into super table directly
        insert_super_data_sql = f"insert into {self.dbname}.stb1 values(now+3, 1, 1.0)"
        case_list.append(insert_super_data_sql)
        return case_list

    def port_case_current(self):
        # 6041 is the default taosadapter REST port.
        case_list = [6041]
        return case_list

    def port_case_err(self):
        # Wrong/invalid ports (6030 is the native port, not REST).
        case_list = [
            6030,
            6051,
            666666666,
            None,
            "abcd"
        ]
        return case_list

    def api_case_current(self):
        # All supported REST endpoints, with and without a db suffix.
        case_List = [
            "/rest/sql",
            f"/rest/sql/{self.dbname}",
            "/rest/sqlt",
            f"/rest/sqlt/{self.dbname}",
            "/rest/sqlutc",
            f"/rest/sqlutc/{self.dbname}"
        ]
        return case_List

    def api_case_err(self):
        # Invalid api paths, including schemaless endpoints that do not
        # accept plain SQL bodies.
        case_list = [
            "",
            "/rest1/sql",
            "/rest/sqlsqltsqlutc",
            1,
            ["/rest", "/sql"],
            "/influxdb/v1/write",
            "/opentsdb/v1/put/json/db",
            "/opentsdb/v1/put/telnet/db",
            "/rest*",
            "*"
        ]
        return case_list

    def header_case_current(self):
        # Valid auth headers: Basic (root:taosdata) and the Taosd token form.
        case_list = [
            {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='},
            {'Authorization': 'Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04'}
        ]
        return case_list

    def header_case_err(self):
        # Broken auth headers (empty credential, bad token, non-string value).
        case_list = [
            {'Authorization': 'Basic '},
            {'Authorization': 'Taosd /root/taosdata'},
            {'Authorization': True}
        ]
        return case_list

    def run_case_api_err(self):
        # Each bad api url must fail even for a trivially valid statement.
        err_cases = self.api_case_err()
        count = 0
        data = "create database if not exists db"
        for case in err_cases:
            print(f"err api case{count}: ", end="")
            query_msg = RestMsgInfo(base_url=self.base_url, api_url=case)
            self.check_err_case(query_msg=query_msg, data=data)
            count += 1
        pass

    def run_case_port_err(self):
        # Each bad port must fail even for a trivially valid statement.
        err_cases = self.port_case_err()
        count = 0
        data = "create database if not exists db"
        for case in err_cases:
            print(f"err port case{count}: ", end="")
            query_msg = RestMsgInfo(base_url=self.base_url, port=case)
            self.check_err_case(query_msg=query_msg, data=data)
            count += 1
        pass

    def run_case_header_err(self):
        # Each bad header must fail even for a trivially valid statement.
        err_cases = self.header_case_err()
        count = 0
        data = "create database if not exists db"
        for case in err_cases:
            print(f"err header case{count}: ", end="")
            query_msg = RestMsgInfo(base_url=self.base_url, header=case)
            self.check_err_case(query_msg=query_msg, data=data)
            count += 1
        pass

    def run_case_db_tb_err(self):
        # Malformed DDL over a valid transport: server-side rejection only.
        err_cases = self.db_tb_case_err()
        count = 0
        query_msg = RestMsgInfo(base_url=self.base_url)
        for case in err_cases:
            print(f"err create db/tb case{count}: ", end="")
            self.check_err_sql_case(query_msg=query_msg, data=case)
            count += 1
        pass

    def run_case_data_err(self):
        # Bad inserts over a valid transport; the schema is (re)created
        # natively first so rejection is due to the data, not missing tables.
        err_cases = self.data_case_err()
        count = 0
        tdSql.execute(f"drop database if exists {self.dbname}")
        tdSql.execute(f"create database if not exists {self.dbname} keep 3650 precision '{self.precision}' ")
        tdSql.execute(f"use {self.dbname}")
        tdSql.execute(
            f'''
            create stable {self.dbname}.stb1 (
                ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool,
                c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
            )
            tags(
                tag1 int, tag2 float, tag3 timestamp, tag4 binary(16), tag5 double, tag6 bool,
                tag7 bigint, tag8 smallint, tag9 tinyint, tag10 nchar(16)
            )
            '''
        )
        query_msg = RestMsgInfo(base_url=self.base_url)
        for case in err_cases:
            print(f"err insert data case{count}: ", end="")
            self.check_err_sql_case(query_msg=query_msg, data=case)
            count += 1
        tdSql.execute(f"drop database if exists {self.dbname}")
        pass

    def run_case_port_current(self):
        # Valid port(s) must accept a trivially valid statement.
        current_cases = self.port_case_current()
        count = 0
        data = "create database if not exists db"
        for case in current_cases:
            print(f"current port case{count}: ", end="")
            query_msg = RestMsgInfo(base_url=self.base_url, port=case)
            self.check_current_case(query_msg=query_msg, data=data)
            count += 1
        pass

    def run_case_api_current(self):
        # Every supported endpoint must accept a trivially valid statement.
        current_cases = self.api_case_current()
        count = 0
        data = "create database if not exists db"
        for case in current_cases:
            print(f"current api case{count}: ", end="")
            query_msg = RestMsgInfo(base_url=self.base_url, api_url=case)
            self.check_current_case(query_msg=query_msg, data=data)
            count += 1
        pass

    def run_case_header_current(self):
        # Every valid auth header must accept a trivially valid statement.
        current_cases = self.header_case_current()
        count = 0
        data = "create database if not exists db"
        for case in current_cases:
            print(f"current header case{count}: ", end="")
            query_msg = RestMsgInfo(base_url=self.base_url, header=case)
            self.check_current_case(query_msg=query_msg, data=data)
            count += 1
        pass

    def run_case_db_tb_current(self):
        # Every valid DDL case is replayed against all three SQL endpoints.
        current_cases = self.db_tb_case_current()
        count = 0
        for case in current_cases:
            print(f"current insert db/tb case{count}: ", end="")
            for api in ["/rest/sql", "/rest/sqlt", "/rest/sqlutc"]:
                query_msg = RestMsgInfo(base_url=self.base_url, api_url=api)
                self.check_current_case(query_msg=query_msg, data=case)
            count += 1
        pass

    def run_case_data_current(self):
        # Rebuild the schema, then replay every valid insert case against
        # every supported endpoint.
        self.rest_test_table(dbname=self.dbname, tbnum=self.tbnum)
        current_cases = self.data_case_current(tbnum=self.tbnum, data_row=self.data_row, basetime=self.basetime, file=self.file)
        count = 0
        print(current_cases[12])
        api_cases = self.api_case_current()
        for case in current_cases:
            print(f"current insert data case{count}: ", end="")
            for api in api_cases:
                query_msg = RestMsgInfo(base_url=self.base_url, api_url=api)
                self.check_current_case(query_msg=query_msg, data=case)
            count += 1
        pass

    def run_case_err(self):
        # All expected-failure suites.
        self.run_case_api_err()
        self.run_case_port_err()
        self.run_case_header_err()
        self.run_case_db_tb_err()
        self.run_case_data_err()
        pass

    def run_case_current(self):
        # All expected-success suites.
        self.run_case_api_current()
        self.run_case_port_current()
        self.run_case_header_current()
        self.run_case_db_tb_current()
        self.run_case_data_current()
        pass

    def run_all_case(self):
        self.run_case_err()
        self.run_case_current()
        pass

    def set_default_args(self):
        # Write a default rest_insert_config.json next to this file and
        # return its path; used when no config exists yet.
        nowtime = int(round(time.time() * 1000))
        url = "127.0.0.1"
        per_table_rows = 100
        tbnum = 10
        database_name = "db"
        precision ="ms"
        clear_data = True
        insert_case_filename = "data_insert.csv"
        config_default = {
            "base_url" : url,
            "precision" : precision,
            "clear_data" : clear_data,
            "database_name" : database_name,
            "tbnum" : tbnum,
            "data_row" : per_table_rows,
            "case_file" : insert_case_filename,
            "basetime" : nowtime,
            "all_case" : False,
            "all_err" : False,
            "all_current" : True,
            "err_case" : {
                "port_err" : True,
                "api_err" : True,
                "header_err" : True,
                "db_tb_err" : True,
                "data_err" : True,
            },
            "current_case" : {
                "port_current" : True,
                "api_current" : True,
                "header_current" : True,
                "db_tb_current" : True,
                "data_current" : True,
            }
        }
        config_file_name = f"{os.path.dirname(os.path.abspath(__file__))}/rest_insert_config.json"
        with open(config_file_name, "w") as f:
            json.dump(config_default, f)
        return config_file_name

    def run(self):
        # Load (or create) the JSON config, then dispatch to the selected
        # sub-suites.  "all_case" wins over everything; "all_err"/"all_current"
        # win over the individual flags; if nothing is selected, run all.
        config_file = f"{os.path.dirname(os.path.abspath(__file__))}/rest_insert_config.json"
        if not os.path.isfile(config_file):
            config_file = self.set_default_args()
        with open(config_file, "r", encoding="utf-8") as f:
            cfg = json.load(f)
        self.tbnum = cfg["tbnum"]
        self.data_row = cfg["data_row"]
        self.basetime = cfg["basetime"]
        self.dbname = cfg["database_name"]
        self.base_url = cfg["base_url"]
        self.precision = cfg["precision"]
        self.file = cfg["case_file"]
        clear_data = True if cfg["clear_data"] else False
        if clear_data:
            self.rest_test_table(dbname=self.dbname, tbnum=self.tbnum)
        run_all_case = True if cfg["all_case"] else False
        run_all_err_case = True if cfg["all_err"] else False
        run_all_current_case = True if cfg["all_current"] else False
        run_port_err_case = True if cfg["err_case"]["port_err"] else False
        run_api_err_case = True if cfg["err_case"]["api_err"] else False
        run_header_err_case = True if cfg["err_case"]["header_err"] else False
        run_db_tb_err_case = True if cfg["err_case"]["db_tb_err"] else False
        run_data_err_case = True if cfg["err_case"]["data_err"] else False
        run_port_current_case = True if cfg["current_case"]["port_current"] else False
        run_api_current_case = True if cfg["current_case"]["api_current"] else False
        run_header_current_case = True if cfg["current_case"]["header_current"] else False
        run_db_tb_current_case = True if cfg["current_case"]["db_tb_current"] else False
        run_data_current_case = True if cfg["current_case"]["data_current"] else False
        print("run_all_case:" ,run_all_case)
        print("run_all_err_case:" ,run_all_err_case)
        print("run_all_current_case:" ,run_all_current_case)
        print("run_port_err_case:" ,run_port_err_case)
        print("run_api_err_case:" ,run_api_err_case)
        print("run_header_err_case:" ,run_header_err_case)
        print("run_db_tb_err_case:" ,run_db_tb_err_case)
        print("run_data_err_case:" ,run_data_err_case)
        print("run_port_current_case:" ,run_port_current_case)
        print("run_api_current_case:" ,run_api_current_case)
        print("run_header_current_case:" ,run_header_current_case)
        print("run_db_tb_current_case:" ,run_db_tb_current_case)
        print("run_data_current_case:" ,run_data_current_case)
        if not (run_all_err_case | run_all_current_case | run_port_err_case | run_api_err_case | run_header_err_case |
                run_db_tb_err_case | run_data_err_case | run_port_current_case | run_api_current_case |
                run_header_current_case | run_db_tb_current_case | run_data_current_case ):
            run_all_case = True
        if run_all_err_case & run_all_current_case:
            run_all_case = True
        if run_all_case:
            self.run_all_case()
            return
        if run_all_err_case :
            self.run_case_err()
            return
        if run_all_current_case:
            self.run_case_current()
            return
        if run_port_err_case:
            self.run_case_port_err()
        if run_api_err_case:
            self.run_case_api_err()
        if run_header_err_case:
            self.run_case_header_err()
        if run_db_tb_err_case:
            self.run_case_db_tb_err()
        if run_data_err_case:
            self.run_case_data_err()
        if run_port_current_case:
            self.run_case_port_current()
        if run_api_current_case:
            self.run_case_api_current()
        if run_header_current_case:
            self.run_case_header_current()
        if run_db_tb_current_case:
            self.run_case_db_tb_current()
        if run_data_current_case:
            self.run_case_data_current()
        pass

    def stop(self):
        # Framework hook: release the cursor and report completion.
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
# Register this case with the test framework for both platforms.
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import subprocess
import random
import inspect
import taos
import requests
import json
import traceback
import simplejson.errors
import math
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
from collections import defaultdict
class RestMsgInfo:
    """Describes one RESTful request target: host, port, api path and auth header.

    `full_url` is precomputed at construction time from the other fields.
    """
    def __init__(self, base_url,
                 port=6041,
                 api_url="/rest/sql",
                 header=None
                 ):
        self.base_url = base_url
        self.port = port
        self.api_url = api_url
        # Fix: the previous `header={...}` default was a mutable dict shared by
        # every instance; build a fresh dict per instance instead.
        if header is None:
            header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}
        self.header = header
        self.full_url = f"http://{base_url}:{port}{api_url}"
class TDTestCase:
    # REST-interface test suite: exercises the /rest/sql, /rest/sqlt and
    # /rest/sqlutc endpoints with valid and invalid ports, api paths,
    # authorization headers and SQL statements.
    def __init__(self):
        # Defaults; overwritten in run() from rest_query_config.json.
        self.base_url = "127.0.0.1"
        self.dbname = "db"
        self.precision = "ms"

    def init(self, conn, logSql):
        # Framework hook: bind the taos cursor used for fixture DDL/DML.
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

    def caseDescription(self):
        '''
        case1 <cpwu>: specified SQL
        case2 <cpwu>: select sql,include stable 、child table and normal table, include correct SQL and invalid SQL \n
        case3 <cpwu>: port test \n
        case4 <cpwu>: api_url test \n
        case5 <cpwu>: base_url test \n
        case6 <cpwu>: header test \n
        case7 <cpwu>: big data test
        '''
        return
def rest_test_table(self, dbname: str, tbnum: int) -> None :
    """Recreate database `dbname` and its fixtures.

    Creates two super tables (stb1 with 10 data columns / 10 tags, stb2 with
    one column / one tag) and `tbnum` child tables under each: t{i} under
    stb1 and tt{i} under stb2. Also issues `use {dbname}` so later inserts
    can omit the db prefix.
    """
    tdSql.execute(f"drop database if exists {dbname}")
    tdSql.execute(f"create database if not exists {dbname} keep 3650 precision '{self.precision}' ")
    tdSql.execute(f"use {dbname}")
    tdSql.execute(
        f'''
        create stable {dbname}.stb1 (
        ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool,
        c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
        )
        tags(
        tag1 int, tag2 float, tag3 timestamp, tag4 binary(16), tag5 double, tag6 bool,
        tag7 bigint, tag8 smallint, tag9 tinyint, tag10 nchar(16)
        )
        '''
    )
    tdSql.execute(
        f"create stable {dbname}.stb2 (ts timestamp, c1 int) tags(ttag1 int)"
    )
    for i in range(tbnum):
        tdSql.execute(
            f'''
            create table {dbname}.t{i} using {dbname}.stb1
            tags({i}, {i}, {1639032680000+i*10}, 'binary_{i}',{i},{random.choice([0, 1])}, {i},{i%32767},{i%127},'nchar_{i}')
            '''
        )
        tdSql.execute(f"create table {dbname}.tt{i} using {dbname}.stb2 tags({i})")
    pass
def rest_test_data(self, tbnum:int, data_row:int, basetime:int) -> None :
    """Populate fixtures with `data_row` random rows per child table.

    t{i} rows advance forward from `basetime`; tt{i} rows go backwards.
    Table names are not db-qualified here: this relies on the `use <db>`
    issued in rest_test_table().
    """
    for i in range(tbnum):
        for j in range(data_row):
            tdSql.execute(
                f"insert into t{i} values ("
                f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
                f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
                f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
            )
            tdSql.execute(
                f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
            )
def check_err_case(self,query_msg: RestMsgInfo, data):
    """Issue a request that is EXPECTED to fail (bad port/api path/header).

    A pass is: a non-200 HTTP status, a 200 reply whose JSON status is
    "error", or one of the anticipated requests/JSON-decoding exceptions.
    A clean 200/"succ" reply aborts the test via tdLog.exit().
    """
    url, header = query_msg.full_url, query_msg.header
    try:
        conn = requests.post(url=url, data=data, headers=header)
        resp_code = conn.status_code
        resp = conn.json()
        if resp_code != 200:
            # HTTP-level rejection already counts as the expected failure.
            print(f"expect error occured, url: {url}, sql: {data}, error code is :{resp_code}")
            return
        status = resp["status"]
        desc = resp["desc"]
        if resp_code == 200 and status == "error":
            # Application-level rejection also counts as the expected failure.
            print(f"expect error occured, url: {url}, sql: {data}, error is :{desc}")
            return
        else:
            tdLog.exit(f"expect error not occured")
    except requests.exceptions.InvalidHeader as e:
        tdLog.success(f"expect error occured, request header error, header: {header}, error: {e}")
    except requests.exceptions.InvalidURL as e:
        tdLog.success(f"expect error occured, request url error, url: {url}, error: {e}")
    except requests.exceptions.ConnectionError as e:
        tdLog.success(f"expect error occured, request connection error,url: {url}, error: {e}")
    except simplejson.errors.JSONDecodeError as e:
        tdLog.success(f"expect error occured, request json error,url: {url}, header: {header}, error: {e}")
    except Exception as e:
        # Any other exception is still treated as the expected failure.
        tdLog.success(f"expect error occured, url: {url}, header: {header}, {traceback.print_exc()}")
    # finally:
    #     conn.close()
    pass
def check_err_sql_case(self,query_msg: RestMsgInfo, data):
    """Issue an invalid SQL statement; the server must answer HTTP 200 with
    JSON status "error", otherwise the test is aborted."""
    url, header = query_msg.full_url, query_msg.header
    try:
        conn = requests.post(url=url, data=data, headers=header)
        resp_code = conn.status_code
        resp = conn.json()
        status = resp["status"]
        desc = resp["desc"]
        if resp_code == 200 and status == "error":
            tdLog.success(f"expect error occured, url: {url}, sql: {data}, error is :{desc}")
            return
        else:
            tdLog.exit(f"expect error not occured")
    except Exception as e:
        # Transport/parse errors are NOT expected for syntactically-valid requests.
        traceback.print_exc()
        raise e
def check_current_case(self,query_msg: RestMsgInfo, data):
    """Issue a request that is expected to succeed: HTTP 200 + JSON status "succ"."""
    url, header = query_msg.full_url, query_msg.header
    conn = requests.post(url=url, data=data, headers=header)
    try:
        resp_code = conn.status_code
        resp = conn.json()
        status = resp["status"]
        if resp_code == 200 and status == "succ":
            tdLog.printNoPrefix(f"restfull run success! url:{url}, sql: {data}")
        else:
            tdLog.exit(f"restful api test failed, url:{url}, sql: {data}")
    except:
        # Log the HTTP code before re-raising so the failure context is kept.
        tdLog.debug(f"resp_code: {resp_code}, url: {url}")
        traceback.print_exc()
        raise
    pass
def check_case_res_data(self, query_msg: RestMsgInfo, data):
    """Validate the rendering of timestamp columns in a query result.

    Each REST endpoint renders timestamps differently:
      * /rest/sql    -> local-time string  (fixed length per precision)
      * /rest/sqlt   -> epoch integer      (digit count per precision)
      * /rest/sqlutc -> ISO-8601 string    (a small set of lengths per precision)
    The original nine copy-pasted precision x endpoint branches are collapsed
    into lookup tables plus one shared check loop; behavior is unchanged.
    """
    url, header, api = query_msg.full_url, query_msg.header, query_msg.api_url
    try:
        # Collect the names of every TIMESTAMP column of the two super tables.
        ts_col = []
        stb_list = [f"describe {self.dbname}.stb1", f"describe {self.dbname}.stb2"]
        for stb in stb_list:
            conn = requests.post(url=url, data=stb, headers=header)
            resp = conn.json()
            for col in resp["data"]:
                if "TIMESTAMP" == col[1]:
                    ts_col.append(col[0])

        # Map timestamp column name -> its position in this result set.
        index_dict = defaultdict(int)
        conn = requests.post(url=url, data=data, headers=header)
        resp = conn.json()
        if resp["data"] is None:
            return
        for index, meta in enumerate(resp["column_meta"]):
            if meta[0] in ts_col:
                index_dict[meta[0]] = index
        if len(index_dict) < 1:
            # No timestamp column in this result: nothing to validate.
            return

        # Expected rendering per precision for each endpoint family.
        sql_str_len = {"ms": (23,), "us": (26,), "ns": (29,)}
        sqlt_digits = {"ms": 12, "us": 15, "ns": 18}
        sqlutc_len = {
            "ms": (29, 28, 27, 25),
            "us": (32, 31, 30, 28),
            "ns": (35, 34, 33, 31),
        }

        if api in ("/rest/sql", f"/rest/sql/{self.dbname}") and self.precision in sql_str_len:
            expected_len = sql_str_len[self.precision]
            def value_ok(value):
                return len(value) in expected_len
        elif api in ("/rest/sqlt", f"/rest/sqlt/{self.dbname}") and self.precision in sqlt_digits:
            digits = sqlt_digits[self.precision]
            def value_ok(value):
                return isinstance(value, int) and round(math.log10(value)) == digits
        elif api in ("/rest/sqlutc", f"/rest/sqlutc/{self.dbname}") and self.precision in sqlutc_len:
            expected_len = sqlutc_len[self.precision]
            def value_ok(value):
                return len(value) in expected_len
        else:
            # Unknown endpoint/precision combination: nothing to validate.
            return

        for col_name, col_index in index_dict.items():
            for res_data in resp["data"]:
                if not value_ok(res_data[col_index]):
                    print(res_data)
                    tdLog.exit(
                        f"restful timestamp column err, url:{url}, sql: {data},result is: {res_data[col_index]}"
                    )
        return
    except:
        traceback.print_exc()
        raise
    pass
def sql_case_current(self):
    """Return the list of valid SQL statements expected to succeed on every
    REST endpoint.

    Groups: meta/status queries; super-table projections (columns/tags);
    aggregate, selector and scalar functions; interp/diff/derivative;
    filtered/windowed queries; the child-table (t1) equivalents; joins;
    sub-query and union.
    """
    case_list = [
        # meta / status queries
        "show databases",
        f"show {self.dbname}.stables",
        f"show {self.dbname}.tables",
        "select server_status()",
        "select client_version()",
        "select server_version()",
        "select database()",
        f"show create database {self.dbname}",
        f"show create stable {self.dbname}.stb1",
        # super-table column projections
        f"select * from {self.dbname}.stb1",
        f"select ts from {self.dbname}.stb1",
        f"select _c0 from {self.dbname}.stb1",
        f"select c1 from {self.dbname}.stb1",
        f"select c2 from {self.dbname}.stb1",
        f"select c3 from {self.dbname}.stb1",
        f"select c4 from {self.dbname}.stb1",
        f"select c5 from {self.dbname}.stb1",
        f"select c6 from {self.dbname}.stb1",
        f"select c7 from {self.dbname}.stb1",
        f"select c8 from {self.dbname}.stb1",
        f"select c9 from {self.dbname}.stb1",
        f"select c10 from {self.dbname}.stb1",
        # super-table tag projections
        f"select tbname from {self.dbname}.stb1",
        f"select tag1 from {self.dbname}.stb1",
        f"select tag2 from {self.dbname}.stb1",
        f"select tag3 from {self.dbname}.stb1",
        f"select tag4 from {self.dbname}.stb1",
        f"select tag5 from {self.dbname}.stb1",
        f"select tag6 from {self.dbname}.stb1",
        f"select tag7 from {self.dbname}.stb1",
        f"select tag8 from {self.dbname}.stb1",
        f"select tag9 from {self.dbname}.stb1",
        f"select tag10 from {self.dbname}.stb1",
        # aggregate / selector / scalar functions on the super table
        f"select count(*) from {self.dbname}.stb1",
        f"select count(c1) from {self.dbname}.stb1",
        f"select avg(c1) from {self.dbname}.stb1",
        f"select twa(c1) from {self.dbname}.stb1 group by tbname",
        f"select sum(c1) from {self.dbname}.stb1",
        f"select stddev(c1) from {self.dbname}.stb1",
        f"select min(c1) from {self.dbname}.stb1",
        f"select max(c1) from {self.dbname}.stb1",
        f"select first(c1) from {self.dbname}.stb1",
        f"select first(*) from {self.dbname}.stb1",
        f"select last(c1) from {self.dbname}.stb1",
        f"select last(*) from {self.dbname}.stb1",
        f"select top(c1, 3) from {self.dbname}.stb1",
        f"select bottom(c1, 3) from {self.dbname}.stb1",
        f"select apercentile(c1, 50, 't-digest') from {self.dbname}.stb1",
        f"select last_row(c1) from {self.dbname}.stb1",
        f"select last_row(*) from {self.dbname}.stb1",
        f"select interp(c1) from {self.dbname}.stb1 where ts=0 group by tbname",
        f"select interp(c1) from {self.dbname}.stb1 where ts=0 fill(next) group by tbname",
        f"select interp(c1) from {self.dbname}.stb1 where ts>0 and ts <100000000 every(5s) group by tbname",
        f"select diff(c1) from {self.dbname}.stb1 group by tbname",
        f"select derivative(c1, 10m, 0) from {self.dbname}.stb1 group by tbname",
        f"select derivative(c1, 10m, 1) from {self.dbname}.stb1 group by tbname",
        f"select spread(c1) from {self.dbname}.stb1",
        f"select ceil(c1) from {self.dbname}.stb1",
        f"select floor(c1) from {self.dbname}.stb1",
        f"select round(c1) from {self.dbname}.stb1",
        f"select c1*2+2%c2-c2/2 from {self.dbname}.stb1",
        # filtered / windowed / paginated super-table queries
        f"select max(c1) from {self.dbname}.stb1 where ts>'2021-12-05 18:25:41.136' and ts<'2021-12-05 18:25:44.13' interval(1s) sliding(500a) fill(NULL) group by tbname",
        f"select max(c1) from {self.dbname}.stb1 where (c1 >=0 and c1 <> 0 and c2 is not null or c1 < -1 or (c2 between 1 and 10) ) and tbname like 't_' ",
        f"select max(c1) from {self.dbname}.stb1 group by tbname order by ts desc slimit 2 soffset 2 limit 1 offset 0",
        f"select max(c1) from {self.dbname}.stb1 group by c6 order by ts desc slimit 1 soffset 1 limit 1 offset 0 ",
        # child-table column projections
        f"select * from {self.dbname}.t1",
        f"select ts from {self.dbname}.t1",
        f"select _c0 from {self.dbname}.t1",
        f"select c1 from {self.dbname}.t1",
        f"select c2 from {self.dbname}.t1",
        f"select c3 from {self.dbname}.t1",
        f"select c4 from {self.dbname}.t1",
        f"select c5 from {self.dbname}.t1",
        f"select c6 from {self.dbname}.t1",
        f"select c7 from {self.dbname}.t1",
        f"select c8 from {self.dbname}.t1",
        f"select c9 from {self.dbname}.t1",
        f"select c10 from {self.dbname}.t1",
        # child-table tag projections
        f"select tbname from {self.dbname}.t1",
        f"select tag1 from {self.dbname}.t1",
        f"select tag2 from {self.dbname}.t1",
        f"select tag3 from {self.dbname}.t1",
        f"select tag4 from {self.dbname}.t1",
        f"select tag5 from {self.dbname}.t1",
        f"select tag6 from {self.dbname}.t1",
        f"select tag7 from {self.dbname}.t1",
        f"select tag8 from {self.dbname}.t1",
        f"select tag9 from {self.dbname}.t1",
        f"select tag10 from {self.dbname}.t1",
        # aggregate / selector / scalar functions on the child table
        f"select count(*) from {self.dbname}.t1",
        f"select count(c1) from {self.dbname}.t1",
        f"select avg(c1) from {self.dbname}.t1",
        f"select twa(c1) from {self.dbname}.t1",
        f"select sum(c1) from {self.dbname}.t1",
        f"select stddev(c1) from {self.dbname}.t1",
        f"select leastsquares(c1, 1, 1) from {self.dbname}.t1",
        f"select min(c1) from {self.dbname}.t1",
        f"select max(c1) from {self.dbname}.t1",
        f"select first(c1) from {self.dbname}.t1",
        f"select first(*) from {self.dbname}.t1",
        f"select last(c1) from {self.dbname}.t1",
        f"select last(*) from {self.dbname}.t1",
        f"select top(c1, 3) from {self.dbname}.t1",
        f"select bottom(c1, 3) from {self.dbname}.t1",
        f"select percentile(c1, 50) from {self.dbname}.t1",
        f"select apercentile(c1, 50, 't-digest') from {self.dbname}.t1",
        f"select last_row(c1) from {self.dbname}.t1",
        f"select last_row(*) from {self.dbname}.t1",
        f"select interp(c1) from {self.dbname}.t1 where ts=0 ",
        f"select interp(c1) from {self.dbname}.t1 where ts=0 fill(next)",
        f"select interp(c1) from {self.dbname}.t1 where ts>0 and ts <100000000 every(5s)",
        f"select diff(c1) from {self.dbname}.t1",
        f"select derivative(c1, 10m, 0) from {self.dbname}.t1",
        f"select derivative(c1, 10m, 1) from {self.dbname}.t1",
        f"select spread(c1) from {self.dbname}.t1",
        f"select ceil(c1) from {self.dbname}.t1",
        f"select floor(c1) from {self.dbname}.t1",
        f"select round(c1) from {self.dbname}.t1",
        f"select c1*2+2%c2-c2/2 from {self.dbname}.t1",
        f"select max(c1) from {self.dbname}.t1 where ts>'2021-12-05 18:25:41.136' and ts<'2021-12-05 18:25:44.13' interval(1s) sliding(500a) fill(NULL)",
        f"select max(c1) from {self.dbname}.t1 where (c1 >=0 and c1 <> 0 and c2 is not null or c1 < -1 or (c2 between 1 and 10) ) and c10 like 'nchar___1' ",
        f"select max(c1) from {self.dbname}.t1 group by c6 order by ts desc ",
        # joins, sub-query and union
        f"select stb1.c1, stb2.c1 from {self.dbname}.stb1 stb1, {self.dbname}.stb2 stb2 where stb1.ts=stb2.ts and stb1.tag1=stb2.ttag1",
        f"select t1.c1, t2.c1 from {self.dbname}.t1 t1, {self.dbname}.t2 t2 where t1.ts=t2.ts",
        f"select c1 from (select c2 c1 from {self.dbname}.stb1) ",
        f"select c1 from {self.dbname}.t1 union all select c1 from {self.dbname}.t2"
    ]
    return case_list
def sql_case_err(self):
    # SQL statements the server must reject (syntax error / unsupported usage).
    case_list = [
        "show database",
        f"select percentile(c1, 50) from {self.dbname}.stb1 group by tbname",
        f"select leastsquares(c1, 1, 1) from {self.dbname}.stb1",
    ]
    return case_list

def port_case_current(self):
    # The only valid REST port (taosAdapter default).
    case_list = [6041]
    return case_list

def port_case_err(self):
    # Ports that must fail: wrong service, unused, out-of-range, wrong type.
    case_list = [
        6030,
        6051,
        666666666,
        None,
        "abcd"
    ]
    return case_list

def api_case_current(self):
    # All valid api paths, with and without an explicit database suffix.
    case_List = [
        "/rest/sql",
        f"/rest/sql/{self.dbname}",
        "/rest/sqlt",
        f"/rest/sqlt/{self.dbname}",
        "/rest/sqlutc",
        f"/rest/sqlutc/{self.dbname}"
    ]
    return case_List

def api_case_err(self):
    # Api paths that must fail: empty, misspelled, wrong protocol, wrong type.
    case_list = [
        "",
        "/rest1/sql",
        "/rest/sqlsqltsqlutc",
        1,
        ["/rest", "/sql"],
        "/influxdb/v1/write",
        "/opentsdb/v1/put/json/db",
        "/opentsdb/v1/put/telnet/db",
        "/rest*",
        "*"
    ]
    return case_list

def header_case_current(self):
    # Valid Authorization headers: Basic (base64 root:taosdata) and Taosd token.
    case_list = [
        {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='},
        {'Authorization': 'Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04'}
    ]
    return case_list

def header_case_err(self):
    # Authorization headers that must fail: empty credential, bad token, wrong type.
    case_list = [
        {'Authorization': 'Basic '},
        {'Authorization': 'Taosd /root/taosdata'},
        {'Authorization': True}
    ]
    return case_list
def _run_rest_cases(self, cases, label, checker, kw_name=None, data="show databases"):
    """Shared driver for the run_case_* methods below.

    For each case, builds a RestMsgInfo (optionally overriding the constructor
    argument named `kw_name` with the case value) and hands it to `checker`
    together with `data`. Replaces seven copy-pasted loops.
    """
    for count, case in enumerate(cases):
        print(f"{label} case{count}: ", end="")
        kwargs = {kw_name: case} if kw_name is not None else {}
        query_msg = RestMsgInfo(base_url=self.base_url, **kwargs)
        checker(query_msg=query_msg, data=data)

def run_case_api_err(self):
    # Invalid api paths must be rejected.
    self._run_rest_cases(self.api_case_err(), "err api", self.check_err_case, "api_url")

def run_case_port_err(self):
    # Invalid ports must be rejected.
    self._run_rest_cases(self.port_case_err(), "err port", self.check_err_case, "port")

def run_case_header_err(self):
    # Invalid authorization headers must be rejected.
    self._run_rest_cases(self.header_case_err(), "err header", self.check_err_case, "header")

def run_case_sql_err(self):
    # Invalid SQL must come back as status "error"; here the case IS the payload.
    count = 0
    for case in self.sql_case_err():
        print(f"err sql case{count}: ", end="")
        query_msg = RestMsgInfo(base_url=self.base_url)
        self.check_err_sql_case(query_msg=query_msg, data=case)
        count += 1

def run_case_port_current(self):
    # The valid port must succeed.
    self._run_rest_cases(self.port_case_current(), "current port", self.check_current_case, "port")

def run_case_api_current(self):
    # Every valid api path must succeed.
    self._run_rest_cases(self.api_case_current(), "current api", self.check_current_case, "api_url")

def run_case_header_current(self):
    # Every valid authorization header must succeed.
    self._run_rest_cases(self.header_case_current(), "current header", self.check_current_case, "header")

def run_case_sql_current(self):
    # Every valid SQL statement must succeed on every valid endpoint, and
    # its timestamp columns must be rendered correctly for that endpoint.
    count = 0
    api_cases = self.api_case_current()
    for case in self.sql_case_current():
        print(f"current sql case{count}: ", end="")
        for api in api_cases:
            query_msg = RestMsgInfo(base_url=self.base_url, api_url=api)
            self.check_current_case(query_msg=query_msg, data=case)
            self.check_case_res_data(query_msg=query_msg, data=case)
        count += 1
def run_case_err(self):
    """Run every negative (expected-failure) case group."""
    for step in (self.run_case_api_err, self.run_case_port_err,
                 self.run_case_header_err, self.run_case_sql_err):
        step()

def run_case_current(self):
    """Run every positive (expected-success) case group."""
    for step in (self.run_case_api_current, self.run_case_port_current,
                 self.run_case_header_current, self.run_case_sql_current):
        step()

def run_all_case(self):
    """Run the negative suite followed by the positive suite."""
    self.run_case_err()
    self.run_case_current()
def set_default_args(self):
    """Write a default rest_query_config.json next to this script and return
    its path. By default all err/current sub-groups are enabled and
    "all_current" drives the dispatch in run()."""
    nowtime = int(round(time.time() * 1000))
    url = "127.0.0.1"
    per_table_rows = 100
    tbnum = 10
    database_name = "db"
    precision ="ms"
    clear_data = True
    config_default = {
        "base_url" : url,
        "precision" : precision,
        "clear_data" : clear_data,
        "database_name": database_name,
        "tbnum" : tbnum,
        "data_row" : per_table_rows,
        "basetime" : nowtime,
        "all_case" : False,
        "all_err" : False,
        "all_current" : True,
        "err_case" : {
            "port_err" : True,
            "api_err" : True,
            "header_err" : True,
            "sql_err" : True,
        },
        "current_case" : {
            "port_current" : True,
            "api_current" : True,
            "header_current" : True,
            "sql_current" : True,
        }
    }
    config_file_name = f"{os.path.dirname(os.path.abspath(__file__))}/rest_query_config.json"
    with open(config_file_name, "w") as f:
        json.dump(config_default, f)
    return config_file_name
def run(self):
    """Entry point: load (or create) the JSON config, build the fixtures,
    then dispatch the selected case groups.

    Dispatch rules (unchanged from the original):
      * no flag set at all            -> run everything
      * both all_err and all_current  -> run everything
      * all_err / all_current alone   -> run just that suite
      * otherwise                     -> run each enabled sub-group
    """
    config_file = f"{os.path.dirname(os.path.abspath(__file__))}/rest_query_config.json"
    if not os.path.isfile(config_file):
        config_file = self.set_default_args()
    with open(config_file, "r", encoding="utf-8") as f:
        cfg = json.load(f)

    tbnum = cfg["tbnum"]
    data_row = cfg["data_row"]
    basetime = cfg["basetime"]
    self.dbname = cfg["database_name"]
    self.base_url = cfg["base_url"]
    self.precision = cfg["precision"]

    # Rebuild the database fixtures unless the config asks to keep them.
    clear_data = bool(cfg["clear_data"])
    if clear_data:
        self.rest_test_table(dbname=self.dbname, tbnum=tbnum)
        self.rest_test_data(tbnum=tbnum, data_row=data_row, basetime=basetime)

    # bool(...) replaces the original `True if x else False`; `and`/`any`
    # replace bitwise `&`/`|` on booleans — same truth table, clearer intent.
    run_all_case = bool(cfg["all_case"])
    run_all_err_case = bool(cfg["all_err"])
    run_all_current_case = bool(cfg["all_current"])
    run_port_err_case = bool(cfg["err_case"]["port_err"])
    run_api_err_case = bool(cfg["err_case"]["api_err"])
    run_header_err_case = bool(cfg["err_case"]["header_err"])
    run_sql_err_case = bool(cfg["err_case"]["sql_err"])
    run_port_current_case = bool(cfg["current_case"]["port_current"])
    run_api_current_case = bool(cfg["current_case"]["api_current"])
    run_header_current_case = bool(cfg["current_case"]["header_current"])
    run_sql_current_case = bool(cfg["current_case"]["sql_current"])

    if not any((run_all_err_case, run_all_current_case, run_port_err_case,
                run_api_err_case, run_header_err_case, run_sql_err_case,
                run_port_current_case, run_api_current_case,
                run_header_current_case, run_sql_current_case)):
        run_all_case = True
    if run_all_err_case and run_all_current_case:
        run_all_case = True

    if run_all_case:
        self.run_all_case()
        return
    if run_all_err_case:
        self.run_case_err()
        return
    if run_all_current_case:
        self.run_case_current()
        return

    if run_port_err_case:
        self.run_case_port_err()
    if run_api_err_case:
        self.run_case_api_err()
    if run_header_err_case:
        self.run_case_header_err()
    if run_sql_err_case:
        self.run_case_sql_err()
    if run_port_current_case:
        self.run_case_port_current()
    if run_api_current_case:
        self.run_case_api_current()
    if run_header_current_case:
        self.run_case_header_current()
    if run_sql_current_case:
        self.run_case_sql_current()
def stop(self):
    # Framework teardown hook: release the SQL connection and report success.
    tdSql.close()
    tdLog.success("%s successfully executed" % __file__)
# Register this test case with the framework for both supported platforms.
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
# Batch of python test cases executed sequentially by the CI full-test driver.
python3 test.py -f 1-insert/0-sql/basic.py
python3 test.py -f 0-management/1-stable/create_col_tag.py
python3 test.py -f 4-taosAdapter/taosAdapter_query.py
python3 test.py -f 4-taosAdapter/taosAdapter_insert.py
#python3 test.py -f 2-query/9-others/TD-11389.py # this case will run when this bug fix TD-11389
\ No newline at end of file
#python3 test.py -f 2-query/9-others/TD-11389.py # this case will run when this bug fix TD-11389
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册