diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in
index b2f335e1f7b8a30a46713a322c2ef95b5605c9d9..ba937b40c14305dbf45ccc81db9bb79ed9d89cb5 100644
--- a/cmake/taosadapter_CMakeLists.txt.in
+++ b/cmake/taosadapter_CMakeLists.txt.in
@@ -2,7 +2,7 @@
 # taosadapter
 ExternalProject_Add(taosadapter
         GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
-        GIT_TAG cb1e89c
+        GIT_TAG e02ddb2
         SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
         BINARY_DIR ""
         #BUILD_IN_SOURCE TRUE
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index 0110b27b325e9479455bbbf1e8c77e73f69d950d..d8bf3a09b4a73b8985ba31d408a19a73cd480e47 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -2,7 +2,7 @@
 # taos-tools
 ExternalProject_Add(taos-tools
         GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
-        GIT_TAG 149ac34
+        GIT_TAG 0681d8b
         SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
         BINARY_DIR ""
         #BUILD_IN_SOURCE TRUE
diff --git a/docs/en/12-taos-sql/29-changes.md b/docs/en/12-taos-sql/29-changes.md
index a695a2cae18f28e090816ec98a978674a028df30..f4606f263fbda42e8e221d067912b884a3be4a40 100644
--- a/docs/en/12-taos-sql/29-changes.md
+++ b/docs/en/12-taos-sql/29-changes.md
@@ -27,7 +27,7 @@ The following data types can be used in the schema for standard tables.
 | - | :------- | :-------- | :------- |
 | 1 | ALTER ACCOUNT | Deprecated | This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
 | 2 | ALTER ALL DNODES | Added | Modifies the configuration of all dnodes.
-| 3 | ALTER DATABASE | Modified | Deprecated 
+| 3 | ALTER DATABASE | Modified | Deprecated
 | 4 | ALTER STABLE | Modified | Deprecated
 | 5 | ALTER TABLE | Modified | Deprecated
 | 6 | ALTER USER | Modified | Deprecated
diff --git a/docs/en/14-reference/02-rest-api/02-rest-api.mdx b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
index 1691b8be8b53480199898ab4ec6b5917eba7979f..409e079b92d5c8375ce6b3da60c0b2aa053ef662 100644
--- a/docs/en/14-reference/02-rest-api/02-rest-api.mdx
+++ b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
@@ -382,6 +382,130 @@ Response body:
 }
 ```
 
+## Differences in the REST API between TDengine 2.x and 3.0
+
+### URI
+
+| URI                   |    TDengine 2.x    |                     TDengine 3.0                     |
+| :--------------------| :------------------: | :--------------------------------------------------: |
+| /rest/sql             | Supported          | Supported (with a different response code and body)  |
+| /rest/sqlt            | Supported          | No longer supported                                  |
+| /rest/sqlutc          | Supported          | No longer supported                                  |
+
+### HTTP code
+
+| HTTP code             |    TDengine 2.x    | TDengine 3.0 |                  Note                  |
+| :--------------------| :------------------: | :----------: | :-----------------------------------: |
+| 200                   | Supported          | Supported    | Success, or an error returned by taosc |
+| 400                   | Not supported      | Supported    | Parameter error                        |
+| 401                   | Not supported      | Supported    | Authentication failure                 |
+| 404                   | Supported          | Supported    | URI does not exist                     |
+| 500                   | Not supported      | Supported    | Internal error                         |
+| 503                   | Supported          | Supported    | Insufficient system resources          |
+
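+In TDengine 3.0, an error is therefore reflected both in the HTTP status code and in the response body. As a hypothetical illustration (the exact error code and message depend on the statement and server version), a request containing invalid SQL returns HTTP 400 with a body such as:
+
+```JSON
+{
+  "code": 9728,
+  "desc": "syntax error near ..."
+}
+```
+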
+### Response body
+
+#### REST response body returned by TDengine 2.x
+
+```JSON
+{
+  "status": "succ",
+  "head": [
+    "name",
+    "created_time",
+    "ntables",
+    "vgroups",
+    "replica",
+    "quorum",
+    "days",
+    "keep1,keep2,keep(D)",
+    "cache(MB)",
+    "blocks",
+    "minrows",
+    "maxrows",
+    "wallevel",
+    "fsync",
+    "comp",
+    "precision",
+    "status"
+  ],
+  "data": [
+    [
+      "log",
+      "2020-09-02 17:23:00.039",
+      4,
+      1,
+      1,
+      1,
+      10,
+      "30,30,30",
+      1,
+      3,
+      100,
+      4096,
+      1,
+      3000,
+      2,
+      "us",
+      "ready"
+    ]
+  ],
+  "rows": 1
+}
+```
+
+#### REST response body returned by TDengine 3.0
+
+```JSON
+{
+  "code": 0,
+  "column_meta": [
+    [
+      "name",
+      "VARCHAR",
+      64
+    ],
+    [
+      "ntables",
+      "BIGINT",
+      8
+    ],
+    [
+      "status",
+      "VARCHAR",
+      10
+    ]
+  ],
+  "data": [
+    [
+      "information_schema",
+      16,
+      "ready"
+    ],
+    [
+      "performance_schema",
+      9,
+      "ready"
+    ]
+  ],
+  "rows": 2
+}
+```
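+
+The 3.0 body above is what a query like the following returns (a sketch; adjust the host, port, and credentials to your deployment):
+
+```bash
+curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \
+  -d "select name, ntables, status from information_schema.ins_databases;" \
+  127.0.0.1:6041/rest/sql
+```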
+
 ## Reference
 
 [taosAdapter](/reference/taosadapter/)
diff --git a/docs/en/14-reference/03-connector/_category_.yml b/docs/en/14-reference/03-connector/_category_.yml
index e470f64aa013b137f05f03db112641faf2956297..6a766e96574844db87a70ff7eb5f0005cb6acbfb 100644
--- a/docs/en/14-reference/03-connector/_category_.yml
+++ b/docs/en/14-reference/03-connector/_category_.yml
@@ -1 +1 @@
-label: "connector"
\ No newline at end of file
+label: "Connector"
diff --git a/docs/en/14-reference/13-schemaless/13-schemaless.md b/docs/en/14-reference/13-schemaless/13-schemaless.md
index 3f75364081d7ec242d96a30f3adf0861637a06eb..aad0e63a4228ca303302d4a3970182355f750d53 100644
--- a/docs/en/14-reference/13-schemaless/13-schemaless.md
+++ b/docs/en/14-reference/13-schemaless/13-schemaless.md
@@ -3,13 +3,11 @@ title: Schemaless Writing
 description: This document describes how to use the schemaless write component of TDengine.
 ---
 
-In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. Schemaless writing automatically creates storage structures for your data as it is being written to TDengine, so that you do not need to create supertables in advance. When necessary, schemaless writing
-will automatically add the required columns to ensure that the data written by the user is stored correctly.
+In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. Schemaless writing automatically creates storage structures for your data as it is being written to TDengine, so that you do not need to create supertables in advance. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly.
 
 The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and they lack readability.
 
-Tips:
-The schemaless write will automatically create a table. You do not need to create a table manually, or an unknown error may occur.
+Note: Schemaless writing creates tables automatically. Creating tables manually is not supported with schemaless writing.
 
 ## Schemaless Writing Line Protocol
@@ -50,8 +48,7 @@ In the schemaless writing data line protocol, each data item in the field_set ne
 
 - `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
 
-For example, the following data rows write c1 column as 3 (BIGINT), c2 column as false (BOOL), c3 column
-as "passit" (BINARY), c4 column as 4 (DOUBLE), and the primary key timestamp as 1626006833639000000 to child table with the t1 label as "3" (NCHAR), the t2 label as "4" (NCHAR), and the t3 label as "t3" (NCHAR) and the super table named `st`.
+For example, the following string indicates that one row of data is written to the st supertable with the t1 tag as "3" (NCHAR), the t2 tag as "4" (NCHAR), and the t3 tag as "t3" (NCHAR); the c1 column is 3 (BIGINT), the c2 column is false (BOOL), the c3 column is "passit" (BINARY), the c4 column is 4 (DOUBLE), and the primary key timestamp is 1626006833639000000.
 
 ```json
 st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
@@ -69,23 +66,31 @@ Schemaless writes process row data according to the following principles.
 
    "measurement,tag_key1=tag_value1,tag_key2=tag_value2"
 ```
 
+:::tip
 Note that tag_key1, tag_key2 are not the original order of the tags entered by the user but the result of using the tag names in ascending order of the strings. Therefore, tag_key1 is not the first tag entered in the line protocol.
-The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t_" is a fixed prefix that every table generated by this mapping relationship has.
+The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t\_" is a fixed prefix that every table generated by this mapping relationship has; a sketch of this rule follows the list below.
+:::
+
 You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000` and the cpu1 table will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
 
 2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
+   **Important:** Manually creating supertables for schemaless writing is not supported. Schemaless writing creates appropriate supertables automatically.
+
 3. If the subtable obtained by parsing the line protocol does not exist, schemaless writing creates the subtable according to the subtable name determined in step 1 or 2.
+
 4. If the specified tag or regular column in the data row does not exist, the corresponding tag or regular column is added to the super table (only incremental).
-5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to
-   NULL.
+
+5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to NULL.
+
+6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
+
 7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
-8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3, discarded since 3.0.3.0)
-:::tip
-All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
-48KB, and the total length of tag value cannot exceed 16KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
+8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.
+   Note: TDengine 3.0.3.0 and later automatically detect whether the order is consistent. This parameter is no longer used.
+
+:::tip
+All processing logic of schemaless writing still follows TDengine's underlying restrictions on data structures; for example, the total length of each row of data cannot exceed 48 KB and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
 :::
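+
+The subtable-name rule from step 1 can be checked with a quick sketch (this assumes the hashed input is exactly the sorted "measurement,tag_key=tag_value" string shown above; the internal byte layout TDengine hashes is not spelled out here):
+
+```bash
+# "t_" fixed prefix + MD5 of the tag-sorted line-protocol key string
+printf 't_%s\n' "$(printf '%s' 'st,t1=3,t2=4,t3=t3' | md5sum | cut -d' ' -f1)"
+```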
 
 ## Time resolution recognition
@@ -114,8 +119,7 @@ In OpenTSDB file and JSON protocol modes, the precision of the timestamp is dete
 
 ## Data Model Mapping
 
-This section describes how data in line protocol is mapped to a schema. The data measurement in each line is mapped to a
-supertable name. The tag name in tag_set is the tag name in the schema, and the name in field_set is the column name in the schema. The following example shows how data is mapped:
+This section describes how data in InfluxDB line protocol is mapped to a schema. The data measurement in each line is mapped to a supertable name. The tag name in tag_set is the tag name in the schema, and the name in field_set is the column name in the schema. The following example shows how data is mapped:
 
 ```json
 st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
@@ -160,7 +164,7 @@ The preceding data includes a new entry, c6, with type binary(6). When this occu
 
 TDengine guarantees the idempotency of data writes. This means that you can repeatedly call the API to perform write operations with bad data. However, TDengine does not guarantee the atomicity of multi-row writes. In a multi-row write, some data may be written successfully and other data unsuccessfully.
 
-##: Error Codes
+## Error Codes
 
 The TSDB_CODE_TSC_LINE_SYNTAX_ERROR indicates an error in the schemaless writing component. This error occurs when writing text. For other errors, schemaless writing uses the standard TDengine error codes.
diff --git a/docs/zh/08-connector/02-rest-api.mdx b/docs/zh/08-connector/02-rest-api.mdx
index a081595bca5f66bff3c60808126516633f92614b..f3f1e087d809b7ce98d27721ada13b7a5fdf7159 100644
--- a/docs/zh/08-connector/02-rest-api.mdx
+++ b/docs/zh/08-connector/02-rest-api.mdx
@@ -383,6 +383,133 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
 }
 ```
 
+## Differences in the REST API between TDengine 2.x and 3.0
+
+### URI
+
+| URI                   |    TDengine 2.x    |                     TDengine 3.0                     |
+| :--------------------| :------------------: | :--------------------------------------------------: |
+| /rest/sql             | Supported          | Supported (with a different response code and body)  |
+| /rest/sqlt            | Supported          | No longer supported                                  |
+| /rest/sqlutc          | Supported          | No longer supported                                  |
+
+### HTTP code
+
+| HTTP code             |    TDengine 2.x    | TDengine 3.0 |                  Note                  |
+| :--------------------| :------------------: | :----------: | :-----------------------------------: |
+| 200                   | Supported          | Supported    | Success, or an error returned by taosc |
+| 400                   | Not supported      | Supported    | Parameter error                        |
+| 401                   | Not supported      | Supported    | Authentication failure                 |
+| 404                   | Supported          | Supported    | URI does not exist                     |
+| 500                   | Not supported      | Supported    | Internal error                         |
+| 503                   | Supported          | Supported    | Insufficient system resources          |
+
+### Response code and body
+
+#### TDengine 2.x response code and body
+
+```JSON
+{
+  "status": "succ",
+  "head": [
+    "name",
+    "created_time",
+    "ntables",
+    "vgroups",
+    "replica",
+    "quorum",
+    "days",
+    "keep1,keep2,keep(D)",
+    "cache(MB)",
+    "blocks",
+    "minrows",
+    "maxrows",
+    "wallevel",
+    "fsync",
+    "comp",
+    "precision",
+    "status"
+  ],
+  "data": [
+    [
+      "log",
+      "2020-09-02 17:23:00.039",
+      4,
+      1,
+      1,
+      1,
+      10,
+      "30,30,30",
+      1,
+      3,
+      100,
+      4096,
+      1,
+      3000,
+      2,
+      "us",
+      "ready"
+    ]
+  ],
+  "rows": 1
+}
+```
+
+#### TDengine 3.0 response code and body
+
+```JSON
+{
+  "code": 0,
+  "column_meta": [
+    [
+      "name",
+      "VARCHAR",
+      64
+    ],
+    [
+      "ntables",
+      "BIGINT",
+      8
+    ],
+    [
+      "status",
+      "VARCHAR",
+      10
+    ]
+  ],
+  "data": [
+    [
+      "information_schema",
+      16,
+      "ready"
+    ],
+    [
+      "performance_schema",
+      9,
+      "ready"
+    ]
+  ],
+  "rows": 2
+}
+```
+
 ## 参考
 
 [taosAdapter](/reference/taosadapter/)
diff --git a/docs/zh/12-taos-sql/14-stream.md b/docs/zh/12-taos-sql/14-stream.md
index f10077345f394936d3df4eccf8811fc67f862c15..12e466f3491b779275d29bcfa79c3203aee01056 100644
--- a/docs/zh/12-taos-sql/14-stream.md
+++ b/docs/zh/12-taos-sql/14-stream.md
@@ -10,8 +10,11 @@ description: Detailed syntax of the SQL statements for stream processing
 
 ```sql
 CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name[(field1_name, ...)] [TAGS (create_definition [, create_definition] ...)] SUBTABLE(expression) AS subquery
 stream_options: {
- TRIGGER        [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
- WATERMARK      time
+ TRIGGER        [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
+ WATERMARK      time
+ IGNORE EXPIRED [0|1]
+ DELETE_MARK    time
+ FILL_HISTORY   [0|1]
 }
 ```
@@ -202,3 +205,11 @@ In the PARTITION clause, an alias cc is defined for concat("tag-", tbname), which corresponds
 The TAG information is checked as follows:
 1. Whether the tag schemas match; if not, data types are converted automatically. Currently an error is reported only when the data length exceeds 4096 bytes; type conversion is performed in all other cases.
 2. Whether the number of tags is the same; if not, the mapping between the tags of the supertable and those of the subquery must be specified explicitly, otherwise an error is reported. If the numbers match, the mapping may be specified or omitted; when omitted, tags are matched by position.
+
+## Cleaning Up Intermediate State
+
+```
+DELETE_MARK    time
+```
+DELETE_MARK deletes cached window states, that is, the intermediate results of stream processing. If it is not set, the default value is 10 years.
+T = latest event time - DELETE_MARK
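+
+For example, the following sketch (the stream, supertable, and column names are hypothetical) keeps intermediate window state for 7 days:
+
+```sql
+CREATE STREAM IF NOT EXISTS power_stream
+  TRIGGER WINDOW_CLOSE WATERMARK 10s DELETE_MARK 7d
+  INTO power_stb AS
+    SELECT _wstart, AVG(voltage) FROM meters PARTITION BY tbname INTERVAL(1m);
+```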
diff --git a/docs/zh/12-taos-sql/29-changes.md b/docs/zh/12-taos-sql/29-changes.md
index 73fa15313b183d3731592f6c3e84e8f01e581301..a797966f57a5d9c8c942de1cce3dd3209c067de6 100644
--- a/docs/zh/12-taos-sql/29-changes.md
+++ b/docs/zh/12-taos-sql/29-changes.md
@@ -27,7 +27,7 @@ description: "Description of the syntax changes in TDengine 3.0"
 | - | :------- | :-------- | :------- |
 | 1 | ALTER ACCOUNT | Deprecated | An Enterprise Edition feature in 2.x that is no longer supported in 3.0. The syntax is retained for now; executing it returns the error "This statement is no longer supported".
 | 2 | ALTER ALL DNODES | Added | Modifies the parameters of all dnodes.
-| 3 | ALTER DATABASE | Modified | Deprecated 
+| 3 | ALTER DATABASE | Modified | Deprecated
 | 4 | ALTER STABLE | Modified | Deprecated
 | 5 | ALTER TABLE | Modified | Deprecated
 | 6 | ALTER USER | Modified | Deprecated
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index 849ab5a92afd6d3154f6985940f45c5ea1f431ca..a1a95d9f30472963ffe07d9eda4aa05aa60d9234 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -573,16 +573,18 @@ function install_config() {
 }
 
 function install_share_etc() {
+  [ ! -d ${script_dir}/share/etc ] && return
   for c in `ls ${script_dir}/share/etc/`; do
     if [ -e /etc/$c ]; then
       out=/etc/$c.new.`date +%F`
-      ${csudo}cp -f ${script_dir}/share/etc/$c $out
+      ${csudo}cp -f ${script_dir}/share/etc/$c $out ||:
     else
-      ${csudo}cp -f ${script_dir}/share/etc/$c /etc/$c
+      ${csudo}cp -f ${script_dir}/share/etc/$c /etc/$c ||:
     fi
   done
 
-  ${csudo} cp ${script_dir}/share/srv/* ${service_config_dir}
+  [ ! -d ${script_dir}/share/srv ] && return
+  ${csudo} cp ${script_dir}/share/srv/* ${service_config_dir} ||:
 }
 
 function install_log() {
@@ -612,7 +614,7 @@ function install_examples() {
 
 function install_web() {
   if [ -d "${script_dir}/share" ]; then
-    ${csudo}cp -rf ${script_dir}/share/* ${install_main_dir}/share
+    ${csudo}cp -rf ${script_dir}/share/* ${install_main_dir}/share > /dev/null 2>&1 ||:
   fi
 }
 
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index 12e215c62ba2adc3e776b50c7aef95a62b2a6e01..0dce526db6996f4a55090c3726483db83b752d2b 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -150,7 +150,7 @@ fi
 mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
 mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${serverName}.deb
 mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm
-mkdir -p ${install_dir}/share && cp -rf ${build_dir}/share/{etc,srv} ${install_dir}/share
+mkdir -p ${install_dir}/share && cp -rf ${build_dir}/share/{etc,srv} ${install_dir}/share ||:
 
 if [ $adapterName != "taosadapter" ]; then
   mv ${install_dir}/cfg/${clientName2}adapter.toml ${install_dir}/cfg/$adapterName.toml
diff --git a/source/client/inc/clientStmt.h b/source/client/inc/clientStmt.h
index 0c9696f1c81a0cfe3be40e8207bd005ee375be94..cbef80b6daa63fd3192aae98cfb6170893381894 100644
--- a/source/client/inc/clientStmt.h
+++ b/source/client/inc/clientStmt.h
@@ -144,6 +144,15 @@ extern char *gStmtStatusStr[];
       goto _return;                  \
     }                                \
   } while (0)
+#define STMT_ERRI_JRET(c)            \
+  do {                               \
+    code = c;                        \
+    if (code != TSDB_CODE_SUCCESS) { \
+      terrno = code;                 \
+      goto _return;                  \
+    }                                \
+  } while (0)
+
 #define STMT_ELOG(param, ...) qError("stmt:%p " param, pStmt, __VA_ARGS__)
 #define STMT_DLOG(param, ...) 
qDebug("stmt:%p " param, pStmt, __VA_ARGS__) diff --git a/source/client/src/clientJniConnector.c b/source/client/src/clientJniConnector.c index 2f4bafe26aa29a48e7c51c82275f14dba15a400d..d2a9665eee432e91547095016ad97c1f52a83319 100644 --- a/source/client/src/clientJniConnector.c +++ b/source/client/src/clientJniConnector.c @@ -816,7 +816,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI (*env)->ReleaseStringUTFChars(env, jname, name); jniError("bindTableName jobj:%p, conn:%p, code: 0x%x", jobj, tsconn, code); - return JNI_TDENGINE_ERROR; + return code; } jniDebug("jobj:%p, conn:%p, set stmt bind table name:%s", jobj, tsconn, name); @@ -891,7 +891,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsI if (code != TSDB_CODE_SUCCESS) { jniError("tableNameTags jobj:%p, conn:%p, code: 0x%x", jobj, tsconn, code); - return JNI_TDENGINE_ERROR; + return code; } return JNI_SUCCESS; } @@ -957,7 +957,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp( if (code != TSDB_CODE_SUCCESS) { jniError("bindColData jobj:%p, conn:%p, code: 0x%x", jobj, tscon, code); - return JNI_TDENGINE_ERROR; + return code; } return JNI_SUCCESS; @@ -980,7 +980,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_addBatchImp(JNIEn int32_t code = taos_stmt_add_batch(pStmt); if (code != TSDB_CODE_SUCCESS) { jniError("add batch jobj:%p, conn:%p, code: 0x%x", jobj, tscon, code); - return JNI_TDENGINE_ERROR; + return code; } jniDebug("jobj:%p, conn:%p, stmt closed", jobj, tscon); @@ -1004,7 +1004,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(J int32_t code = taos_stmt_execute(pStmt); if (code != TSDB_CODE_SUCCESS) { jniError("excute batch jobj:%p, conn:%p, code: 0x%x", jobj, tscon, code); - return JNI_TDENGINE_ERROR; + return code; } jniDebug("jobj:%p, conn:%p, batch execute", jobj, tscon); @@ -1028,7 +1028,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv int32_t code = taos_stmt_close(pStmt); if (code != TSDB_CODE_SUCCESS) { jniError("close stmt jobj:%p, conn:%p, code: 0x%x", jobj, tscon, code); - return JNI_TDENGINE_ERROR; + return code; } jniDebug("jobj:%p, conn:%p, stmt closed", jobj, tscon); diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 94c7fa74d4a23ec8050b0d7cec569af6a3d2ef41..17150286e1407e65a38e040877da7e053ca2e946 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -699,7 +699,7 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns, pReq.numOfTags = 1; SField field = {0}; field.type = TSDB_DATA_TYPE_NCHAR; - field.bytes = 1; + field.bytes = TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE; strcpy(field.name, tsSmlTagName); taosArrayPush(pReq.pTags, &field); } diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c index 71a41c68e7115d254c1c25b87b9f5aaf54498394..6e529f1a0b44556d5f07e750f050f83711fdc0b2 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c @@ -975,15 +975,17 @@ int stmtIsInsert(TAOS_STMT* stmt, int* insert) { } int stmtGetTagFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) { + int32_t code = 0; STscStmt* pStmt = (STscStmt*)stmt; + int32_t preCode = pStmt->errCode; STMT_DLOG_E("start to get tag fields"); if (STMT_TYPE_QUERY == pStmt->sql.type) { - STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR); + STMT_ERRI_JRET(TSDB_CODE_TSC_STMT_API_ERROR); } - STMT_ERR_RET(stmtSwitchStatus(pStmt, 
STMT_FETCH_FIELDS)); + STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 && STMT_TYPE_MULTI_INSERT != pStmt->sql.type) { @@ -995,27 +997,33 @@ int stmtGetTagFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) { pStmt->exec.pRequest = NULL; } - STMT_ERR_RET(stmtCreateRequest(pStmt)); + STMT_ERRI_JRET(stmtCreateRequest(pStmt)); if (pStmt->bInfo.needParse) { - STMT_ERR_RET(stmtParseSql(pStmt)); + STMT_ERRI_JRET(stmtParseSql(pStmt)); } - STMT_ERR_RET(stmtFetchTagFields(stmt, nums, fields)); + STMT_ERRI_JRET(stmtFetchTagFields(stmt, nums, fields)); - return TSDB_CODE_SUCCESS; +_return: + + pStmt->errCode = preCode; + + return code; } int stmtGetColFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) { + int32_t code = 0; STscStmt* pStmt = (STscStmt*)stmt; + int32_t preCode = pStmt->errCode; STMT_DLOG_E("start to get col fields"); if (STMT_TYPE_QUERY == pStmt->sql.type) { - STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR); + STMT_ERRI_JRET(TSDB_CODE_TSC_STMT_API_ERROR); } - STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); + STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 && STMT_TYPE_MULTI_INSERT != pStmt->sql.type) { @@ -1027,15 +1035,19 @@ int stmtGetColFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) { pStmt->exec.pRequest = NULL; } - STMT_ERR_RET(stmtCreateRequest(pStmt)); + STMT_ERRI_JRET(stmtCreateRequest(pStmt)); if (pStmt->bInfo.needParse) { - STMT_ERR_RET(stmtParseSql(pStmt)); + STMT_ERRI_JRET(stmtParseSql(pStmt)); } - STMT_ERR_RET(stmtFetchColFields(stmt, nums, fields)); + STMT_ERRI_JRET(stmtFetchColFields(stmt, nums, fields)); - return TSDB_CODE_SUCCESS; +_return: + + pStmt->errCode = preCode; + + return code; } int stmtGetParamNum(TAOS_STMT* stmt, int* nums) { diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index e4e0d608de4de3ae95786f3b1754b18ca3805cdd..756ca008b06eaa19b2871295d816452210cb1427 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -114,11 +114,11 @@ static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo); if (code != 0) { if (terrno != 0) code = terrno; - dGError("vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr(code)); + dGError("vnodeProcessFetchMsg vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr()); vmSendRsp(pMsg, code); } - dGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code); + dGTrace("vnodeProcessFetchMsg vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code); rpcFreeCont(pMsg->pCont); taosFreeQitem(pMsg); } diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c index 34914d80f0c1d8b7ca37112b750b2d7650bd65e5..3a1c4ce58f23cc951bf4ab41014eb89bde443d8b 100644 --- a/source/dnode/mnode/impl/src/mndUser.c +++ b/source/dnode/mnode/impl/src/mndUser.c @@ -156,7 +156,7 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) { size_t valueLen = 0; valueLen = strlen(stb); size += sizeof(int32_t); - size += keyLen; + size += valueLen; stb = taosHashIterate(pUser->writeTbs, stb); } @@ -369,7 +369,7 @@ static SSdbRow *mndUserActionDecode(SSdbRaw *pRaw) { int32_t valuelen = 0; SDB_GET_INT32(pRaw, dataPos, &valuelen, _OVER); char *value = taosMemoryCalloc(valuelen, sizeof(char)); - memset(value, 
0, keyLen); + memset(value, 0, valuelen); SDB_GET_BINARY(pRaw, dataPos, value, valuelen, _OVER) taosHashPut(pUser->writeTbs, key, keyLen, value, valuelen); @@ -458,6 +458,31 @@ SHashObj *mndDupTableHash(SHashObj *pOld) { return pNew; } +SHashObj *mndDupUseDbHash(SHashObj *pOld) { + SHashObj *pNew = + taosHashInit(taosHashGetSize(pOld), taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK); + if (pNew == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + + int32_t *db = taosHashIterate(pOld, NULL); + while (db != NULL) { + size_t keyLen = 0; + char *key = taosHashGetKey(db, &keyLen); + + if (taosHashPut(pNew, key, keyLen, db, sizeof(*db)) != 0) { + taosHashCancelIterate(pOld, db); + taosHashCleanup(pNew); + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + db = taosHashIterate(pOld, db); + } + + return pNew; +} + static int32_t mndUserDupObj(SUserObj *pUser, SUserObj *pNew) { memcpy(pNew, pUser, sizeof(SUserObj)); pNew->authVersion++; @@ -469,7 +494,7 @@ static int32_t mndUserDupObj(SUserObj *pUser, SUserObj *pNew) { pNew->readTbs = mndDupTableHash(pUser->readTbs); pNew->writeTbs = mndDupTableHash(pUser->writeTbs); pNew->topics = mndDupTopicHash(pUser->topics); - pNew->useDbs = mndDupDbHash(pUser->useDbs); + pNew->useDbs = mndDupUseDbHash(pUser->useDbs); taosRUnLockLatch(&pUser->lock); if (pNew->readDbs == NULL || pNew->writeDbs == NULL || pNew->topics == NULL) { diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 54e4e393ecff1c001fcd778005b0645b13f589c8..72b478e6bfcf58d4aab59fb776ad91bb9a193180 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -296,10 +296,9 @@ void tqCloseReader(STqReader* pReader) { int32_t tqSeekVer(STqReader* pReader, int64_t ver, const char* id) { if (walReadSeekVer(pReader->pWalReader, ver) < 0) { - tqDebug("tmq poll: wal reader failed to seek to ver:%"PRId64" code:%s, %s", ver, tstrerror(terrno), id); return -1; } - tqDebug("tmq poll: wal reader seek to ver:%"PRId64" %s", ver, id); + tqDebug("tmq poll: wal reader seek to ver success ver:%"PRId64" %s", ver, id); return 0; } diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c index 1266c37dda1893709afb406982edd07917f188b4..b2b2b5a87e4eea144de7c0302756e70dca391cc0 100644 --- a/source/libs/catalog/src/ctgUtil.c +++ b/source/libs/catalog/src/ctgUtil.c @@ -1400,10 +1400,10 @@ int32_t ctgChkSetAuthRes(SCatalog* pCtg, SCtgAuthReq* req, SCtgAuthRsp* res) { pRes->pass = false; pRes->pCond = NULL; - // if (!pInfo->enable) { - // pRes->pass = false; - // return TSDB_CODE_SUCCESS; - // } + if (!pInfo->enable) { + pRes->pass = false; + return TSDB_CODE_SUCCESS; + } if (pInfo->superAuth) { pRes->pass = true; @@ -1453,7 +1453,8 @@ int32_t ctgChkSetAuthRes(SCatalog* pCtg, SCtgAuthReq* req, SCtgAuthRsp* res) { } case AUTH_TYPE_READ_OR_WRITE: { if ((pInfo->readDbs && taosHashGet(pInfo->readDbs, dbFName, strlen(dbFName))) || - (pInfo->writeDbs && taosHashGet(pInfo->writeDbs, dbFName, strlen(dbFName)))) { + (pInfo->writeDbs && taosHashGet(pInfo->writeDbs, dbFName, strlen(dbFName))) || + (pInfo->useDbs && taosHashGet(pInfo->useDbs, dbFName, strlen(dbFName)))) { pRes->pass = true; return TSDB_CODE_SUCCESS; } diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 1fd3cfa1bf0ae4ca443c9e844840749a6f3e8dea..0229631d404b31926866d3bd56df9f6e433f3406 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -1062,6 +1062,7 
@@ int32_t qStreamSetScanMemData(qTaskInfo_t tinfo, SPackedData submit) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; if ((pTaskInfo->execModel != OPTR_EXEC_MODEL_QUEUE) || (pTaskInfo->streamInfo.submit.msgStr != NULL)) { qError("qStreamSetScanMemData err:%d,%p", pTaskInfo->execModel, pTaskInfo->streamInfo.submit.msgStr); + terrno = TSDB_CODE_PAR_INTERNAL_ERROR; return -1; } qDebug("set the submit block for future scan"); @@ -1125,6 +1126,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT } else { taosRUnLockLatch(&pTaskInfo->lock); qError("no table in table list, %s", id); + terrno = TSDB_CODE_PAR_INTERNAL_ERROR; return -1; } } @@ -1144,6 +1146,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT } else { qError("vgId:%d uid:%" PRIu64 " not found in table list, total:%d, index:%d %s", pTaskInfo->id.vgId, uid, numOfTables, pScanInfo->currentTable, id); + terrno = TSDB_CODE_PAR_INTERNAL_ERROR; return -1; } @@ -1176,6 +1179,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT pScanBaseInfo->cond.twindows.skey = oldSkey; } else { qError("invalid pOffset->type:%d, %s", pOffset->type, id); + terrno = TSDB_CODE_PAR_INTERNAL_ERROR; return -1; } @@ -1190,6 +1194,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT if (setForSnapShot(sContext, pOffset->uid) != 0) { qError("setDataForSnapShot error. uid:%" PRId64 " , %s", pOffset->uid, id); + terrno = TSDB_CODE_PAR_INTERNAL_ERROR; return -1; } @@ -1226,6 +1231,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT SSnapContext* sContext = pInfo->sContext; if (setForSnapShot(sContext, pOffset->uid) != 0) { qError("setForSnapShot error. uid:%" PRIu64 " ,version:%" PRId64, pOffset->uid, pOffset->version); + terrno = TSDB_CODE_PAR_INTERNAL_ERROR; return -1; } qDebug("tmqsnap qStreamPrepareScan snapshot meta uid:%" PRId64 " ts %" PRId64 " %s", pOffset->uid, pOffset->ts, diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 9ab3983ac1715f7743fd5fdc8b249fc63571a79d..a0697a71026688883bc3865e9e1df6860cb94989 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -998,6 +998,7 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan SOperatorInfo* extractOperatorInTree(SOperatorInfo* pOperator, int32_t type, const char* id) { if (pOperator == NULL) { qError("invalid operator, failed to find tableScanOperator %s", id); + terrno = TSDB_CODE_PAR_INTERNAL_ERROR; return NULL; } @@ -1006,6 +1007,7 @@ SOperatorInfo* extractOperatorInTree(SOperatorInfo* pOperator, int32_t type, con } else { if (pOperator->pDownstream == NULL || pOperator->pDownstream[0] == NULL) { qError("invalid operator, failed to find tableScanOperator %s", id); + terrno = TSDB_CODE_PAR_INTERNAL_ERROR; return NULL; } diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index 15c4747e6e295c36dcaee9c4e4b21ba7009bf831..eb2efd573d1767ccbe1dac7e20f5e712796a84d7 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -2027,7 +2027,7 @@ static int32_t buildInsertUserAuthReq(const char* pUser, SName* pName, SArray** SUserAuthInfo userAuth = {.type = AUTH_TYPE_WRITE}; snprintf(userAuth.user, sizeof(userAuth.user), "%s", pUser); - // tNameGetFullDbName(pName, userAuth.dbFName); + memcpy(&userAuth.tbName, pName, sizeof(SName)); 
taosArrayPush(*pUserAuth, &userAuth); return TSDB_CODE_SUCCESS; diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index ad6127ead23d0713c9fa963741d460a30f50c249..db4e3a475984d60067ac4456dd10f311b8056e8b 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -207,17 +207,12 @@ int32_t walReadSeekVer(SWalReader *pReader, int64_t ver) { return 0; } -// pReader->curInvalid = 1; -// pReader->curVersion = ver; - if (ver > pWal->vers.lastVer || ver < pWal->vers.firstVer) { - wDebug("vgId:%d, invalid index:%" PRId64 ", first index:%" PRId64 ", last index:%" PRId64, pReader->pWal->cfg.vgId, + wInfo("vgId:%d, invalid index:%" PRId64 ", first index:%" PRId64 ", last index:%" PRId64, pReader->pWal->cfg.vgId, ver, pWal->vers.firstVer, pWal->vers.lastVer); terrno = TSDB_CODE_WAL_LOG_NOT_EXIST; return -1; } -// if (ver < pWal->vers.snapshotVer) { -// } if (walReadSeekVerImpl(pReader, ver) < 0) { return -1; @@ -236,8 +231,6 @@ static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer) { if (pRead->curVersion != fetchVer) { if (walReadSeekVer(pRead, fetchVer) < 0) { -// pRead->curVersion = fetchVer; -// pRead->curInvalid = 1; return -1; } seeked = true; @@ -256,7 +249,6 @@ static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer) { } else { terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } -// pRead->curInvalid = 1; return -1; } } diff --git a/tests/system-test/2-query/sml.py b/tests/system-test/2-query/sml.py index ec6309c71ad295eb504d8f97b493a062a044fed1..f96ed8a3ff3e6656c9b81e5f82efa1fed6d3bb36 100644 --- a/tests/system-test/2-query/sml.py +++ b/tests/system-test/2-query/sml.py @@ -24,7 +24,7 @@ class TDTestCase: tdSql.init(conn.cursor(), True) #tdSql.init(conn.cursor(), logSql) # output sql.txt file - def checkFileContent(self, dbname="sml_db"): + def checkContent(self, dbname="sml_db"): simClientCfg="%s/taos.cfg"%tdDnodes.getSimCfgPath() buildPath = tdCom.getBuildPath() cmdStr = '%s/build/bin/sml_test %s'%(buildPath, simClientCfg) @@ -102,7 +102,7 @@ class TDTestCase: def run(self): tdSql.prepare() - self.checkFileContent() + self.checkContent() def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py b/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py index fa22cad726ee700dd05350d0d03d5463bfbbcdec..7d11684ed81fd79584253e032f67cae1bd833de8 100644 --- a/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py +++ b/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py @@ -32,34 +32,6 @@ class TDTestCase: tdSql.init(conn.cursor()) #tdSql.init(conn.cursor(), logSql) # output sql.txt file - def checkFileContent(self, consumerId, queryString): - buildPath = tdCom.getBuildPath() - cfgPath = tdCom.getClientCfgPath() - dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId) - cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) - tdLog.info(cmdStr) - os.system(cmdStr) - - consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) - tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) - - consumeFile = open(consumeRowsFile, mode='r') - queryFile = open(dstFile, mode='r') - - # skip first line for it is schema - queryFile.readline() - - while True: - dst = queryFile.readline() - src = consumeFile.readline() - - if dst: - if dst != src: - tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) - else: - break - return - def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, 
stable, ctables, and insert data: ") paraDict = {'dbName': 'dbt', diff --git a/tests/system-test/7-tmq/tmq3mnodeSwitch.py b/tests/system-test/7-tmq/tmq3mnodeSwitch.py index 22ef8cebdc1f367d8d95010087e15b2eaf2f72e8..0740830696987a6009db020de14bdc768d56c69a 100644 --- a/tests/system-test/7-tmq/tmq3mnodeSwitch.py +++ b/tests/system-test/7-tmq/tmq3mnodeSwitch.py @@ -138,34 +138,6 @@ class TDTestCase: else: tdLog.exit("three mnodes is not ready in 10s ") - def checkFileContent(self, consumerId, queryString): - buildPath = tdCom.getBuildPath() - cfgPath = tdCom.getClientCfgPath() - dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId) - cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) - tdLog.info(cmdStr) - os.system(cmdStr) - - consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) - tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) - - consumeFile = open(consumeRowsFile, mode='r') - queryFile = open(dstFile, mode='r') - - # skip first line for it is schema - queryFile.readline() - - while True: - dst = queryFile.readline() - src = consumeFile.readline() - - if dst: - if dst != src: - tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) - else: - break - return - def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") paraDict = {'dbName': 'db1', @@ -257,7 +229,7 @@ class TDTestCase: tdLog.exit("0 tmq consume rows error!") if expectRowsList[0] == resultList[0]: - self.checkFileContent(consumerId, queryString) + tmqCom.checkFileContent(consumerId, queryString) time.sleep(10) for i in range(len(topicNameList)): diff --git a/tests/system-test/7-tmq/tmqCheckData.py b/tests/system-test/7-tmq/tmqCheckData.py index a9671241a966ea632f31726349580a8e7152f440..cb5a40642aab7ba2053780b898fa498e2c8b49a3 100644 --- a/tests/system-test/7-tmq/tmqCheckData.py +++ b/tests/system-test/7-tmq/tmqCheckData.py @@ -5,6 +5,7 @@ import time import socket import os import threading +import math from util.log import * from util.sql import * @@ -21,34 +22,6 @@ class TDTestCase: tdSql.init(conn.cursor()) #tdSql.init(conn.cursor(), logSql) # output sql.txt file - def checkFileContent(self, consumerId, queryString): - buildPath = tdCom.getBuildPath() - cfgPath = tdCom.getClientCfgPath() - dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId) - cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) - tdLog.info(cmdStr) - os.system(cmdStr) - - consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) - tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) - - consumeFile = open(consumeRowsFile, mode='r') - queryFile = open(dstFile, mode='r') - - # skip first line for it is schema - queryFile.readline() - - while True: - dst = queryFile.readline() - src = consumeFile.readline() - - if dst: - if dst != src: - tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) - else: - break - return - def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") paraDict = {'dbName': 'db1', @@ -110,7 +83,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("0 tmq consume rows error!") - self.checkFileContent(consumerId, queryString) + tmqCom.checkFileContent(consumerId, queryString) # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() @@ -136,7 +109,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act 
consume rows: %d"%(expectRowsList[1], resultList[0])) tdLog.exit("1 tmq consume rows error!") - self.checkFileContent(consumerId, queryString) + tmqCom.checkFileContent(consumerId, queryString) # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() @@ -162,7 +135,7 @@ class TDTestCase: # tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[2], resultList[0])) # tdLog.exit("2 tmq consume rows error!") - # self.checkFileContent(consumerId, queryString) + # tmqCom.checkFileContent(consumerId, queryString) time.sleep(10) for i in range(len(topicNameList)): diff --git a/tests/system-test/7-tmq/tmqCheckData1.py b/tests/system-test/7-tmq/tmqCheckData1.py index e06c29c5a2770ec7f2e2a34d391495ee24399f0c..b4fec94dcc3a2a266a35400dc789ee13aa463caf 100644 --- a/tests/system-test/7-tmq/tmqCheckData1.py +++ b/tests/system-test/7-tmq/tmqCheckData1.py @@ -21,34 +21,6 @@ class TDTestCase: tdSql.init(conn.cursor()) #tdSql.init(conn.cursor(), logSql) # output sql.txt file - def checkFileContent(self, consumerId, queryString): - buildPath = tdCom.getBuildPath() - cfgPath = tdCom.getClientCfgPath() - dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId) - cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) - tdLog.info(cmdStr) - os.system(cmdStr) - - consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) - tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) - - consumeFile = open(consumeRowsFile, mode='r') - queryFile = open(dstFile, mode='r') - - # skip first line for it is schema - queryFile.readline() - - while True: - dst = queryFile.readline() - src = consumeFile.readline() - - if dst: - if dst != src: - tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) - else: - break - return - def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") paraDict = {'dbName': 'db1', @@ -110,7 +82,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("0 tmq consume rows error!") - self.checkFileContent(consumerId, queryString) + tmqCom.checkFileContent(consumerId, queryString) # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() @@ -135,7 +107,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0])) tdLog.exit("1 tmq consume rows error!") - self.checkFileContent(consumerId, queryString) + tmqCom.checkFileContent(consumerId, queryString) # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() @@ -160,7 +132,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[2], resultList[0])) tdLog.exit("2 tmq consume rows error!") - self.checkFileContent(consumerId, queryString) + tmqCom.checkFileContent(consumerId, queryString) time.sleep(10) for i in range(len(topicNameList)): diff --git a/tests/system-test/7-tmq/tmqCommon.py b/tests/system-test/7-tmq/tmqCommon.py index 1e636e207441e7794f312ffa00e27accde0eccbe..f63c70a4c68ba0d88149326f3cd15bab764b6165 100644 --- a/tests/system-test/7-tmq/tmqCommon.py +++ b/tests/system-test/7-tmq/tmqCommon.py @@ -10,7 +10,7 @@ ################################################################### # -*- coding: utf-8 -*- - +import math from asyncore import loop from collections import defaultdict import subprocess @@ -467,18 +467,24 @@ class TMQCom: for i in 
range(0,skipRowsOfCons): consumeFile.readline() - lines = 0 while True: dst = queryFile.readline() src = consumeFile.readline() - lines += 1 - if dst: - if dst != src: - tdLog.info("src row: %s"%src) - tdLog.info("dst row: %s"%dst) - tdLog.exit("consumerId %d consume rows[%d] is not match the rows by direct query"%(consumerId, lines)) - else: + dstSplit = dst.split(',') + srcSplit = src.split(',') + + if not dst or not src: break + if len(dstSplit) != len(srcSplit): + tdLog.exit("consumerId %d consume rows len is not match the rows by direct query,len(dstSplit):%d != len(srcSplit):%d, dst:%s, src:%s" + %(consumerId, len(dstSplit), len(srcSplit), dst, src)) + + for i in range(len(dstSplit)): + if srcSplit[i] != dstSplit[i]: + srcFloat = float(srcSplit[i]) + dstFloat = float(dstSplit[i]) + if not math.isclose(srcFloat, dstFloat, abs_tol=1e-9): + tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) return def getResultFileByTaosShell(self, consumerId, queryString): diff --git a/tests/system-test/7-tmq/tmqConsumerGroup.py b/tests/system-test/7-tmq/tmqConsumerGroup.py index b1aef9d762f7d994c0bfa02ab04b0c64b76edffa..d146dca4497b9f60d204ca067f3bcd299c204dae 100644 --- a/tests/system-test/7-tmq/tmqConsumerGroup.py +++ b/tests/system-test/7-tmq/tmqConsumerGroup.py @@ -21,34 +21,6 @@ class TDTestCase: tdSql.init(conn.cursor()) #tdSql.init(conn.cursor(), logSql) # output sql.txt file - def checkFileContent(self, consumerId, queryString): - buildPath = tdCom.getBuildPath() - cfgPath = tdCom.getClientCfgPath() - dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId) - cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) - tdLog.info(cmdStr) - os.system(cmdStr) - - consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) - tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) - - consumeFile = open(consumeRowsFile, mode='r') - queryFile = open(dstFile, mode='r') - - # skip first line for it is schema - queryFile.readline() - - while True: - dst = queryFile.readline() - src = consumeFile.readline() - - if dst: - if dst != src: - tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) - else: - break - return - def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") paraDict = {'dbName': 'db1', diff --git a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py index a923232706c62fa1343b65e936cbc7aac1b6df5f..bee38ca8ee192c542ebc2c09718d4e4224904c1e 100644 --- a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py +++ b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py @@ -60,34 +60,6 @@ class TDTestCase: tdLog.exit("create udf functions fail") return - def checkFileContent(self, consumerId, queryString): - buildPath = tdCom.getBuildPath() - cfgPath = tdCom.getClientCfgPath() - dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId) - cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) - tdLog.info(cmdStr) - os.system(cmdStr) - - consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) - tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) - - consumeFile = open(consumeRowsFile, mode='r') - queryFile = open(dstFile, mode='r') - - # skip first line for it is schema - queryFile.readline() - - while True: - dst = queryFile.readline() - src = consumeFile.readline() - - if dst: - if dst != src: - tdLog.exit("consumerId %d consume rows is not match the rows by 
direct query"%consumerId) - else: - break - return - def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") paraDict = {'dbName': 'dbt', @@ -201,7 +173,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("0 tmq consume rows error!") - # self.checkFileContent(consumerId, queryString) + # tmqCom.checkFileContent(consumerId, queryString) # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) # reinit consume info, and start tmq_sim, then check consume result @@ -228,7 +200,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0])) tdLog.exit("1 tmq consume rows error!") - # self.checkFileContent(consumerId, queryString) + # tmqCom.checkFileContent(consumerId, queryString) # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) time.sleep(10) @@ -312,7 +284,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("2 tmq consume rows error!") - # self.checkFileContent(consumerId, queryString) + # tmqCom.checkFileContent(consumerId, queryString) # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) # reinit consume info, and start tmq_sim, then check consume result @@ -339,7 +311,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0])) tdLog.exit("3 tmq consume rows error!") - # self.checkFileContent(consumerId, queryString) + # tmqCom.checkFileContent(consumerId, queryString) # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) time.sleep(10) diff --git a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py index bee174376daca3c0244464c85f80124929e00ce8..d3b64d2b2150521c1a3bc98fe41734390609084f 100644 --- a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py +++ b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py @@ -60,34 +60,6 @@ class TDTestCase: tdLog.exit("create udf functions fail") return - def checkFileContent(self, consumerId, queryString): - buildPath = tdCom.getBuildPath() - cfgPath = tdCom.getClientCfgPath() - dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId) - cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) - tdLog.info(cmdStr) - os.system(cmdStr) - - consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) - tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) - - consumeFile = open(consumeRowsFile, mode='r') - queryFile = open(dstFile, mode='r') - - # skip first line for it is schema - queryFile.readline() - - while True: - dst = queryFile.readline() - src = consumeFile.readline() - - if dst: - if dst != src: - tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) - else: - break - return - def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") paraDict = {'dbName': 'dbt', @@ -201,7 +173,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("0 tmq consume rows error!") - # self.checkFileContent(consumerId, queryString) + # tmqCom.checkFileContent(consumerId, queryString) # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) # reinit consume info, and start tmq_sim, then check 
consume result @@ -228,7 +200,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0])) tdLog.exit("1 tmq consume rows error!") - # self.checkFileContent(consumerId, queryString) + # tmqCom.checkFileContent(consumerId, queryString) # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) time.sleep(10) @@ -312,7 +284,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("2 tmq consume rows error!") - # self.checkFileContent(consumerId, queryString) + # tmqCom.checkFileContent(consumerId, queryString) # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) # reinit consume info, and start tmq_sim, then check consume result @@ -339,7 +311,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0])) tdLog.exit("3 tmq consume rows error!") - # self.checkFileContent(consumerId, queryString) + # tmqCom.checkFileContent(consumerId, queryString) # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) time.sleep(10) diff --git a/tests/system-test/7-tmq/tmqUdf.py b/tests/system-test/7-tmq/tmqUdf.py index 5bb8e3034ceec89c1161b6a5c9bfd9eb5cfddc82..5da1625cb125091dddc6ddbe3e635dc404352bbb 100644 --- a/tests/system-test/7-tmq/tmqUdf.py +++ b/tests/system-test/7-tmq/tmqUdf.py @@ -60,34 +60,6 @@ class TDTestCase: tdLog.exit("create udf functions fail") return - def checkFileContent(self, consumerId, queryString): - buildPath = tdCom.getBuildPath() - cfgPath = tdCom.getClientCfgPath() - dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId) - cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) - tdLog.info(cmdStr) - os.system(cmdStr) - - consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) - tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) - - consumeFile = open(consumeRowsFile, mode='r') - queryFile = open(dstFile, mode='r') - - # skip first line for it is schema - queryFile.readline() - - while True: - dst = queryFile.readline() - src = consumeFile.readline() - - if dst: - if dst != src: - tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) - else: - break - return - def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") paraDict = {'dbName': 'dbt', @@ -201,7 +173,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("0 tmq consume rows error!") - self.checkFileContent(consumerId, queryString) + tmqCom.checkFileContent(consumerId, queryString) tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) @@ -229,7 +201,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0])) tdLog.exit("1 tmq consume rows error!") - self.checkFileContent(consumerId, queryString) + tmqCom.checkFileContent(consumerId, queryString) tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) time.sleep(10) @@ -313,7 +285,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("2 tmq consume rows error!") - self.checkFileContent(consumerId, queryString) + tmqCom.checkFileContent(consumerId, queryString) tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) # reinit consume info, and start tmq_sim, then 
check consume result @@ -340,7 +312,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0])) tdLog.exit("3 tmq consume rows error!") - self.checkFileContent(consumerId, queryString) + tmqCom.checkFileContent(consumerId, queryString) tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) time.sleep(10) diff --git a/tests/system-test/99-TDcase/TD-16821.py b/tests/system-test/99-TDcase/TD-16821.py index 78ac172f3075733aff7b3608fb29400b32adc01d..2e23002059c5d157b3e1f6edf2f21a5d291739bc 100644 --- a/tests/system-test/99-TDcase/TD-16821.py +++ b/tests/system-test/99-TDcase/TD-16821.py @@ -21,34 +21,6 @@ class TDTestCase: tdSql.init(conn.cursor()) #tdSql.init(conn.cursor(), logSql) # output sql.txt file - def checkFileContent(self, consumerId, queryString): - buildPath = tdCom.getBuildPath() - cfgPath = tdCom.getClientCfgPath() - dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId) - cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) - tdLog.info(cmdStr) - os.system(cmdStr) - - consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) - tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) - - consumeFile = open(consumeRowsFile, mode='r') - queryFile = open(dstFile, mode='r') - - # skip first line for it is schema - queryFile.readline() - - while True: - dst = queryFile.readline() - src = consumeFile.readline() - - if dst: - if dst != src: - tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) - else: - break - return - def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") paraDict = {'dbName': 'db1', @@ -114,7 +86,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("0 tmq consume rows error!") - self.checkFileContent(consumerId, queryString) + tmqCom.checkFileContent(consumerId, queryString) # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() @@ -140,7 +112,7 @@ class TDTestCase: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0])) tdLog.exit("1 tmq consume rows error!") - self.checkFileContent(consumerId, queryString) + tmqCom.checkFileContent(consumerId, queryString) # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() @@ -166,7 +138,7 @@ class TDTestCase: # tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[2], resultList[0])) # tdLog.exit("2 tmq consume rows error!") - # self.checkFileContent(consumerId, queryString) + # tmqCom.checkFileContent(consumerId, queryString) time.sleep(10) for i in range(len(topicNameList)): diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 70130397ba7d4b9539080bff1ad81c193e751a54..01ca2efaba93b445d3c11420284fea00ab8b135f 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -317,7 +317,6 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i quotationStr[0] = '\"'; quotationStr[1] = 0; - int n; char buf[TSDB_MAX_BYTES_PER_ROW]; switch (field->type) { case TSDB_DATA_TYPE_BOOL: @@ -348,15 +347,11 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i taosFprintfFile(pFile, "%" PRIu64, *((uint64_t *)val)); break; case TSDB_DATA_TYPE_FLOAT: - taosFprintfFile(pFile, "%.5f", GET_FLOAT_VAL(val)); + taosFprintfFile(pFile, "%e", GET_FLOAT_VAL(val)); 
break; case TSDB_DATA_TYPE_DOUBLE: - n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", length, GET_DOUBLE_VAL(val)); - if (n > TMAX(25, length)) { - taosFprintfFile(pFile, "%*.15e", length, GET_DOUBLE_VAL(val)); - } else { - taosFprintfFile(pFile, "%s", buf); - } + snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15e", 23, GET_DOUBLE_VAL(val)); + taosFprintfFile(pFile, "%s", buf); break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: @@ -512,7 +507,6 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t return; } - int n; char buf[TSDB_MAX_BYTES_PER_ROW]; switch (field->type) { case TSDB_DATA_TYPE_BOOL: @@ -543,15 +537,11 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t printf("%*" PRIu64, width, *((uint64_t *)val)); break; case TSDB_DATA_TYPE_FLOAT: - printf("%*ef", width, GET_FLOAT_VAL(val)); + printf("%*e", width, GET_FLOAT_VAL(val)); break; case TSDB_DATA_TYPE_DOUBLE: - n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", width, GET_DOUBLE_VAL(val)); - if (n > TMAX(25, width)) { - printf("%*.15e", width, GET_DOUBLE_VAL(val)); - } else { - printf("%s", buf); - } + snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%.9e", GET_DOUBLE_VAL(val)); + printf("%*s", width, buf); break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: