diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index c273e9889f0ea7b166a90b7131594564a2b57b25..0f7a450920b1a60a0af508062d104c26f592a15f 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 2dba49c + GIT_TAG 3588b3d SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md index 66f7d5d5941866c803f05cd825ca8cc2347f9a52..6191492b37e09096ad1d7c8da4567130579ca20b 100644 --- a/docs/en/05-get-started/01-docker.md +++ b/docs/en/05-get-started/01-docker.md @@ -52,7 +52,7 @@ Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) i taosBenchmark ``` -This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`. +This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `California.Campbell`, `California.Cupertino`, `California.LosAngeles`, `California.MountainView`, `California.PaloAlto`, `California.SanDiego`, `California.SanFrancisco`, `California.SanJose`, `California.SantaClara` or `California.Sunnyvale`. The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds. @@ -74,10 +74,10 @@ Query the average, maximum, and minimum values of all 100 million rows of data: SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters; ``` -Query the number of rows whose `location` tag is `San Francisco`: +Query the number of rows whose `location` tag is `California.SanFrancisco`: ```sql -SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco"; +SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco"; ``` Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`: diff --git a/docs/en/05-get-started/03-package.md b/docs/en/05-get-started/03-package.md index 7257fccc80ea8df05449e7e29842d89794d702bf..b0400de67388a433355358d827aa41f35598655c 100644 --- a/docs/en/05-get-started/03-package.md +++ b/docs/en/05-get-started/03-package.md @@ -221,7 +221,7 @@ Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) i taosBenchmark ``` -This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. 
Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`. +This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `California.Campbell`, `California.Cupertino`, `California.LosAngeles`, `California.MountainView`, `California.PaloAlto`, `California.SanDiego`, `California.SanFrancisco`, `California.SanJose`, `California.SantaClara` or `California.Sunnyvale`. The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds. @@ -243,10 +243,10 @@ Query the average, maximum, and minimum values of all 100 million rows of data: SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters; ``` -Query the number of rows whose `location` tag is `San Francisco`: +Query the number of rows whose `location` tag is `California.SanFrancisco`: ```sql -SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco"; +SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco"; ``` Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`: diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx index 0d391c6ac308c5e9e998e2e7e3423cc5a809905e..530287e2a424592045755eeecf37894f59639ef3 100644 --- a/docs/en/14-reference/03-connector/06-rust.mdx +++ b/docs/en/14-reference/03-connector/06-rust.mdx @@ -155,15 +155,15 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> { let inserted = taos.exec_many([ // create super table "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \ - TAGS (`groupid` INT, `location` BINARY(16))", + TAGS (`groupid` INT, `location` BINARY(24))", // create child table - "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + "CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')", // insert into child table "INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)", // insert with NULL values "INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)", // insert and automatically create table with tags if not exists - "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)", + "INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)", // insert many records in a single sql "INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)", ]).await?; diff --git a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java index 
179e6e6911185631901b79e34a343967e73c4936..e9af5e9ce0c0473f4513cbb949dcbd9f433c0c92 100644 --- a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java @@ -38,12 +38,12 @@ public class SubscribeDemo { statement.executeUpdate("create database " + DB_NAME); statement.executeUpdate("use " + DB_NAME); statement.executeUpdate( - "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(16))"); - statement.executeUpdate("CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')"); + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(24))"); + statement.executeUpdate("CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')"); statement.executeUpdate("INSERT INTO `d0` values(now - 10s, 0.32, 116)"); statement.executeUpdate("INSERT INTO `d0` values(now - 8s, NULL, NULL)"); statement.executeUpdate( - "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119)"); + "INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119)"); statement.executeUpdate( "INSERT INTO `d1` values (now-8s, 10, 120) (now - 6s, 10, 119) (now - 4s, 11.2, 118)"); // create topic @@ -75,4 +75,4 @@ public class SubscribeDemo { } timer.cancel(); } -} \ No newline at end of file +} diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java index 6fe83f002ebcb9d82e026e9a32886fd22bfefbe9..f0ebc53b4b9a588ac4a23461553dd5c9f1a9f00b 100644 --- a/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java @@ -16,7 +16,7 @@ class MockDataSource implements Iterator { private int currentTbId = -1; // mock values - String[] location = {"LosAngeles", "SanDiego", "Hollywood", "Compton", "San Francisco"}; + String[] location = {"California.LosAngeles", "California.SanDiego", "California.SanJose", "California.Campbell", "California.SanFrancisco"}; float[] current = {8.8f, 10.7f, 9.9f, 8.9f, 9.4f}; int[] voltage = {119, 116, 111, 113, 118}; float[] phase = {0.32f, 0.34f, 0.33f, 0.329f, 0.141f}; @@ -50,4 +50,4 @@ class MockDataSource implements Iterator { return sb.toString(); } -} \ No newline at end of file +} diff --git a/docs/examples/python/mockdatasource.py b/docs/examples/python/mockdatasource.py index 852860aec0adc8f9b043c9dcd5deb0bf00239201..1c516a800e007934f8e6815f82024a53fea70073 100644 --- a/docs/examples/python/mockdatasource.py +++ b/docs/examples/python/mockdatasource.py @@ -3,11 +3,11 @@ import time class MockDataSource: samples = [ - "8.8,119,0.32,LosAngeles,0", - "10.7,116,0.34,SanDiego,1", - "9.9,111,0.33,Hollywood,2", - "8.9,113,0.329,Compton,3", - "9.4,118,0.141,San Francisco,4" + "8.8,119,0.32,California.LosAngeles,0", + "10.7,116,0.34,California.SanDiego,1", + "9.9,111,0.33,California.SanJose,2", + "8.9,113,0.329,California.Campbell,3", + "9.4,118,0.141,California.SanFrancisco,4" ] def __init__(self, tb_name_prefix, table_count): diff --git a/docs/examples/rust/nativeexample/examples/stmt_example.rs b/docs/examples/rust/nativeexample/examples/stmt_example.rs index 26084746f20a3662383b417eb98016f09ad0913e..9cf8e8e1fc2526206486fa9a61c01f6320564131 100644 --- a/docs/examples/rust/nativeexample/examples/stmt_example.rs +++ 
b/docs/examples/rust/nativeexample/examples/stmt_example.rs @@ -12,7 +12,7 @@ async fn main() -> anyhow::Result<()> { // bind table name and tags stmt.set_tbname_tags( "d1001", - &[Value::VarChar("San Fransico".into()), Value::Int(2)], + &[Value::VarChar("California.SanFransico".into()), Value::Int(2)], )?; // bind values. let values = vec![ diff --git a/docs/examples/rust/nativeexample/examples/subscribe_demo.rs b/docs/examples/rust/nativeexample/examples/subscribe_demo.rs index 7e0a347948fc8450dead0babbbdd1eace2f06d1e..11d6d4e0043fddeff73c09d86c0fce0abc903a08 100644 --- a/docs/examples/rust/nativeexample/examples/subscribe_demo.rs +++ b/docs/examples/rust/nativeexample/examples/subscribe_demo.rs @@ -19,13 +19,13 @@ struct Record { async fn prepare(taos: Taos) -> anyhow::Result<()> { let inserted = taos.exec_many([ // create child table - "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + "CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')", // insert into child table "INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)", // insert with NULL values "INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)", // insert and automatically create table with tags if not exists - "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)", + "INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)", // insert many records in a single sql "INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)", ]).await?; @@ -48,7 +48,7 @@ async fn main() -> anyhow::Result<()> { format!("CREATE DATABASE `{db}`"), format!("USE `{db}`"), // create super table - format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"), + format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(24))"), // create topic for subscription format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}") ]) diff --git a/docs/examples/rust/restexample/examples/insert_example.rs b/docs/examples/rust/restexample/examples/insert_example.rs index 27b2bb4788615810d097b88f0dd616b96885538c..11a84f166103eba03b43549d4db77100a92a58e6 100644 --- a/docs/examples/rust/restexample/examples/insert_example.rs +++ b/docs/examples/rust/restexample/examples/insert_example.rs @@ -14,14 +14,14 @@ async fn main() -> anyhow::Result<()> { ]).await?; let inserted = taos.exec("INSERT INTO - power.d1001 USING power.meters TAGS('San Francisco', 2) + power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS('San Francisco', 3) + power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS('Los Angeles', 2) + power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS('Los Angeles', 3) + power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)").await?; assert_eq!(inserted, 8); diff --git 
a/docs/zh/05-get-started/01-docker.md b/docs/zh/05-get-started/01-docker.md index e3345fed966f6940dd0c6665301c15d18501c787..0f004581b5be470f3c99f48e1aaecb3578c442ab 100644 --- a/docs/zh/05-get-started/01-docker.md +++ b/docs/zh/05-get-started/01-docker.md @@ -52,7 +52,7 @@ taos> $ taosBenchmark ``` -该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `Campbell`、`Cupertino`、`Los Angeles`、`Mountain View`、`Palo Alto`、`San Diego`、`San Francisco`、`San Jose`、`Santa Clara` 或者 `Sunnyvale`。 +该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `California.Campbell`、`California.Cupertino`、`California.LosAngeles`、`California.MountainView`、`California.PaloAlto`、`California.SanDiego`、`California.SanFrancisco`、`California.SanJose`、`California.SantaClara` 或者 `California.Sunnyvale`。 这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。 @@ -74,10 +74,10 @@ SELECT COUNT(*) FROM test.meters; SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters; ``` -查询 location = "San Francisco" 的记录总条数: +查询 location = "California.SanFrancisco" 的记录总条数: ```sql -SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco"; +SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco"; ``` 查询 groupId = 10 的所有记录的平均值、最大值、最小值等: diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md index cb2553a0bfe6e946422514562ebeb9326511a480..66863d1bd9419e6d8fc78cf1536e1efac38885d6 100644 --- a/docs/zh/05-get-started/03-package.md +++ b/docs/zh/05-get-started/03-package.md @@ -223,7 +223,7 @@ Query OK, 2 row(s) in set (0.003128s) $ taosBenchmark ``` -该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `Campbell`、`Cupertino`、`Los Angeles`、`Mountain View`、`Palo Alto`、`San Diego`、`San Francisco`、`San Jose`、`Santa Clara` 或者 `Sunnyvale`。 +该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `California.Campbell`、`California.Cupertino`、`California.LosAngeles`、`California.MountainView`、`California.PaloAlto`、`California.SanDiego`、`California.SanFrancisco`、`California.SanJose`、`California.SantaClara` 或者 `California.Sunnyvale`。 这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。 @@ -245,10 +245,10 @@ SELECT COUNT(*) FROM test.meters; SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters; ``` -查询 location = "San Francisco" 的记录总条数: +查询 location = "California.SanFrancisco" 的记录总条数: ```sql -SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco"; +SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco"; ``` 查询 groupId = 10 的所有记录的平均值、最大值、最小值等: diff --git a/docs/zh/08-connector/06-rust.mdx b/docs/zh/08-connector/06-rust.mdx index 26f53c82d630fda168dd98b4c8ec993afc5e3a1d..b838e5c5a2f5c33f09dce6cf1a8d5f61ddc5f264 100644 --- a/docs/zh/08-connector/06-rust.mdx +++ b/docs/zh/08-connector/06-rust.mdx @@ 
-155,15 +155,15 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> { let inserted = taos.exec_many([ // create super table "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \ - TAGS (`groupid` INT, `location` BINARY(16))", + TAGS (`groupid` INT, `location` BINARY(24))", // create child table - "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + "CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')", // insert into child table "INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)", // insert with NULL values "INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)", // insert and automatically create table with tags if not exists - "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)", + "INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)", // insert many records in a single sql "INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)", ]).await?; diff --git a/include/common/tmsg.h b/include/common/tmsg.h index d3db2f318cb12822ba3fa3f379bd210160a3ca7a..dfce7e54d0218c7d5e0aa45acf62765343c8546a 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -1201,6 +1201,7 @@ typedef struct { int16_t sstTrigger; int16_t hashPrefix; int16_t hashSuffix; + int32_t tsdbPageSize; } SCreateVnodeReq; int32_t tSerializeSCreateVnodeReq(void* buf, int32_t bufLen, SCreateVnodeReq* pReq); diff --git a/include/util/tsched.h b/include/util/tsched.h index 347cacd19185b0891deadfdbdc6a0b42b8a71d18..379456afe64ebfd225bcec8dc873a23c54e6e29b 100644 --- a/include/util/tsched.h +++ b/include/util/tsched.h @@ -31,7 +31,6 @@ typedef struct SSchedMsg { void *thandle; } SSchedMsg; - typedef struct { char label[TSDB_LABEL_LEN]; tsem_t emptySem; @@ -48,7 +47,6 @@ typedef struct { void *pTimer; } SSchedQueue; - /** * Create a thread-safe ring-buffer based task queue and return the instance. A thread * pool will be created to consume the messages in the queue. @@ -57,7 +55,7 @@ typedef struct { * @param label the label of the queue * @return the created queue scheduler */ -void *taosInitScheduler(int32_t capacity, int32_t numOfThreads, const char *label, SSchedQueue* pSched); +void *taosInitScheduler(int32_t capacity, int32_t numOfThreads, const char *label, SSchedQueue *pSched); /** * Create a thread-safe ring-buffer based task queue and return the instance. 
@@ -83,7 +81,7 @@ void taosCleanUpScheduler(void *queueScheduler); * @param queueScheduler the queue scheduler instance * @param pMsg the message for the task */ -void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg); +int taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg); #ifdef __cplusplus } diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 5ebc2729f8b4a2d87316c5d317e0b4c8667edf88..39b4b069a07949b29717d1002ffaddc2501d4a77 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1399,7 +1399,12 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { arg->msg = *pMsg; arg->pEpset = tEpSet; - taosAsyncExec(doProcessMsgFromServer, arg, NULL); + if (0 != taosAsyncExec(doProcessMsgFromServer, arg, NULL)) { + tscError("failed to sched msg to tsc, tsc ready to quit"); + rpcFreeCont(pMsg->pCont); + taosMemoryFree(arg->pEpset); + taosMemoryFree(arg); + } } TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, const char* db, uint16_t port) { diff --git a/source/common/src/systable.c b/source/common/src/systable.c index f77320c5acc605f2be4df0e7e14d8800c1fee94a..496806d87794416e19cd4338659b87ee6f520792 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -102,9 +102,10 @@ static const SSysDbTableSchema userDBSchema[] = { {.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true}, {.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, {.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true}, - {.name = "sst_trigger", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true}, + {.name = "stt_trigger", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true}, {.name = "table_prefix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true}, {.name = "table_suffix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true}, + {.name = "tsdb_pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, }; static const SSysDbTableSchema userFuncSchema[] = { diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 7d8461ca6046d3ae3dd6795809b2b9509d680f0b..cbd6bc3ed7e62a9edf9d4baac18e0c7057a2ae10 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -3782,6 +3782,7 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR if (tEncodeI16(&encoder, pReq->sstTrigger) < 0) return -1; if (tEncodeI16(&encoder, pReq->hashPrefix) < 0) return -1; if (tEncodeI16(&encoder, pReq->hashSuffix) < 0) return -1; + if (tEncodeI32(&encoder, pReq->tsdbPageSize) < 0) return -1; tEndEncode(&encoder); @@ -3857,6 +3858,7 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq * if (tDecodeI16(&decoder, &pReq->sstTrigger) < 0) return -1; if (tDecodeI16(&decoder, &pReq->hashPrefix) < 0) return -1; if (tDecodeI16(&decoder, &pReq->hashSuffix) < 0) return -1; + if (tDecodeI32(&decoder, &pReq->tsdbPageSize) < 0) return -1; tEndDecode(&decoder); tDecoderClear(&decoder); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 981576efec3f4fab0bf3a64607abfa961a637b06..e3ad1b938979e3012a03ae7903450dd6630442d5 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -173,6 +173,7 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) { pCfg->hashMethod = pCreate->hashMethod; 
pCfg->hashPrefix = pCreate->hashPrefix; pCfg->hashSuffix = pCreate->hashSuffix; + pCfg->tsdbPageSize = pCreate->tsdbPageSize * 1024; pCfg->standby = pCfg->standby; pCfg->syncCfg.myIndex = pCreate->selfIndex; @@ -222,9 +223,11 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return -1; } - dInfo("vgId:%d, start to create vnode, tsma:%d standby:%d cacheLast:%d cacheLastSize:%d sstTrigger:%d", - createReq.vgId, createReq.isTsma, createReq.standby, createReq.cacheLast, createReq.cacheLastSize, - createReq.sstTrigger); + dInfo( + "vgId:%d, start to create vnode, tsma:%d standby:%d cacheLast:%d cacheLastSize:%d sstTrigger:%d " + "tsdbPageSize:%d", + createReq.vgId, createReq.isTsma, createReq.standby, createReq.cacheLast, createReq.cacheLastSize, + createReq.sstTrigger, createReq.tsdbPageSize); dInfo("vgId:%d, hashMethod:%d begin:%u end:%u prefix:%d surfix:%d", createReq.vgId, createReq.hashMethod, createReq.hashBegin, createReq.hashEnd, createReq.hashPrefix, createReq.hashSuffix); vmGenerateVnodeCfg(&createReq, &vnodeCfg); diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 9632be1b24ba4561804a1f7ec0e7bf1205f23e4c..cd08ee4ad5210d9ce36e391c346a681a82abd373 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -308,6 +308,7 @@ typedef struct { int16_t hashPrefix; int16_t hashSuffix; int16_t sstTrigger; + int32_t tsdbPageSize; int32_t numOfRetensions; SArray* pRetensions; int32_t walRetentionPeriod; diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 9e58b74017918d6b99eae59adecb501ac9989df4..d42ba1f7ffc45f0fb70457c01c26b35ef206454f 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -31,7 +31,7 @@ #include "systable.h" #define DB_VER_NUMBER 1 -#define DB_RESERVE_SIZE 58 +#define DB_RESERVE_SIZE 54 static SSdbRaw *mndDbActionEncode(SDbObj *pDb); static SSdbRow *mndDbActionDecode(SSdbRaw *pRaw); @@ -128,6 +128,7 @@ static SSdbRaw *mndDbActionEncode(SDbObj *pDb) { SDB_SET_INT16(pRaw, dataPos, pDb->cfg.sstTrigger, _OVER) SDB_SET_INT16(pRaw, dataPos, pDb->cfg.hashPrefix, _OVER) SDB_SET_INT16(pRaw, dataPos, pDb->cfg.hashSuffix, _OVER) + SDB_SET_INT32(pRaw, dataPos, pDb->cfg.tsdbPageSize, _OVER) SDB_SET_RESERVE(pRaw, dataPos, DB_RESERVE_SIZE, _OVER) SDB_SET_DATALEN(pRaw, dataPos, _OVER) @@ -214,10 +215,20 @@ static SSdbRow *mndDbActionDecode(SSdbRaw *pRaw) { SDB_GET_INT16(pRaw, dataPos, &pDb->cfg.sstTrigger, _OVER) SDB_GET_INT16(pRaw, dataPos, &pDb->cfg.hashPrefix, _OVER) SDB_GET_INT16(pRaw, dataPos, &pDb->cfg.hashSuffix, _OVER) + SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.tsdbPageSize, _OVER) SDB_GET_RESERVE(pRaw, dataPos, DB_RESERVE_SIZE, _OVER) taosInitRWLatch(&pDb->lock); + if (pDb->cfg.tsdbPageSize <= TSDB_MIN_TSDB_PAGESIZE) { + mInfo("db:%s, tsdbPageSize set from %d to default %d", pDb->name, pDb->cfg.tsdbPageSize, + TSDB_DEFAULT_TSDB_PAGESIZE); + } + + if (pDb->cfg.sstTrigger <= TSDB_MIN_STT_TRIGGER) { + mInfo("db:%s, sstTrigger set from %d to default %d", pDb->name, pDb->cfg.sstTrigger, TSDB_DEFAULT_SST_TRIGGER); + } + terrno = 0; _OVER: @@ -262,6 +273,7 @@ static int32_t mndDbActionUpdate(SSdb *pSdb, SDbObj *pOld, SDbObj *pNew) { pOld->cfg.cacheLast = pNew->cfg.cacheLast; pOld->cfg.replications = pNew->cfg.replications; pOld->cfg.sstTrigger = pNew->cfg.sstTrigger; + pOld->cfg.tsdbPageSize = pNew->cfg.tsdbPageSize; taosWUnLockLatch(&pOld->lock); return 0; } @@ -341,6 +353,7 @@ static int32_t mndCheckDbCfg(SMnode 
*pMnode, SDbCfg *pCfg) { if (pCfg->sstTrigger < TSDB_MIN_STT_TRIGGER || pCfg->sstTrigger > TSDB_MAX_STT_TRIGGER) return -1; if (pCfg->hashPrefix < TSDB_MIN_HASH_PREFIX || pCfg->hashPrefix > TSDB_MAX_HASH_PREFIX) return -1; if (pCfg->hashSuffix < TSDB_MIN_HASH_SUFFIX || pCfg->hashSuffix > TSDB_MAX_HASH_SUFFIX) return -1; + if (pCfg->tsdbPageSize < TSDB_MIN_TSDB_PAGESIZE || pCfg->tsdbPageSize > TSDB_MAX_TSDB_PAGESIZE) return -1; terrno = 0; return terrno; @@ -377,6 +390,7 @@ static void mndSetDefaultDbCfg(SDbCfg *pCfg) { if (pCfg->sstTrigger <= 0) pCfg->sstTrigger = TSDB_DEFAULT_SST_TRIGGER; if (pCfg->hashPrefix < 0) pCfg->hashPrefix = TSDB_DEFAULT_HASH_PREFIX; if (pCfg->hashSuffix < 0) pCfg->hashSuffix = TSDB_DEFAULT_HASH_SUFFIX; + if (pCfg->tsdbPageSize <= 0) pCfg->tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE; } static int32_t mndSetCreateDbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) { @@ -496,6 +510,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate, .sstTrigger = pCreate->sstTrigger, .hashPrefix = pCreate->hashPrefix, .hashSuffix = pCreate->hashSuffix, + .tsdbPageSize = pCreate->tsdbPageSize, }; dbObj.cfg.numOfRetensions = pCreate->numOfRetensions; @@ -1726,6 +1741,9 @@ static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb, pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.hashSuffix, false); + + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.tsdbPageSize, false); } taosMemoryFree(buf); diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index de29dea511c169f5b584dded1c07da48b82f4552..a95bdef32302e04ff826d613e33a4fca7ede7f33 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -237,6 +237,7 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg createReq.sstTrigger = pDb->cfg.sstTrigger; createReq.hashPrefix = pDb->cfg.hashPrefix; createReq.hashSuffix = pDb->cfg.hashSuffix; + createReq.tsdbPageSize = pDb->cfg.tsdbPageSize; for (int32_t v = 0; v < pVgroup->replica; ++v) { SReplica *pReplica = &createReq.replicas[v]; diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c index 9ed72eeecdac435685b1b37d5faa60c9d91bee4c..5adb2eb3592e7bcba7803b5e2972f0bdd3689adb 100644 --- a/source/dnode/vnode/src/vnd/vnodeCfg.c +++ b/source/dnode/vnode/src/vnd/vnodeCfg.c @@ -115,6 +115,7 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) { if (tjsonAddIntegerToObject(pJson, "hashMethod", pCfg->hashMethod) < 0) return -1; if (tjsonAddIntegerToObject(pJson, "hashPrefix", pCfg->hashPrefix) < 0) return -1; if (tjsonAddIntegerToObject(pJson, "hashSuffix", pCfg->hashSuffix) < 0) return -1; + if (tjsonAddIntegerToObject(pJson, "tsdbPageSize", pCfg->tsdbPageSize) < 0) return -1; if (tjsonAddIntegerToObject(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum) < 0) return -1; if (tjsonAddIntegerToObject(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex) < 0) return -1; @@ -215,7 +216,7 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { tjsonGetNumberValue(pJson, "wal.level", pCfg->walCfg.level, code); if (code < 0) return -1; tjsonGetNumberValue(pJson, "sstTrigger", pCfg->sttTrigger, code); - if (code < 0) return -1; + if (code < 0) pCfg->sttTrigger = TSDB_DEFAULT_SST_TRIGGER; tjsonGetNumberValue(pJson, "hashBegin", pCfg->hashBegin, code); if (code < 
0) return -1; tjsonGetNumberValue(pJson, "hashEnd", pCfg->hashEnd, code); @@ -223,9 +224,9 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { tjsonGetNumberValue(pJson, "hashMethod", pCfg->hashMethod, code); if (code < 0) return -1; tjsonGetNumberValue(pJson, "hashPrefix", pCfg->hashPrefix, code); - if (code < 0) return -1; + if (code < 0) pCfg->hashPrefix = TSDB_DEFAULT_HASH_PREFIX; tjsonGetNumberValue(pJson, "hashSuffix", pCfg->hashSuffix, code); - if (code < 0) return -1; + if (code < 0) pCfg->hashSuffix = TSDB_DEFAULT_HASH_SUFFIX; tjsonGetNumberValue(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum, code); if (code < 0) return -1; @@ -255,6 +256,7 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { } tjsonGetNumberValue(pJson, "tsdbPageSize", pCfg->tsdbPageSize, code); + if (code < 0) pCfg->tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE * 1024; return 0; } diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 655bb68206975c65022511632c4f8fd56c0699a3..511ae25810f37d8d31d04c41fcf4bfec53610256 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -1303,7 +1303,7 @@ SNode* createShowStmtWithCond(SAstCreateContext* pCxt, ENodeType type, SNode* pD EOperatorType tableCondType) { CHECK_PARSER_STATUS(pCxt); if (needDbShowStmt(type) && NULL == pDbName) { - snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "db not specified"); + snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "database not specified"); pCxt->errCode = TSDB_CODE_PAR_SYNTAX_ERROR; return NULL; } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 2af4032fd89829cfada3f84afe7ece36af096ec5..f1a59ebf513d82ffa29404ec58b2830ca2a6a044 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1283,6 +1283,36 @@ static int32_t rewriteCountStar(STranslateContext* pCxt, SFunctionNode* pCount) return code; } +static bool isCountTbname(SFunctionNode* pFunc) { + if (FUNCTION_TYPE_COUNT != pFunc->funcType || 1 != LIST_LENGTH(pFunc->pParameterList)) { + return false; + } + SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); + return (QUERY_NODE_FUNCTION == nodeType(pPara) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPara)->funcType); +} + +// count(tbname) is rewritten as count(ts) for scannning optimization +static int32_t rewriteCountTbname(STranslateContext* pCxt, SFunctionNode* pCount) { + SFunctionNode* pTbname = (SFunctionNode*)nodesListGetNode(pCount->pParameterList, 0); + const char* pTableAlias = NULL; + if (LIST_LENGTH(pTbname->pParameterList) > 0) { + pTableAlias = ((SValueNode*)nodesListGetNode(pTbname->pParameterList, 0))->literal; + } + STableNode* pTable = NULL; + int32_t code = findTable(pCxt, pTableAlias, &pTable); + if (TSDB_CODE_SUCCESS == code) { + SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == pCol) { + code = TSDB_CODE_OUT_OF_MEMORY; + } else { + setColumnInfoBySchema((SRealTableNode*)pTable, ((SRealTableNode*)pTable)->pMeta->schema, -1, pCol); + NODES_DESTORY_LIST(pCount->pParameterList); + code = nodesListMakeAppend(&pCount->pParameterList, (SNode*)pCol); + } + } + return code; +} + static bool hasInvalidFuncNesting(SNodeList* pParameterList) { bool hasInvalidFunc = false; nodesWalkExprs(pParameterList, haveVectorFunction, &hasInvalidFunc); @@ -1318,6 +1348,9 @@ static int32_t translateAggFunc(STranslateContext* pCxt, SFunctionNode* pFunc) { if (isCountStar(pFunc)) { return 
rewriteCountStar(pCxt, pFunc); } + if (isCountTbname(pFunc)) { + return rewriteCountTbname(pCxt, pFunc); + } return TSDB_CODE_SUCCESS; } diff --git a/source/libs/planner/test/planOptimizeTest.cpp b/source/libs/planner/test/planOptimizeTest.cpp index 6c5b760564b1e73f09c92fc03d30a958c898c2fc..c2a0aee847f8ad1f6a03cf675c01fabdab3c4eff 100644 --- a/source/libs/planner/test/planOptimizeTest.cpp +++ b/source/libs/planner/test/planOptimizeTest.cpp @@ -35,6 +35,8 @@ TEST_F(PlanOptimizeTest, scanPath) { run("SELECT LAST(c1) FROM t1 WHERE ts BETWEEN '2022-7-29 11:10:10' AND '2022-7-30 11:10:10' INTERVAL(10S) " "FILL(LINEAR)"); + + run("SELECT COUNT(TBNAME) FROM t1"); } TEST_F(PlanOptimizeTest, pushDownCondition) { diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c index d848016e46482614972d5e85469e4297136d6cc0..8162b922cec8fd779907df851bc1d8b545cafc28 100644 --- a/source/libs/qcom/src/queryUtil.c +++ b/source/libs/qcom/src/queryUtil.c @@ -134,8 +134,7 @@ int32_t taosAsyncExec(__async_exec_fn_t execFn, void* execParam, int32_t* code) schedMsg.thandle = execParam; schedMsg.msg = code; - taosScheduleTask(&pTaskQueue, &schedMsg); - return 0; + return taosScheduleTask(&pTaskQueue, &schedMsg); } void destroySendMsgInfo(SMsgSendInfo* pMsgBody) { @@ -472,5 +471,3 @@ int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst) { return TSDB_CODE_SUCCESS; } - - diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c index bb2151ed9ded3728aee58f2994c8a163a354adc0..2cc62d3d6af466921ef10b9fc5871499d4797467 100644 --- a/source/libs/tdb/src/db/tdbPager.c +++ b/source/libs/tdb/src/db/tdbPager.c @@ -185,10 +185,10 @@ int tdbPagerWrite(SPager *pPager, SPage *pPage) { // ref page one more time so the page will not be release tdbRefPage(pPage); tdbDebug("pcache/mdirty page %p/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id); - /* + // Set page as dirty pPage->isDirty = 1; - + /* // Add page to dirty list(TODO: NOT use O(n^2) algorithm) for (ppPage = &pPager->pDirty; (*ppPage) && TDB_PAGE_PGNO(*ppPage) < TDB_PAGE_PGNO(pPage); ppPage = &((*ppPage)->pDirtyNext)) { @@ -260,6 +260,7 @@ int tdbPagerCommit(SPager *pPager, TXN *pTxn) { pPage->isDirty = 0; + // tRBTreeDrop(&pPager->rbt, (SRBTreeNode *)pPage); tdbPCacheRelease(pPager->pCache, pPage, pTxn); } @@ -345,15 +346,19 @@ int tdbPagerAbort(SPager *pPager, TXN *pTxn) { } */ // 3, release the dirty pages - for (pPage = pPager->pDirty; pPage; pPage = pPager->pDirty) { - pPager->pDirty = pPage->pDirtyNext; - pPage->pDirtyNext = NULL; + SRBTreeIter iter = tRBTreeIterCreate(&pPager->rbt, 1); + SRBTreeNode *pNode = NULL; + while ((pNode = tRBTreeIterNext(&iter)) != NULL) { + pPage = (SPage *)pNode; pPage->isDirty = 0; + // tRBTreeDrop(&pPager->rbt, (SRBTreeNode *)pPage); tdbPCacheRelease(pPager->pCache, pPage, pTxn); } + tRBTreeCreate(&pPager->rbt, pageCmpFn); + // 4, remove the journal file tdbOsClose(pPager->jfd); tdbOsRemove(pPager->jFileName); diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 4a0008b5ff22283b17e263d9e5168fc4ff0f16fd..dcfa93431b40209e56b0f0f04d3111ece4c893f5 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -374,10 +374,12 @@ void cliHandleResp(SCliConn* conn) { if (pCtx == NULL && CONN_NO_PERSIST_BY_APP(conn)) { tDebug("%s except, conn %p read while cli ignore it", CONN_GET_INST_LABEL(conn), conn); + transFreeMsg(transMsg.pCont); return; } if (CONN_RELEASE_BY_SERVER(conn) && transMsg.info.ahandle == NULL) { tDebug("%s 
except, conn %p read while cli ignore it", CONN_GET_INST_LABEL(conn), conn); + transFreeMsg(transMsg.pCont); return; } @@ -393,7 +395,7 @@ void cliHandleResp(SCliConn* conn) { } if (CONN_NO_PERSIST_BY_APP(conn)) { - addConnToPool(pThrd->pool, conn); + return addConnToPool(pThrd->pool, conn); } uv_read_start((uv_stream_t*)conn->stream, cliAllocRecvBufferCb, cliRecvCb); diff --git a/source/util/src/tsched.c b/source/util/src/tsched.c index 89471c4347e49152b2b1fff0313ef29da92c70ff..9cf9e2c4316a850ac31b15aaab8a864deb4d32eb 100644 --- a/source/util/src/tsched.c +++ b/source/util/src/tsched.c @@ -149,18 +149,18 @@ void *taosProcessSchedQueue(void *scheduler) { return NULL; } -void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) { +int taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) { SSchedQueue *pSched = (SSchedQueue *)queueScheduler; int32_t ret = 0; if (pSched == NULL) { uError("sched is not ready, msg:%p is dropped", pMsg); - return; + return -1; } if (atomic_load_8(&pSched->stop)) { uError("sched is already stopped, msg:%p is dropped", pMsg); - return; + return -1; } if ((ret = tsem_wait(&pSched->emptySem)) != 0) { @@ -185,6 +185,7 @@ void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) { uFatal("post %s fullSem failed(%s)", pSched->label, strerror(errno)); ASSERT(0); } + return ret; } void taosCleanUpScheduler(void *param) { @@ -192,11 +193,11 @@ void taosCleanUpScheduler(void *param) { if (pSched == NULL) return; uDebug("start to cleanup %s schedQsueue", pSched->label); - + atomic_store_8(&pSched->stop, 1); taosMsleep(200); - + for (int32_t i = 0; i < pSched->numOfThreads; ++i) { if (taosCheckPthreadValid(pSched->qthread[i])) { tsem_post(&pSched->fullSem); @@ -220,7 +221,7 @@ void taosCleanUpScheduler(void *param) { if (pSched->queue) taosMemoryFree(pSched->queue); if (pSched->qthread) taosMemoryFree(pSched->qthread); - //taosMemoryFree(pSched); + // taosMemoryFree(pSched); } // for debug purpose, dump the scheduler status every 1min. 
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/demo.py b/tests/develop-test/5-taos-tools/taosbenchmark/demo.py index 99e8cd36a4cf1dae08baf93ef4d6338bb08dc7bd..a44ad4c1d0b5cc8e4ba181c1bf941454929f2df6 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/demo.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/demo.py @@ -22,9 +22,9 @@ from util.dnodes import * class TDTestCase: def caseDescription(self): - ''' + """ [TD-13823] taosBenchmark test cases - ''' + """ return def init(self, conn, logSql): @@ -34,19 +34,19 @@ class TDTestCase: def getPath(self, tool="taosBenchmark"): selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] + if "community" in selfPath: + projPath = selfPath[: selfPath.find("community")] else: - projPath = selfPath[:selfPath.find("tests")] + projPath = selfPath[: selfPath.find("tests")] paths = [] for root, dirs, files in os.walk(projPath): - if ((tool) in files): + if (tool) in files: rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): + if "packaging" not in rootRealPath: paths.append(os.path.join(root, tool)) break - if (len(paths) == 0): + if len(paths) == 0: tdLog.exit("taosBenchmark not found!") return else: @@ -55,7 +55,7 @@ class TDTestCase: def run(self): binPath = self.getPath() - cmd = "%s -n 100 -t 100 -y" %binPath + cmd = "%s -n 100 -t 100 -y" % binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("use test") @@ -77,14 +77,16 @@ class TDTestCase: tdSql.checkData(4, 3, "TAG") tdSql.checkData(5, 0, "location") tdSql.checkData(5, 1, "VARCHAR") - tdSql.checkData(5, 2, 16) + tdSql.checkData(5, 2, 24) tdSql.checkData(5, 3, "TAG") tdSql.query("select count(*) from test.meters where groupid >= 0") tdSql.checkData(0, 0, 10000) - tdSql.query("select count(*) from test.meters where location = 'San Francisco' or location = 'Los Angles' or location = 'San Diego' or location = 'San Jose' or \ - location = 'Palo Alto' or location = 'Campbell' or location = 'Mountain View' or location = 'Sunnyvale' or location = 'Santa Clara' or location = 'Cupertino' ") + tdSql.query( + "select count(*) from test.meters where location = 'California.SanFrancisco' or location = 'California.LosAngles' or location = 'California.SanDiego' or location = 'California.SanJose' or \ + location = 'California.PaloAlto' or location = 'California.Campbell' or location = 'California.MountainView' or location = 'California.Sunnyvale' or location = 'California.SantaClara' or location = 'California.Cupertino' " + ) tdSql.checkData(0, 0, 10000) def stop(self): diff --git a/tests/system-test/1-insert/delete_data.py b/tests/system-test/1-insert/delete_data.py index 068d212ac4e49d00fe7ff938118352dbe462a988..f620a4b18a0d7ddf5a0d65c902d0411c270e6463 100644 --- a/tests/system-test/1-insert/delete_data.py +++ b/tests/system-test/1-insert/delete_data.py @@ -81,6 +81,7 @@ class TDTestCase: 'binary':self.binary_val, 'nchar':self.nchar_val } + def insert_base_data(self,col_type,tbname,rows,base_data): for i in range(rows): if col_type.lower() == 'tinyint': @@ -290,6 +291,9 @@ class TDTestCase: self.delete_data_ntb() self.delete_data_ctb() self.delete_data_stb() + tdDnodes.stoptaosd(1) + tdDnodes.starttaosd(1) + self.delete_data_ntb() def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/1-insert/update_data.py b/tests/system-test/1-insert/update_data.py index 
deff4b42a1784b5bc59466d1c067536e83c10d06..417adc39acc306fbd7b2d74518653d0347fb27cb 100644 --- a/tests/system-test/1-insert/update_data.py +++ b/tests/system-test/1-insert/update_data.py @@ -154,10 +154,12 @@ class TDTestCase: up_bool = random.randint(0,100)%2 up_float = random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX) up_double = random.uniform(constant.DOUBLE_MIN*(1E-300),constant.DOUBLE_MAX*(1E-300)) - binary_length = random.randint(0,self.str_length) - nchar_length = random.randint(0,self.str_length) - up_binary = tdCom.getLongName(binary_length) - up_nchar = tdCom.getLongName(nchar_length) + binary_length = [] + for i in range(self.str_length+1): + binary_length.append(i) + nchar_length = [] + for i in range(self.str_length+1): + nchar_length.append(i) for col_name,col_type in column_dict.items(): if tb_type == 'ntb': tdSql.execute(f'create table {tbname} (ts timestamp,{col_name} {col_type})') @@ -188,9 +190,13 @@ class TDTestCase: elif col_type.lower() == 'double': self.update_and_check_data(tbname,col_name,col_type,up_double,dbname) elif 'binary' in col_type.lower(): - self.update_and_check_data(tbname,col_name,col_type,up_binary,dbname) + for i in binary_length: + up_binary = tdCom.getLongName(i) + self.update_and_check_data(tbname,col_name,col_type,up_binary,dbname) elif 'nchar' in col_type.lower(): - self.update_and_check_data(tbname,col_name,col_type,up_nchar,dbname) + for i in nchar_length: + up_nchar = tdCom.getLongName(i) + self.update_and_check_data(tbname,col_name,col_type,up_nchar,dbname) elif col_type.lower() == 'timestamp': self.update_and_check_data(tbname,col_name,col_type,self.ts+1,dbname) tdSql.execute(f'insert into {tbname} values({self.ts},null)')