diff --git a/Jenkinsfile2 b/Jenkinsfile2
index d7df07f06afd8e1e483455e3ce925a03f28740fd..b95b3ff86bf5005f486841abf3c5ce428b3d6957 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -218,12 +218,12 @@ def pre_test_win(){
if (env.CHANGE_URL =~ /\/TDengine\//) {
bat '''
cd %WIN_INTERNAL_ROOT%
- git pull
+ git pull origin ''' + env.CHANGE_TARGET + '''
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git remote prune origin
- git pull
+ git pull origin ''' + env.CHANGE_TARGET + '''
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
@@ -236,7 +236,7 @@ def pre_test_win(){
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
bat '''
cd %WIN_INTERNAL_ROOT%
- git pull
+ git pull origin ''' + env.CHANGE_TARGET + '''
'''
bat '''
cd %WIN_INTERNAL_ROOT%
diff --git a/README.md b/README.md
index a8126d74d44b2615f0d0a3e80fc330eaac6a2950..1034a19f37a37085e9ad2bacbad1a6fd05a91321 100644
--- a/README.md
+++ b/README.md
@@ -15,19 +15,19 @@
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
-English | [简体中文](README-CN.md) | [Lean more about TSDB](https://tdengine.com/tsdb)
+English | [简体中文](README-CN.md) | [Learn more about TSDB](https://tdengine.com/tsdb/)
# What is TDengine?
TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-seires databases with the following advantages:
-- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
+- **[High Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
-- **[Ease of Use](https://docs.tdengine.com/get-started/docker/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
+- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
@@ -232,9 +232,9 @@ After building successfully, TDengine can be installed by
sudo make install
```
-Users can find more information about directories installed on the system in the [directory and files](https://docs.taosdata.com/reference/directory/) section.
+Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section.
-Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.taosdata.com/get-started/package/) for it.
+Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it.
To start the service after installation, in a terminal, use:
diff --git a/cmake/cmake.version b/cmake/cmake.version
index d5721f767125721ef2d7ae897789b64debb6d0a1..fcb31adc39bfb4b95519bdf4b1a426fa7256ba3f 100644
--- a/cmake/cmake.version
+++ b/cmake/cmake.version
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "3.0.1.1")
+ SET(TD_VER_NUMBER "3.0.1.2")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index 5cc580a9c62c49f6de31f87a302f5f4e6ceb0f58..52e2c0944a78ed0114001494e2edd73e3e88049d 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG 509ec72
+ GIT_TAG 8207c74
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/cmake/taosws_CMakeLists.txt.in b/cmake/taosws_CMakeLists.txt.in
index 04b1262cafd6f1dd984f568b847454c409d301ed..ca8fff8da511ef86baa699af8246822d91982238 100644
--- a/cmake/taosws_CMakeLists.txt.in
+++ b/cmake/taosws_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taosws-rs
ExternalProject_Add(taosws-rs
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
- GIT_TAG e771403
+ GIT_TAG 1bdfca3
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/docs/en/12-taos-sql/01-data-type.md b/docs/en/12-taos-sql/01-data-type.md
index 876de50f35ee3ba533bd7d5916632de853a84c0e..60046629a4c6f89ccfe9b20adcbb2fdba2ffb261 100644
--- a/docs/en/12-taos-sql/01-data-type.md
+++ b/docs/en/12-taos-sql/01-data-type.md
@@ -1,70 +1,70 @@
---
sidebar_label: Data Types
title: Data Types
-description: "TDengine supports a variety of data types including timestamp, float, JSON and many others."
+description: 'TDengine supports a variety of data types including timestamp, float, JSON and many others.'
---
## Timestamp
When using TDengine to store and query data, the most important part of the data is timestamp. Timestamp must be specified when creating and inserting data rows. Timestamp must follow the rules below:
-- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
-- Internal function `now` can be used to get the current timestamp on the client side
-- The current timestamp of the client side is applied when `now` is used to insert data
+- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`.
+- Internal function `NOW` can be used to get the current timestamp on the client side.
+- The current timestamp of the client side is applied when `NOW` is used to insert data.
- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
-- Add/subtract operations can be carried out on timestamps. For example `now-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
+- Add/subtract operations can be carried out on timestamps. For example `NOW-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanonseconds.
```sql
CREATE DATABASE db_name PRECISION 'ns';
```
+
## Data Types
In TDengine, the data types below can be used when specifying a column or tag.
-| # | **type** | **Bytes** | **Description** |
-| --- | :-------: | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported |
-| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1] |
-| 3 | INT UNSIGNED| 4| unsigned integer, the value range is [0, 2^32-1]
-| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1] |
-| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1] |
-| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] |
-| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] |
-| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
-| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767] |
-| 10 | INT UNSIGNED| 2| unsigned integer, the value range is [0, 65535]|
-| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127] |
-| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255] |
-| 13 | BOOL | 1 | Bool, the value range is {true, false} |
-| 14 | NCHAR | User Defined| Multi-Byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
-| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type |
-| 16 | VARCHAR | User-defined | Alias of BINARY |
-
+| # | **type** | **Bytes** | **Description** |
+| --- | :--------------: | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported. |
+| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1]. |
+| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1]. |
+| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1]. |
+| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1]. |
+| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38]. |
+| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308]. |
+| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
+| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767]. |
+| 10 | INT UNSIGNED | 2 | unsigned integer, the value range is [0, 65535]. |
+| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127]. |
+| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255]. |
+| 13 | BOOL | 1 | Bool, the value range is {true, false}. |
+| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
+| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
+| 16 | VARCHAR | User-defined | Alias of BINARY |
:::note
-- TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes.
-- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
-- The length of BINARY can be up to 16374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
+
+- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
+- The length of BINARY can be up to 16,374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
:::
-
## Constants
+
TDengine supports a variety of constants:
-| # | **Syntax** | **Type** | **Description** |
-| --- | :-------: | --------- | -------------------------------------- |
-| 1 | [{+ \| -}]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. |
-| 2 | 123.45 | DOUBLE | Floating-point literals are of type DOUBLE. Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used. |
-| 3 | 1.2E3 | DOUBLE | Literals in scientific notation are of type DOUBLE. |
-| 4 | 'abc' | BINARY | Content enclosed in single quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal single quote inside the string must be escaped with a backslash (\'). |
-| 5 | 'abc' | BINARY | Content enclosed in double quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal double quote inside the string must be escaped with a backslash (\"). |
-| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. |
-| 7 | {TRUE \| FALSE} | BOOL | Boolean literals are of type BOOL. |
-| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | The preceding characters indicate null literals. These can be used with any data type. |
+| # | **Syntax** | **Type** | **Description** |
+| --- | :-----------------------------------------------: | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 1 | [{+ \| -}]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. |
+| 2 | 123.45 | DOUBLE | Floating-point literals are of type DOUBLE. Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used. |
+| 3 | 1.2E3 | DOUBLE | Literals in scientific notation are of type DOUBLE. |
+| 4 | 'abc' | BINARY | Content enclosed in single quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal single quote inside the string must be escaped with a backslash `\'`. |
+| 5 | 'abc' | BINARY | Content enclosed in double quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal double quote inside the string must be escaped with a backslash `\"`. |
+| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. |
+| 7 | {TRUE \| FALSE} | BOOL | Boolean literals are of type BOOL. |
+| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | The preceding characters indicate null literals. These can be used with any data type. |
:::note
Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
diff --git a/docs/en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md
index e409973173cdf34aade9971612e39a2765630d4a..786444ad77a4ba8cd3c4729863dfae0a53cef798 100644
--- a/docs/en/12-taos-sql/06-select.md
+++ b/docs/en/12-taos-sql/06-select.md
@@ -68,7 +68,7 @@ A query can be performed on some or all columns. Data and tag columns can all be
### Wildcards
-You can use an asterisk (\*) as a wildcard character to indicate all columns. For standard tables, the asterisk indicates only data columns. For supertables and subtables, tag columns are also included.
+You can use an asterisk (\*) as a wildcard character to indicate all columns. For normal tables or sub-tables, the asterisk indicates only data columns. For supertables, tag columns are also included when using asterisk (\*).
```sql
SELECT * FROM d1001;
diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md
index e3901114d3271d040934f0a211135beecbdc2f7f..414986d107cb561d23b215a2e3b806f98542182e 100644
--- a/docs/en/28-releases/01-tdengine.md
+++ b/docs/en/28-releases/01-tdengine.md
@@ -6,6 +6,9 @@ description: TDengine release history, Release Notes and download links.
import Release from "/components/ReleaseV3";
+## 3.0.1.2
+
+
## 3.0.1.1
diff --git a/docs/en/28-releases/02-tools.md b/docs/en/28-releases/02-tools.md
index 228990df3b15bf7ee111433994813dbb171843ab..086d3adea2d43f9b3eebdfb9520236710e306582 100644
--- a/docs/en/28-releases/02-tools.md
+++ b/docs/en/28-releases/02-tools.md
@@ -6,6 +6,10 @@ description: taosTools release history, Release Notes, download links.
import Release from "/components/ReleaseV3";
+## 2.2.2
+
+
+
## 2.2.0
diff --git a/docs/examples/csharp/.gitignore b/docs/examples/csharp/.gitignore
index b3aff79f3706e23aa74199a7f521f7912d2b0e45..694da603b924844cf0e2f30d038b6eafdc2ec0d1 100644
--- a/docs/examples/csharp/.gitignore
+++ b/docs/examples/csharp/.gitignore
@@ -1,4 +1,12 @@
bin
obj
.vs
-*.sln
\ No newline at end of file
+*.sln
+wsConnect/obj
+wsInsert/obj
+wsQuery/obj
+wsStmt/obj
+wsConnect/bin
+wsInsert/bin
+wsQuery/bin
+wsStmt/bin
\ No newline at end of file
diff --git a/docs/examples/csharp/wsConnect/Program.cs b/docs/examples/csharp/wsConnect/Program.cs
new file mode 100644
index 0000000000000000000000000000000000000000..2e89372c3e3dd23c16bad0362f494b2c64191cbc
--- /dev/null
+++ b/docs/examples/csharp/wsConnect/Program.cs
@@ -0,0 +1,25 @@
+using System;
+using TDengineWS.Impl;
+
+namespace Examples
+{
+ public class WSConnExample
+ {
+ static void Main(string[] args)
+ {
+ string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
+ IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
+ if (wsConn == IntPtr.Zero)
+ {
+ throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
+ }
+ else
+ {
+ Console.WriteLine("Establish connect success.");
+ }
+
+ // close connection.
+ LibTaosWS.WSClose(wsConn);
+ }
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/csharp/wsConnect/wsConnect.csproj b/docs/examples/csharp/wsConnect/wsConnect.csproj
new file mode 100644
index 0000000000000000000000000000000000000000..34951dc761903e5a4b7a4bec5dfe55a965ab88be
--- /dev/null
+++ b/docs/examples/csharp/wsConnect/wsConnect.csproj
@@ -0,0 +1,13 @@
+
+
+
+ Exe
+ net5.0
+ enable
+
+
+
+
+
+
+
diff --git a/docs/examples/csharp/wsInsert/Program.cs b/docs/examples/csharp/wsInsert/Program.cs
new file mode 100644
index 0000000000000000000000000000000000000000..4ff830b437fa21cb76474de1bb73c82fb71558ef
--- /dev/null
+++ b/docs/examples/csharp/wsInsert/Program.cs
@@ -0,0 +1,58 @@
+using System;
+using TDengineWS.Impl;
+
+namespace Examples
+{
+ public class WSInsertExample
+ {
+ static void Main(string[] args)
+ {
+ string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
+ IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
+
+ // Assert if connection is validate
+ if (wsConn == IntPtr.Zero)
+ {
+ throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
+ }
+ else
+ {
+ Console.WriteLine("Establish connect success.");
+ }
+
+ string createTable = "CREATE STABLE test.meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);";
+ string insert = "INSERT INTO test.d1001 USING test.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)" +
+ "test.d1002 USING test.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)" +
+ "test.d1003 USING test.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
+ "test.d1004 USING test.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
+
+ IntPtr wsRes = LibTaosWS.WSQuery(wsConn, createTable);
+ ValidInsert("create table", wsRes);
+ LibTaosWS.WSFreeResult(wsRes);
+
+ wsRes = LibTaosWS.WSQuery(wsConn, insert);
+ ValidInsert("insert data", wsRes);
+ LibTaosWS.WSFreeResult(wsRes);
+
+ // close connection.
+ LibTaosWS.WSClose(wsConn);
+ }
+
+ static void ValidInsert(string desc, IntPtr wsRes)
+ {
+ int code = LibTaosWS.WSErrorNo(wsRes);
+ if (code != 0)
+ {
+ throw new Exception($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}");
+ }
+ else
+ {
+ Console.WriteLine("{0} success affect {2} rows, cost {1} nanoseconds", desc, LibTaosWS.WSTakeTiming(wsRes),LibTaosWS.WSAffectRows(wsRes));
+ }
+ }
+ }
+
+}
+// Establish connect success.
+// create table success affect 0 rows, cost 3717542 nanoseconds
+// insert data success affect 8 rows, cost 2613637 nanoseconds
\ No newline at end of file
diff --git a/docs/examples/csharp/wsInsert/wsInsert.csproj b/docs/examples/csharp/wsInsert/wsInsert.csproj
new file mode 100644
index 0000000000000000000000000000000000000000..34951dc761903e5a4b7a4bec5dfe55a965ab88be
--- /dev/null
+++ b/docs/examples/csharp/wsInsert/wsInsert.csproj
@@ -0,0 +1,13 @@
+
+
+
+ Exe
+ net5.0
+ enable
+
+
+
+
+
+
+
diff --git a/docs/examples/csharp/wsQuery/Program.cs b/docs/examples/csharp/wsQuery/Program.cs
new file mode 100644
index 0000000000000000000000000000000000000000..bf3cf2bbe288a23d989656ad3efa20f22dac89c3
--- /dev/null
+++ b/docs/examples/csharp/wsQuery/Program.cs
@@ -0,0 +1,74 @@
+using System;
+using TDengineWS.Impl;
+using System.Collections.Generic;
+using TDengineDriver;
+
+namespace Examples
+{
+ public class WSQueryExample
+ {
+ static void Main(string[] args)
+ {
+ string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
+ IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
+ if (wsConn == IntPtr.Zero)
+ {
+ throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
+ }
+ else
+ {
+ Console.WriteLine("Establish connect success.");
+ }
+
+ string select = "select * from test.meters";
+
+ // optional:wsRes = LibTaosWS.WSQuery(wsConn, select);
+ IntPtr wsRes = LibTaosWS.WSQueryTimeout(wsConn, select, 1);
+ // Assert if query execute success.
+ int code = LibTaosWS.WSErrorNo(wsRes);
+ if (code != 0)
+ {
+ throw new Exception($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}");
+ }
+
+ // get meta data
+ List metas = LibTaosWS.WSGetFields(wsRes);
+ // get retrieved data
+ List dataSet = LibTaosWS.WSGetData(wsRes);
+
+ // do something with result.
+ foreach (var meta in metas)
+ {
+ Console.Write("{0} {1}({2}) \t|\t", meta.name, meta.TypeName(), meta.size);
+ }
+ Console.WriteLine("");
+
+ for (int i = 0; i < dataSet.Count;)
+ {
+ for (int j = 0; j < metas.Count; j++)
+ {
+ Console.Write("{0}\t|\t", dataSet[i]);
+ i++;
+ }
+ Console.WriteLine("");
+ }
+
+ // Free result after use.
+ LibTaosWS.WSFreeResult(wsRes);
+
+ // close connection.
+ LibTaosWS.WSClose(wsConn);
+ }
+ }
+}
+
+// Establish connect success.
+// ts TIMESTAMP(8) | current FLOAT(4) | voltage INT(4) | phase FLOAT(4) | location BINARY(64) | groupid INT(4) |
+// 1538548685000 | 10.8 | 223 | 0.29 | California.LosAngeles | 3 |
+// 1538548686500 | 11.5 | 221 | 0.35 | California.LosAngeles | 3 |
+// 1538548685500 | 11.8 | 221 | 0.28 | California.LosAngeles | 2 |
+// 1538548696600 | 13.4 | 223 | 0.29 | California.LosAngeles | 2 |
+// 1538548685000 | 10.3 | 219 | 0.31 | California.SanFrancisco | 2 |
+// 1538548695000 | 12.6 | 218 | 0.33 | California.SanFrancisco | 2 |
+// 1538548696800 | 12.3 | 221 | 0.31 | California.SanFrancisco | 2 |
+// 1538548696650 | 10.3 | 218 | 0.25 | California.SanFrancisco | 3 |
\ No newline at end of file
diff --git a/docs/examples/csharp/wsQuery/wsQuery.csproj b/docs/examples/csharp/wsQuery/wsQuery.csproj
new file mode 100644
index 0000000000000000000000000000000000000000..34951dc761903e5a4b7a4bec5dfe55a965ab88be
--- /dev/null
+++ b/docs/examples/csharp/wsQuery/wsQuery.csproj
@@ -0,0 +1,13 @@
+
+
+
+ Exe
+ net5.0
+ enable
+
+
+
+
+
+
+
diff --git a/docs/examples/csharp/wsStmt/Program.cs b/docs/examples/csharp/wsStmt/Program.cs
new file mode 100644
index 0000000000000000000000000000000000000000..54de77ec1f98deaf14fd2ad9ca0acd57e6b38f63
--- /dev/null
+++ b/docs/examples/csharp/wsStmt/Program.cs
@@ -0,0 +1,95 @@
+using System;
+using TDengineWS.Impl;
+using TDengineDriver;
+using System.Runtime.InteropServices;
+
+namespace Examples
+{
+ public class WSStmtExample
+ {
+ static void Main(string[] args)
+ {
+ const string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
+ const string table = "meters";
+ const string database = "test";
+ const string childTable = "d1005";
+ string insert = $"insert into ? using {database}.{table} tags(?,?) values(?,?,?,?)";
+ const int numOfTags = 2;
+ const int numOfColumns = 4;
+
+ // Establish connection
+ IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
+ if (wsConn == IntPtr.Zero)
+ {
+ throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
+ }
+ else
+ {
+ Console.WriteLine("Establish connect success...");
+ }
+
+ // init stmt
+ IntPtr wsStmt = LibTaosWS.WSStmtInit(wsConn);
+ if (wsStmt != IntPtr.Zero)
+ {
+ int code = LibTaosWS.WSStmtPrepare(wsStmt, insert);
+ ValidStmtStep(code, wsStmt, "WSStmtPrepare");
+
+ TAOS_MULTI_BIND[] wsTags = new TAOS_MULTI_BIND[] { WSMultiBind.WSBindNchar(new string[] { "California.SanDiego" }), WSMultiBind.WSBindInt(new int?[] { 4 }) };
+ code = LibTaosWS.WSStmtSetTbnameTags(wsStmt, $"{database}.{childTable}", wsTags, numOfTags);
+ ValidStmtStep(code, wsStmt, "WSStmtSetTbnameTags");
+
+ TAOS_MULTI_BIND[] data = new TAOS_MULTI_BIND[4];
+ data[0] = WSMultiBind.WSBindTimestamp(new long[] { 1538548687000, 1538548688000, 1538548689000, 1538548690000, 1538548691000 });
+ data[1] = WSMultiBind.WSBindFloat(new float?[] { 10.30F, 10.40F, 10.50F, 10.60F, 10.70F });
+ data[2] = WSMultiBind.WSBindInt(new int?[] { 223, 221, 222, 220, 219 });
+ data[3] = WSMultiBind.WSBindFloat(new float?[] { 0.31F, 0.32F, 0.33F, 0.35F, 0.28F });
+ code = LibTaosWS.WSStmtBindParamBatch(wsStmt, data, numOfColumns);
+ ValidStmtStep(code, wsStmt, "WSStmtBindParamBatch");
+
+ code = LibTaosWS.WSStmtAddBatch(wsStmt);
+ ValidStmtStep(code, wsStmt, "WSStmtAddBatch");
+
+ IntPtr stmtAffectRowPtr = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(Int32)));
+ code = LibTaosWS.WSStmtExecute(wsStmt, stmtAffectRowPtr);
+ ValidStmtStep(code, wsStmt, "WSStmtExecute");
+ Console.WriteLine("WS STMT insert {0} rows...", Marshal.ReadInt32(stmtAffectRowPtr));
+ Marshal.FreeHGlobal(stmtAffectRowPtr);
+
+ LibTaosWS.WSStmtClose(wsStmt);
+
+ // Free unmanaged memory
+ WSMultiBind.WSFreeTaosBind(wsTags);
+ WSMultiBind.WSFreeTaosBind(data);
+
+ //check result with SQL "SELECT * FROM test.d1005;"
+ }
+ else
+ {
+ throw new Exception("Init STMT failed...");
+ }
+
+ // close connection.
+ LibTaosWS.WSClose(wsConn);
+ }
+
+ static void ValidStmtStep(int code, IntPtr wsStmt, string desc)
+ {
+ if (code != 0)
+ {
+ throw new Exception($"{desc} failed,reason: {LibTaosWS.WSErrorStr(wsStmt)}, code: {code}");
+ }
+ else
+ {
+ Console.WriteLine("{0} success...", desc);
+ }
+ }
+ }
+}
+
+// WSStmtPrepare success...
+// WSStmtSetTbnameTags success...
+// WSStmtBindParamBatch success...
+// WSStmtAddBatch success...
+// WSStmtExecute success...
+// WS STMT insert 5 rows...
\ No newline at end of file
diff --git a/docs/examples/csharp/wsStmt/wsStmt.csproj b/docs/examples/csharp/wsStmt/wsStmt.csproj
new file mode 100644
index 0000000000000000000000000000000000000000..34951dc761903e5a4b7a4bec5dfe55a965ab88be
--- /dev/null
+++ b/docs/examples/csharp/wsStmt/wsStmt.csproj
@@ -0,0 +1,13 @@
+
+
+
+ Exe
+ net5.0
+ enable
+
+
+
+
+
+
+
diff --git a/docs/zh/07-develop/01-connect/_connect_cs.mdx b/docs/zh/07-develop/01-connect/_connect_cs.mdx
index 13b8a5dff250e6143fbed3090ba1f35e74adb9a0..9d0755fc646edb6f6ea9b834428b2b20e2044b4b 100644
--- a/docs/zh/07-develop/01-connect/_connect_cs.mdx
+++ b/docs/zh/07-develop/01-connect/_connect_cs.mdx
@@ -2,7 +2,7 @@
{{#include docs/examples/csharp/ConnectExample.cs}}
```
-:::info
-C# 连接器目前只支持原生连接。
+```csharp title="WebSocket 连接"
+{{#include docs/examples/csharp/wsConnect/Program.cs}}
+```
-:::
diff --git a/docs/zh/08-connector/40-csharp.mdx b/docs/zh/08-connector/40-csharp.mdx
index 4e49d84835d66622293e607a58699ae93fc7013d..9ba8be2c22fb46df8a5c73c499a7b22c6468f280 100644
--- a/docs/zh/08-connector/40-csharp.mdx
+++ b/docs/zh/08-connector/40-csharp.mdx
@@ -17,7 +17,7 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
`TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。
-`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](../rest-api/) 文档自行编写。
+`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、数据订阅、schemaless 数据写入、参数绑定接口数据写入等功能。 `TDengine.Connector` 还支持 WebSocket,通过 DSN 建立 WebSocket 连接,提供数据写入、查询、参数绑定接口数据写入等功能。
本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。
@@ -35,12 +35,29 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
## 支持的功能特性
+
+
+
+
1. 连接管理
2. 普通查询
3. 连续查询
4. 参数绑定
-5. 订阅功能
+5. 数据订阅(TMQ)
6. Schemaless
+
+
+
+
+
+1. 连接管理
+2. 普通查询
+3. 连续查询
+4. 参数绑定
+
+
+
+
## 安装步骤
@@ -79,7 +96,13 @@ dotnet add exmaple.csproj reference src/TDengine.csproj
## 建立连接
-``` C#
+
+
+
+
+使用 host、username、password、port 等信息建立连接。
+
+``` csharp
using TDengineDriver;
namespace TDengineExample
@@ -109,17 +132,63 @@ namespace TDengineExample
}
}
}
+```
+
+
+
+
+使用 DSN 建立 WebSocket 连接 DSN 连接。 描述字符串基本结构如下:
+
+```text
+[]://[[:@]:][/][?=[&=]]
+|------------|---|-----------|-----------|------|------|------------|-----------------------|
+| protocol | | username | password | host | port | database | params |
+```
+
+各部分意义见下表:
+
+* **protocol**: 显示指定以何种方式建立连接,例如:`ws://localhost:6041` 指定以 Websocket 方式建立连接(支持http/ws)。
+
+* **username/password**: 用于创建连接的用户名及密码(默认`root/taosdata`)。
+
+* **host/port**: 指定创建连接的服务器及端口,WebSocket 连接默认为 `localhost:6041` 。
+
+* **database**: 指定默认连接的数据库名。
+
+* **params**:其他可选参数。
+
+``` csharp
+{{#include docs/examples/csharp/wsConnect/Program.cs}}
```
+
+
+
## 使用示例
### 写入数据
#### SQL 写入
+
+
+
+
+
+
+
+
+```csharp
+{{#include docs/examples/csharp/wsInsert/Program.cs}}
+```
+
+
+
+
+
#### InfluxDB 行协议写入
@@ -132,12 +201,50 @@ namespace TDengineExample
+#### 参数绑定
+
+
+
+
+
+``` csharp
+{{#include docs/examples/csharp/StmtInsertExample.cs}}
+```
+
+
+
+
+
+```csharp
+{{#include docs/examples/csharp/wsStmt/Program.cs}}
+```
+
+
+
+
+
### 查询数据
#### 同步查询
+
+
+
+
+
+
+
+
+```csharp
+{{#include docs/examples/csharp/wsQuery/Program.cs}}
+```
+
+
+
+
+
#### 异步查询
@@ -151,12 +258,15 @@ namespace TDengineExample
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | 使用 TDengine.Connector 实现的参数绑定插入和查询的示例 |
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | 使用 TDengine.Connector 实现的使用 schemaless 写入的示例 |
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | 使用 TDengine.Connector 实现的异步查询的示例 |
-| [TMQ](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 |
+| [数据订阅(TMQ)](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 |
+| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | 使用 TDengine.Connector 的 WebSocket 基本的示例 |
+| [Basic WebSocket STMT](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | 使用 TDengine.Connector 的 WebSocket STMT 基本的示例 |
## 重要更新记录
| TDengine.Connector | 说明 |
|--------------------|--------------------------------|
+| 3.0.1 | 支持 WebSocket 和 Cloud,查询,插入,参数绑定。 |
| 3.0.0 | 支持 TDengine 3.0.0.0,不兼容 2.x。新增接口TDengine.Impl.GetData(),解析查询结果。 |
| 1.0.7 | 修复 TDengine.Query()内存泄露。 |
| 1.0.6 | 修复 schemaless 在 1.0.4 和 1.0.5 中失效 bug。 |
diff --git a/docs/zh/12-taos-sql/01-data-type.md b/docs/zh/12-taos-sql/01-data-type.md
index ee7b3a4715a11346b9a06da20dbc93ef309c0a3d..128fa20930d1b94b905a20fd1dde853d63e2b0c4 100644
--- a/docs/zh/12-taos-sql/01-data-type.md
+++ b/docs/zh/12-taos-sql/01-data-type.md
@@ -1,7 +1,7 @@
---
sidebar_label: 数据类型
title: 数据类型
-description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类型等"
+description: 'TDengine 支持的数据类型: 时间戳、浮点型、JSON 类型等'
---
## 时间戳
@@ -9,64 +9,65 @@ description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类
使用 TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则:
- 时间格式为 `YYYY-MM-DD HH:mm:ss.MS`,默认时间分辨率为毫秒。比如:`2017-08-12 18:25:58.128`
-- 内部函数 now 是客户端的当前时间
-- 插入记录时,如果时间戳为 now,插入数据时使用提交这条记录的客户端的当前时间
-- Epoch Time:时间戳也可以是一个长整数,表示从 UTC 时间 1970-01-01 00:00:00 开始的毫秒数。相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从 UTC 时间 1970-01-01 00:00:00 开始的微秒数;纳秒精度逻辑类似。
-- 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降采样操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n (自然月) 和 y (自然年)。
+- 内部函数 NOW 是客户端的当前时间
+- 插入记录时,如果时间戳为 NOW,插入数据时使用提交这条记录的客户端的当前时间
+- Epoch Time:时间戳也可以是一个长整数,表示从 UTC 时间 1970-01-01 00:00:00 开始的毫秒数。相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从 UTC 时间 1970-01-01 00:00:00 开始的微秒数;纳秒精度逻辑相同。
+- 时间可以加减,比如 NOW-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w`,表示查询两周前整整一周的数据。在指定降采样操作(Down Sampling)的时间窗口(Interval)时,时间单位还可以使用 n(自然月)和 y(自然年)。
-TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 PRECISION 参数也可以支持微秒和纳秒。
+TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 `PRECISION` 参数也可以支持微秒和纳秒。
```sql
CREATE DATABASE db_name PRECISION 'ns';
```
+
## 数据类型
在 TDengine 中,普通表的数据模型中可使用以下数据类型。
-| # | **类型** | **Bytes** | **说明** |
-| --- | :-------: | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒和纳秒,详细说明见上节。 |
-| 2 | INT | 4 | 整型,范围 [-2^31, 2^31-1] |
-| 3 | INT UNSIGNED| 4| 无符号整数,[0, 2^32-1]
-| 4 | BIGINT | 8 | 长整型,范围 [-2^63, 2^63-1] |
-| 5 | BIGINT UNSIGNED | 8 | 长整型,范围 [0, 2^64-1] |
-| 6 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
-| 7 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
-| 8 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。 |
-| 9 | SMALLINT | 2 | 短整型, 范围 [-32768, 32767] |
-| 10 | SMALLINT UNSIGNED | 2| 无符号短整型,范围 [0, 65535] |
-| 11 | TINYINT | 1 | 单字节整型,范围 [-128, 127] |
-| 12 | TINYINT UNSIGNED | 1 | 无符号单字节整型,范围 [0, 255] |
-| 13 | BOOL | 1 | 布尔型,{true, false} |
-| 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 |
-| 15 | JSON | | json 数据类型, 只有 tag 可以是 json 格式 |
-| 16 | VARCHAR | 自定义 | BINARY类型的别名 |
-
+| # | **类型** | **Bytes** | **说明** |
+| --- | :---------------: | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒和纳秒,详细说明见上节。 |
+| 2 | INT | 4 | 整型,范围 [-2^31, 2^31-1] |
+| 3 | INT UNSIGNED | 4 | 无符号整数,[0, 2^32-1] |
+| 4 | BIGINT | 8 | 长整型,范围 [-2^63, 2^63-1] |
+| 5 | BIGINT UNSIGNED | 8 | 长整型,范围 [0, 2^64-1] |
+| 6 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
+| 7 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
+| 8 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 NCHAR |
+| 9 | SMALLINT | 2 | 短整型, 范围 [-32768, 32767] |
+| 10 | SMALLINT UNSIGNED | 2 | 无符号短整型,范围 [0, 65535] |
+| 11 | TINYINT | 1 | 单字节整型,范围 [-128, 127] |
+| 12 | TINYINT UNSIGNED | 1 | 无符号单字节整型,范围 [0, 255] |
+| 13 | BOOL | 1 | 布尔型,{true, false} |
+| 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 NCHAR 字符占用 4 字节的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\'`。NCHAR 使用时须指定字符串大小,类型为 NCHAR(10) 的列表示此列的字符串最多存储 10 个 NCHAR 字符,会固定占用 40 字节的空间。如果用户字符串长度超出声明长度,将会报错。 |
+| 15 | JSON | | JSON 数据类型, 只有 Tag 可以是 JSON 格式 |
+| 16 | VARCHAR | 自定义 | BINARY 类型的别名 |
:::note
-- TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
+
- 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
-- BINARY 类型理论上最长可以有 16374 字节。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。
+- BINARY 类型理论上最长可以有 16,374 字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`。
- SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
:::
-
## 常量
-TDengine支持多个类型的常量,细节如下表:
-
-| # | **语法** | **类型** | **说明** |
-| --- | :-------: | --------- | -------------------------------------- |
-| 1 | [{+ \| -}]123 | BIGINT | 整型数值的字面量的类型均为BIGINT。如果用户输入超过了BIGINT的表示范围,TDengine 按BIGINT对数值进行截断。|
-| 2 | 123.45 | DOUBLE | 浮点数值的字面量的类型均为DOUBLE。TDengine依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型。|
-| 3 | 1.2E3 | DOUBLE | 科学计数法的字面量的类型为DOUBLE。|
-| 4 | 'abc' | BINARY | 单引号括住的内容为字符串字面值,其类型为BINARY,BINARY的size为实际的字符个数。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 \'。|
-| 5 | "abc" | BINARY | 双引号括住的内容为字符串字面值,其类型为BINARY,BINARY的size为实际的字符个数。对于字符串内的双引号,可以用转义字符反斜线加单引号来表示,即 \"。 |
-| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | TIMESTAMP关键字表示后面的字符串字面量需要被解释为TIMESTAMP类型。字符串需要满足YYYY-MM-DD HH:mm:ss.MS格式,其时间分辨率为当前数据库的时间分辨率。 |
-| 7 | {TRUE \| FALSE} | BOOL | 布尔类型字面量。 |
-| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | 空值字面量。可以用于任意类型。|
+
+TDengine 支持多个类型的常量,细节如下表:
+
+| # | **语法** | **类型** | **说明** |
+| --- | :-----------------------------------------------: | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 1 | [{+ \| -}]123 | BIGINT | 整型数值的字面量的类型均为 BIGINT。如果用户输入超过了 BIGINT 的表示范围,TDengine 按 BIGINT 对数值进行截断。 |
+| 2 | 123.45 | DOUBLE | 浮点数值的字面量的类型均为 DOUBLE。TDengine 依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型。 |
+| 3 | 1.2E3 | DOUBLE | 科学计数法的字面量的类型为 DOUBLE。 |
+| 4 | 'abc' | BINARY | 单引号括住的内容为字符串字面值,其类型为 BINARY,BINARY 的 Size 为实际的字符个数。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`。 |
+| 5 | "abc" | BINARY | 双引号括住的内容为字符串字面值,其类型为 BINARY,BINARY 的 Size 为实际的字符个数。对于字符串内的双引号,可以用转义字符反斜线加单引号来表示,即 `\"`。 |
+| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | TIMESTAMP 关键字表示后面的字符串字面量需要被解释为 TIMESTAMP 类型。字符串需要满足 YYYY-MM-DD HH:mm:ss.MS 格式,其时间分辨率为当前数据库的时间分辨率。 |
+| 7 | {TRUE \| FALSE} | BOOL | 布尔类型字面量。 |
+| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | 空值字面量。可以用于任意类型。 |
:::note
-- TDengine依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999会认为超过长整型的上边界而溢出,而9999999999999999999.0会被认为是有效的浮点数。
+
+- TDengine 依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
:::
diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md
index 5841904e612cb11a77d3038c411b451ef338d467..394e4a99d59aa3a1a59c2cb483bb2eefd71e44db 100644
--- a/docs/zh/12-taos-sql/06-select.md
+++ b/docs/zh/12-taos-sql/06-select.md
@@ -69,7 +69,7 @@ order_expr:
### 通配符
-通配符 \* 可以用于代指全部列。对于普通表,结果中只有普通列。对于超级表和子表,还包含了 TAG 列。
+通配符 \* 可以用于代指全部列。对于普通表和子表,结果中只有普通列。对于超级表,还包含了 TAG 列。
```sql
SELECT * FROM d1001;
diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md
index 86e9aaa80fdebecbd390ae231437491530d6c2b5..9726406b4d875de3e859c979e013630d496fb468 100644
--- a/docs/zh/12-taos-sql/10-function.md
+++ b/docs/zh/12-taos-sql/10-function.md
@@ -1233,7 +1233,7 @@ SELECT SERVER_VERSION();
### SERVER_STATUS
```sql
-SELECT SERVER_VERSION();
+SELECT SERVER_STATUS();
```
**说明**:返回服务端当前的状态。
diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md
index 59a241b6c8458b42313b55f8e814ea574c6fd32c..b05cf7a942901efacb3512b9d6721b75e252f7c5 100644
--- a/docs/zh/28-releases/01-tdengine.md
+++ b/docs/zh/28-releases/01-tdengine.md
@@ -6,6 +6,9 @@ description: TDengine 发布历史、Release Notes 及下载链接
import Release from "/components/ReleaseV3";
+## 3.0.1.2
+
+
## 3.0.1.1
diff --git a/docs/zh/28-releases/02-tools.md b/docs/zh/28-releases/02-tools.md
index 9ff8f4222471cc4245885909e2255869ccf1cb99..f793981d064ef1dfe6962a454cdc284763667070 100644
--- a/docs/zh/28-releases/02-tools.md
+++ b/docs/zh/28-releases/02-tools.md
@@ -6,6 +6,10 @@ description: taosTools 的发布历史、Release Notes 和下载链接
import Release from "/components/ReleaseV3";
+## 2.2.2
+
+
+
## 2.2.0
diff --git a/examples/lua/OpenResty/so/luaconnector51.so b/examples/lua/OpenResty/so/luaconnector51.so
index 442de6e39f909e1aeb869988722b84795c048855..168d3a9d2406680ceec3c12f29a8157d19aca2ff 100755
Binary files a/examples/lua/OpenResty/so/luaconnector51.so and b/examples/lua/OpenResty/so/luaconnector51.so differ
diff --git a/examples/lua/lua51/lua_connector51.c b/examples/lua/lua51/lua_connector51.c
index 578622bf1fb50f428a4ba44b3b02c4eeed2508b2..4c702b2aaeac163f73bc0b2503449dd68a25150d 100644
--- a/examples/lua/lua51/lua_connector51.c
+++ b/examples/lua/lua51/lua_connector51.c
@@ -2,7 +2,7 @@
#include
#include
#include
-#include "../../../../include/client/taos.h"
+#include "taos.h"
#include "lauxlib.h"
#include "lua.h"
#include "lualib.h"
@@ -35,7 +35,7 @@ static int l_connect(lua_State *L){
}
lua_getfield(L, 1, "port");
- if (lua_isnumber(L,-1)){
+ if (lua_isnumber(L, -1)){
port = lua_tonumber(L, -1);
//printf("port = %d\n", port);
}
@@ -60,7 +60,6 @@ static int l_connect(lua_State *L){
lua_settop(L,0);
- taos_init();
lua_newtable(L);
int table_index = lua_gettop(L);
@@ -102,7 +101,7 @@ static int l_query(lua_State *L){
printf("failed, reason:%s\n", taos_errstr(result));
lua_pushinteger(L, -1);
lua_setfield(L, table_index, "code");
- lua_pushstring(L, taos_errstr(taos));
+ lua_pushstring(L, taos_errstr(result));
lua_setfield(L, table_index, "error");
return 1;
@@ -113,7 +112,6 @@ static int l_query(lua_State *L){
int rows = 0;
int num_fields = taos_field_count(result);
const TAOS_FIELD *fields = taos_fetch_fields(result);
- //char temp[256];
const int affectRows = taos_affected_rows(result);
// printf(" affect rows:%d\r\n", affectRows);
@@ -122,7 +120,7 @@ static int l_query(lua_State *L){
lua_pushinteger(L, affectRows);
lua_setfield(L, table_index, "affected");
lua_newtable(L);
-
+
while ((row = taos_fetch_row(result))) {
//printf("row index:%d\n",rows);
rows++;
@@ -136,7 +134,7 @@ static int l_query(lua_State *L){
}
lua_pushstring(L,fields[i].name);
-
+ int32_t* length = taos_fetch_lengths(result);
switch (fields[i].type) {
case TSDB_DATA_TYPE_TINYINT:
lua_pushinteger(L,*((char *)row[i]));
@@ -158,7 +156,8 @@ static int l_query(lua_State *L){
break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
- lua_pushstring(L,(char *)row[i]);
+ //printf("type:%d, max len:%d, current len:%d\n",fields[i].type, fields[i].bytes, length[i]);
+ lua_pushlstring(L,(char *)row[i], length[i]);
break;
case TSDB_DATA_TYPE_TIMESTAMP:
lua_pushinteger(L,*((int64_t *)row[i]));
@@ -166,6 +165,7 @@ static int l_query(lua_State *L){
case TSDB_DATA_TYPE_BOOL:
lua_pushinteger(L,*((char *)row[i]));
break;
+ case TSDB_DATA_TYPE_NULL:
default:
lua_pushnil(L);
break;
@@ -235,112 +235,6 @@ static int l_async_query(lua_State *L){
return 1;
}
-void stream_cb(void *param, TAOS_RES *result, TAOS_ROW row){
- struct cb_param* p = (struct cb_param*) param;
- TAOS_FIELD *fields = taos_fetch_fields(result);
- int numFields = taos_num_fields(result);
-
- // printf("\nnumfields:%d\n", numFields);
- //printf("\n\r-----------------------------------------------------------------------------------\n");
-
- lua_State *L = p->state;
- lua_rawgeti(L, LUA_REGISTRYINDEX, p->callback);
-
- lua_newtable(L);
-
- for (int i = 0; i < numFields; ++i) {
- if (row[i] == NULL) {
- continue;
- }
-
- lua_pushstring(L,fields[i].name);
-
- switch (fields[i].type) {
- case TSDB_DATA_TYPE_TINYINT:
- lua_pushinteger(L,*((char *)row[i]));
- break;
- case TSDB_DATA_TYPE_SMALLINT:
- lua_pushinteger(L,*((short *)row[i]));
- break;
- case TSDB_DATA_TYPE_INT:
- lua_pushinteger(L,*((int *)row[i]));
- break;
- case TSDB_DATA_TYPE_BIGINT:
- lua_pushinteger(L,*((int64_t *)row[i]));
- break;
- case TSDB_DATA_TYPE_FLOAT:
- lua_pushnumber(L,*((float *)row[i]));
- break;
- case TSDB_DATA_TYPE_DOUBLE:
- lua_pushnumber(L,*((double *)row[i]));
- break;
- case TSDB_DATA_TYPE_BINARY:
- case TSDB_DATA_TYPE_NCHAR:
- lua_pushstring(L,(char *)row[i]);
- break;
- case TSDB_DATA_TYPE_TIMESTAMP:
- lua_pushinteger(L,*((int64_t *)row[i]));
- break;
- case TSDB_DATA_TYPE_BOOL:
- lua_pushinteger(L,*((char *)row[i]));
- break;
- default:
- lua_pushnil(L);
- break;
- }
-
- lua_settable(L, -3);
- }
-
- lua_call(L, 1, 0);
-
- // printf("-----------------------------------------------------------------------------------\n\r");
-}
-
-static int l_open_stream(lua_State *L){
- int r = luaL_ref(L, LUA_REGISTRYINDEX);
- TAOS * taos = (TAOS*)lua_topointer(L,1);
- const char * sqlstr = lua_tostring(L,2);
- int stime = luaL_checknumber(L,3);
-
- lua_newtable(L);
- int table_index = lua_gettop(L);
-
- struct cb_param *p = malloc(sizeof(struct cb_param));
- p->state = L;
- p->callback=r;
- // printf("r:%d, L:%d\n",r,L);
- void * s = taos_open_stream(taos,sqlstr,stream_cb,stime,p,NULL);
- if (s == NULL) {
- printf("failed to open stream, reason:%s\n", taos_errstr(taos));
- free(p);
- lua_pushnumber(L, -1);
- lua_setfield(L, table_index, "code");
- lua_pushstring(L, taos_errstr(taos));
- lua_setfield(L, table_index, "error");
- lua_pushlightuserdata(L,NULL);
- lua_setfield(L, table_index, "stream");
- }else{
- // printf("success to open stream\n");
- lua_pushnumber(L, 0);
- lua_setfield(L, table_index, "code");
- lua_pushstring(L, taos_errstr(taos));
- lua_setfield(L, table_index, "error");
- p->stream = s;
- lua_pushlightuserdata(L,p);
- lua_setfield(L, table_index, "stream");//stream has different content in lua and c.
- }
-
- return 1;
-}
-
-static int l_close_stream(lua_State *L){
- //TODO:get stream and free cb_param
- struct cb_param *p = lua_touserdata(L,1);
- taos_close_stream(p->stream);
- free(p);
- return 0;
-}
static int l_close(lua_State *L){
TAOS *taos= (TAOS*)lua_topointer(L,1);
@@ -367,8 +261,6 @@ static const struct luaL_Reg lib[] = {
{"query", l_query},
{"query_a",l_async_query},
{"close", l_close},
- {"open_stream", l_open_stream},
- {"close_stream", l_close_stream},
{NULL, NULL}
};
diff --git a/examples/lua/lua_connector.c b/examples/lua/lua_connector.c
index 3c13b196b991a6bdf18493c111d37028fcb5de9a..ce13ab3829dec17e4df97ab94f358bd128e80cf1 100644
--- a/examples/lua/lua_connector.c
+++ b/examples/lua/lua_connector.c
@@ -5,7 +5,7 @@
#include
#include
#include
-#include "taos.h"
+#include
struct cb_param{
lua_State* state;
@@ -60,6 +60,8 @@ static int l_connect(lua_State *L){
lua_settop(L,0);
+ taos_init();
+
lua_newtable(L);
int table_index = lua_gettop(L);
diff --git a/examples/lua/test.lua b/examples/lua/test.lua
index 89c0904c6a04ecec79a95cb1f710136e93a4a00b..3d725cc6a368a4d263729a35612ae0461c86b5ab 100644
--- a/examples/lua/test.lua
+++ b/examples/lua/test.lua
@@ -9,6 +9,50 @@ local config = {
max_packet_size = 1024 * 1024
}
+function dump(obj)
+ local getIndent, quoteStr, wrapKey, wrapVal, dumpObj
+ getIndent = function(level)
+ return string.rep("\t", level)
+ end
+ quoteStr = function(str)
+ return '"' .. string.gsub(str, '"', '\\"') .. '"'
+ end
+ wrapKey = function(val)
+ if type(val) == "number" then
+ return "[" .. val .. "]"
+ elseif type(val) == "string" then
+ return "[" .. quoteStr(val) .. "]"
+ else
+ return "[" .. tostring(val) .. "]"
+ end
+ end
+ wrapVal = function(val, level)
+ if type(val) == "table" then
+ return dumpObj(val, level)
+ elseif type(val) == "number" then
+ return val
+ elseif type(val) == "string" then
+ return quoteStr(val)
+ else
+ return tostring(val)
+ end
+ end
+ dumpObj = function(obj, level)
+ if type(obj) ~= "table" then
+ return wrapVal(obj)
+ end
+ level = level + 1
+ local tokens = {}
+ tokens[#tokens + 1] = "{"
+ for k, v in pairs(obj) do
+ tokens[#tokens + 1] = getIndent(level) .. wrapKey(k) .. " = " .. wrapVal(v, level) .. ","
+ end
+ tokens[#tokens + 1] = getIndent(level - 1) .. "}"
+ return table.concat(tokens, "\n")
+ end
+ return dumpObj(obj, 0)
+end
+
local conn
local res = driver.connect(config)
if res.code ~=0 then
@@ -37,7 +81,7 @@ else
print("select db--- pass.")
end
-res = driver.query(conn,"create table m1 (ts timestamp, speed int,owner binary(20))")
+res = driver.query(conn,"create table m1 (ts timestamp, speed int, owner binary(20), mark nchar(30))")
if res.code ~=0 then
print("create table---failed: "..res.error)
return
@@ -45,7 +89,7 @@ else
print("create table--- pass.")
end
-res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001',0,'robotspace'), ('2019-09-01 00:00:00.002',1,'Hilink'),('2019-09-01 00:00:00.003',2,'Harmony')")
+res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001', 0, 'robotspace', '世界人民大团结万岁'), ('2019-09-01 00:00:00.002', 1, 'Hilink', '⾾⾿⿀⿁⿂⿃⿄⿅⿆⿇⿈⿉⿊⿋⿌⿍⿎⿏⿐⿑⿒⿓⿔⿕'),('2019-09-01 00:00:00.003', 2, 'Harmony', '₠₡₢₣₤₥₦₧₨₩₪₫€₭₮₯₰₱₲₳₴₵')")
if res.code ~=0 then
print("insert records failed: "..res.error)
return
@@ -64,21 +108,25 @@ if res.code ~=0 then
return
else
if (#(res.item) == 3) then
- print("select--- pass")
+ print("select--- pass")
+ print(res.item[1].mark)
+ print(res.item[2].mark)
+ print(res.item[3].mark)
+
else
print("select--- failed: expect 3 affected records, actually received "..#(res.item))
end
end
-res = driver.query(conn,"CREATE TABLE thermometer (ts timestamp, degree double) TAGS(location binary(20), type int)")
+res = driver.query(conn,"create table thermometer (ts timestamp, degree double) tags(location binary(20), type int)")
if res.code ~=0 then
print(res.error)
return
else
print("create super table--- pass")
end
-res = driver.query(conn,"CREATE TABLE therm1 USING thermometer TAGS ('beijing', 1)")
+res = driver.query(conn,"create table therm1 using thermometer tags ('beijing', 1)")
if res.code ~=0 then
print(res.error)
return
@@ -86,7 +134,7 @@ else
print("create table--- pass")
end
-res = driver.query(conn,"INSERT INTO therm1 VALUES ('2019-09-01 00:00:00.001', 20),('2019-09-01 00:00:00.002', 21)")
+res = driver.query(conn,"insert into therm1 values ('2019-09-01 00:00:00.001', 20),('2019-09-01 00:00:00.002', 21)")
if res.code ~=0 then
print(res.error)
@@ -99,14 +147,14 @@ else
end
end
-res = driver.query(conn,"SELECT COUNT(*) count, AVG(degree) AS av, MAX(degree), MIN(degree) FROM thermometer WHERE location='beijing' or location='tianjin' GROUP BY location, type")
+res = driver.query(conn,"select count(*) as cnt, avg(degree) as av, max(degree), min(degree) from thermometer where location='beijing' or location='tianjin' group by location, type")
if res.code ~=0 then
print("select from super table--- failed:"..res.error)
return
else
print("select from super table--- pass")
for i = 1, #(res.item) do
- print("res:"..res.item[i].count)
+ print("res:"..res.item[i].cnt)
end
end
@@ -127,30 +175,13 @@ end
driver.query_a(conn,"INSERT INTO therm1 VALUES ('2019-09-01 00:00:00.005', 100),('2019-09-01 00:00:00.006', 101),('2019-09-01 00:00:00.007', 102)", async_query_callback)
+res = driver.query(conn, "create stream stream_avg_degree into avg_degree as select avg(degree) from thermometer interval(5s) sliding(1s)")
-function stream_callback(t)
- print("------------------------")
- print("continuous query result:")
- for key, value in pairs(t) do
- print("key:"..key..", value:"..value)
- end
-end
-
-local stream
-res = driver.open_stream(conn,"SELECT COUNT(*) as count, AVG(degree) as avg, MAX(degree) as max, MIN(degree) as min FROM thermometer interval(2s) sliding(2s);)",0, stream_callback)
-if res.code ~=0 then
- print("open stream--- failed:"..res.error)
- return
-else
- print("open stream--- pass")
- stream = res.stream
-end
-
-print("From now on we start continous insert in an definite (infinite if you want) loop.")
+print("From now on we start continous insert in an definite loop, pls wait for about 10 seconds and check stream table for result.")
local loop_index = 0
-while loop_index < 30 do
+while loop_index < 10 do
local t = os.time()*1000
- local v = loop_index
+ local v = math.random(20)
res = driver.query(conn,string.format("INSERT INTO therm1 VALUES (%d, %d)",t,v))
if res.code ~=0 then
@@ -162,7 +193,5 @@ while loop_index < 30 do
os.execute("sleep " .. 1)
loop_index = loop_index + 1
end
-
-driver.close_stream(stream)
-
+driver.query(conn,"DROP STREAM IF EXISTS avg_therm_s")
driver.close(conn)
diff --git a/include/common/tcommon.h b/include/common/tcommon.h
index ba4baa0130602da80da5eb14a45a48dea8a31a03..2544cedda744901ddc98bf9d7698fddf5907513f 100644
--- a/include/common/tcommon.h
+++ b/include/common/tcommon.h
@@ -116,6 +116,7 @@ enum {
STREAM_INPUT__DATA_RETRIEVE,
STREAM_INPUT__GET_RES,
STREAM_INPUT__CHECKPOINT,
+ STREAM_INPUT__REF_DATA_BLOCK,
STREAM_INPUT__DESTROY,
};
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index 3492f2485aa390f1e729142b4f52c4ffa741a86f..9b69bec5b3f9e9227a9ae0ae32ccde7358457d1e 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -96,6 +96,8 @@ extern int32_t tsQueryPolicy;
extern int32_t tsQuerySmaOptimize;
extern int32_t tsQueryRsmaTolerance;
extern bool tsQueryPlannerTrace;
+extern int32_t tsQueryNodeChunkSize;
+extern bool tsQueryUseNodeAllocator;
// client
extern int32_t tsMinSlidingTime;
diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h
index 6500d3d1831e817c497406e574b721594e63e209..634dae9ec5f968660bb4cea7867b5e683266bc37 100644
--- a/include/libs/nodes/nodes.h
+++ b/include/libs/nodes/nodes.h
@@ -275,6 +275,17 @@ typedef struct SNodeList {
SListCell* pTail;
} SNodeList;
+typedef struct SNodeAllocator SNodeAllocator;
+
+int32_t nodesInitAllocatorSet();
+void nodesDestroyAllocatorSet();
+int32_t nodesCreateAllocator(int64_t queryId, int32_t chunkSize, int64_t* pAllocatorId);
+int32_t nodesAcquireAllocator(int64_t allocatorId);
+int32_t nodesReleaseAllocator(int64_t allocatorId);
+int64_t nodesMakeAllocatorWeakRef(int64_t allocatorId);
+int64_t nodesReleaseAllocatorWeakRef(int64_t allocatorId);
+void nodesDestroyAllocator(int64_t allocatorId);
+
SNode* nodesMakeNode(ENodeType type);
void nodesDestroyNode(SNode* pNode);
diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h
index 95bde858640b3d4cd5df616bc1d0a5a65795d8f3..b1a937910dfe8defd107ec525afd20edfc639aaf 100644
--- a/include/libs/parser/parser.h
+++ b/include/libs/parser/parser.h
@@ -56,6 +56,7 @@ typedef struct SParseContext {
bool nodeOffline;
SArray* pTableMetaPos; // sql table pos => catalog data pos
SArray* pTableVgroupPos; // sql table pos => catalog data pos
+ int64_t allocatorId;
} SParseContext;
int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
diff --git a/include/libs/planner/planner.h b/include/libs/planner/planner.h
index e03ac3811a11b3927531a6250f5b41fb876c0f1c..e52fe39527dda9aa80ea05c1ffcab487b84cd466 100644
--- a/include/libs/planner/planner.h
+++ b/include/libs/planner/planner.h
@@ -39,6 +39,7 @@ typedef struct SPlanContext {
int32_t msgLen;
const char* pUser;
bool sysInfo;
+ int64_t allocatorId;
} SPlanContext;
// Create the physical plan for the query, according to the AST.
diff --git a/include/libs/scheduler/scheduler.h b/include/libs/scheduler/scheduler.h
index e6973cd390c10ff524f70549d161090582ee56ab..738d057e6ac531e726cd069eaf0de35bdc15c365 100644
--- a/include/libs/scheduler/scheduler.h
+++ b/include/libs/scheduler/scheduler.h
@@ -67,6 +67,7 @@ typedef struct SSchedulerReq {
SRequestConnInfo *pConn;
SArray *pNodeList;
SQueryPlan *pDag;
+ int64_t allocatorRefId;
const char *sql;
int64_t startTs;
schedulerExecFp execFp;
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index 554d66d62120977d45bba05988ff495482ef8246..bdc12f7e3f1c03c88ea87233f3bf708b86800c5b 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -125,6 +125,14 @@ typedef struct {
SArray* blocks; // SArray
} SStreamDataBlock;
+// ref data block, for delete
+typedef struct {
+ int8_t type;
+ int64_t ver;
+ int32_t* dataRef;
+ SSDataBlock* pBlock;
+} SStreamRefDataBlock;
+
typedef struct {
int8_t type;
} SStreamCheckpoint;
@@ -339,7 +347,8 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem
qDebug("task %d %p submit enqueue %p %p %p", pTask->taskId, pTask, pItem, pSubmitClone, pSubmitClone->data);
taosWriteQitem(pTask->inputQueue->queue, pSubmitClone);
// qStreamInput(pTask->exec.executor, pSubmitClone);
- } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
+ } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE ||
+ pItem->type == STREAM_INPUT__REF_DATA_BLOCK) {
taosWriteQitem(pTask->inputQueue->queue, pItem);
// qStreamInput(pTask->exec.executor, pItem);
} else if (pItem->type == STREAM_INPUT__CHECKPOINT) {
@@ -492,7 +501,9 @@ typedef struct {
int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
-void tFreeStreamDispatchReq(SStreamDispatchReq* pReq);
+void tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq);
+
+void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);
int32_t streamSetupTrigger(SStreamTask* pTask);
diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h
index e6a4dd1d493969a333005a64f515ba35dde34573..285e079b3ec90a066cd70fa3e7576dac3d5c8b8d 100644
--- a/include/libs/sync/sync.h
+++ b/include/libs/sync/sync.h
@@ -22,6 +22,7 @@ extern "C" {
#include "cJSON.h"
#include "tdef.h"
+#include "tlrucache.h"
#include "tmsgcb.h"
extern bool gRaftDetailLog;
@@ -153,7 +154,8 @@ typedef struct SSyncFSM {
// abstract definition of log store in raft
// SWal implements it
typedef struct SSyncLogStore {
- void* data;
+ SLRUCache* pCache;
+ void* data;
// append one log entry
int32_t (*appendEntry)(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry);
diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh
index 794b3968fe4df178e3da91d2ebbd1512e288a57e..20f93ecaec41045fca38c552e0b150c65b37c0be 100755
--- a/packaging/testpackage.sh
+++ b/packaging/testpackage.sh
@@ -202,8 +202,8 @@ elif [[ ${packgeName} =~ "tar" ]];then
cd ${oriInstallPath}/${originTdpPath} && tar xf ${subFile}
fi
- cd ${oriInstallPath}/${originTdpPath} && tree > ${installPath}/base_${originversion}_checkfile
- cd ${installPath}/${tdPath} && tree > ${installPath}/now_${version}_checkfile
+ cd ${oriInstallPath}/${originTdpPath} && tree -I "driver" > ${installPath}/base_${originversion}_checkfile
+ cd ${installPath}/${tdPath} && tree -I "driver" > ${installPath}/now_${version}_checkfile
cd ${installPath}
diff ${installPath}/base_${originversion}_checkfile ${installPath}/now_${version}_checkfile > ${installPath}/diffFile.log
@@ -215,6 +215,7 @@ elif [[ ${packgeName} =~ "tar" ]];then
exit -1
else
echoColor G "The number and names of files are the same as previous installation packages"
+ rm -rf ${installPath}/diffFile.log
fi
echoColor YD "===== install Package of tar ====="
cd ${installPath}/${tdPath}
@@ -251,6 +252,9 @@ if [[ ${packgeName} =~ "server" ]] ;then
systemctl restart taosd
fi
+rm -rf ${installPath}/${packgeName}
+rm -rf ${installPath}/${tdPath}/
+
# if ([[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]]) || [[ ${packgeName} =~ "client" ]] ;then
# echoColor G "===== install taos-tools when package is lite or client ====="
# cd ${installPath}
diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h
index 574d8188fef5b2866b17f7a9f8f7842f162421a3..44503c66383e24653a27181ad07f53e2fef6d3b5 100644
--- a/source/client/inc/clientInt.h
+++ b/source/client/inc/clientInt.h
@@ -250,6 +250,7 @@ typedef struct SRequestObj {
bool inRetry;
uint32_t prevCode; // previous error code: todo refactor, add update flag for catalog
uint32_t retry;
+ int64_t allocatorRefId;
} SRequestObj;
typedef struct SSyncQueryParam {
diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c
index b739aedca0ff7c8bd8e408e2e456aa7414f1ac30..2faf268880e041f67705507ef09834e2e1b15187 100644
--- a/source/client/src/clientEnv.c
+++ b/source/client/src/clientEnv.c
@@ -288,6 +288,7 @@ void *createRequest(uint64_t connId, int32_t type) {
pRequest->body.resInfo.convertUcs4 = true; // convert ucs4 by default
pRequest->type = type;
+ pRequest->allocatorRefId = -1;
pRequest->pDb = getDbOfConnection(pTscObj);
pRequest->pTscObj = pTscObj;
@@ -349,6 +350,7 @@ void doDestroyRequest(void *p) {
taosArrayDestroy(pRequest->tableList);
taosArrayDestroy(pRequest->dbList);
taosArrayDestroy(pRequest->targetTableList);
+ nodesDestroyAllocator(pRequest->allocatorRefId);
destroyQueryExecRes(&pRequest->body.resInfo.execRes);
@@ -411,6 +413,7 @@ void taos_init_imp(void) {
initTaskQueue();
fmFuncMgtInit();
+ nodesInitAllocatorSet();
clientConnRefPool = taosOpenRef(200, destroyTscObj);
clientReqRefPool = taosOpenRef(40960, doDestroyRequest);
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index 5128695359b9628d873af0a75373e530f78a6403..ef19eba7fe9001e299aef8b599128d7e59e852bd 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -195,6 +195,19 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
+ (*pRequest)->allocatorRefId = -1;
+ if (tsQueryUseNodeAllocator && !qIsInsertValuesSql((*pRequest)->sqlstr, (*pRequest)->sqlLen)) {
+ if (TSDB_CODE_SUCCESS !=
+ nodesCreateAllocator((*pRequest)->requestId, tsQueryNodeChunkSize, &((*pRequest)->allocatorRefId))) {
+ tscError("%d failed to create node allocator, reqId:0x%" PRIx64 ", conn:%d, %s", (*pRequest)->self,
+ (*pRequest)->requestId, pTscObj->id, sql);
+
+ destroyRequest(*pRequest);
+ *pRequest = NULL;
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ }
+
tscDebugL("0x%" PRIx64 " SQL: %s, reqId:0x%" PRIx64, (*pRequest)->self, (*pRequest)->sqlstr, (*pRequest)->requestId);
return TSDB_CODE_SUCCESS;
}
@@ -1023,7 +1036,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
.pMsg = pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
.pUser = pRequest->pTscObj->user,
- .sysInfo = pRequest->pTscObj->sysInfo};
+ .sysInfo = pRequest->pTscObj->sysInfo,
+ .allocatorId = pRequest->allocatorRefId};
SAppInstInfo* pAppInfo = getAppInfo(pRequest);
SQueryPlan* pDag = NULL;
@@ -1048,6 +1062,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
.pConn = &conn,
.pNodeList = pNodeList,
.pDag = pDag,
+ .allocatorRefId = pRequest->allocatorRefId,
.sql = pRequest->sqlstr,
.startTs = pRequest->metric.start,
.execFp = schedulerExecCb,
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 13cbaa0e2290eb186ed439ccdc965ebeb736a40f..6b707bf7a097f388561c08a7618d3d472210f726 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -65,6 +65,7 @@ void taos_cleanup(void) {
fmFuncMgtDestroy();
qCleanupKeywordsTable();
+ nodesDestroyAllocatorSet();
id = clientConnRefPool;
clientConnRefPool = -1;
@@ -775,7 +776,8 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) {
.enableSysInfo = pTscObj->sysInfo,
.async = true,
.svrVer = pTscObj->sVer,
- .nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes)};
+ .nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes),
+ .allocatorId = pRequest->allocatorRefId};
return TSDB_CODE_SUCCESS;
}
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index 3359be37733a4656c0d9c7668ef5c0cf54c83348..fa8fdb19d4575ff339be7dcf58ad17b15808e330 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -93,6 +93,8 @@ int32_t tsQueryPolicy = 1;
int32_t tsQuerySmaOptimize = 0;
int32_t tsQueryRsmaTolerance = 1000; // the tolerance time (ms) to judge from which level to query rsma data.
bool tsQueryPlannerTrace = false;
+int32_t tsQueryNodeChunkSize = 32 * 1024;
+bool tsQueryUseNodeAllocator = true;
/*
* denote if the server needs to compress response message at the application layer to client, including query rsp,
@@ -286,6 +288,8 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "queryPolicy", tsQueryPolicy, 1, 3, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "querySmaOptimize", tsQuerySmaOptimize, 0, 1, 1) != 0) return -1;
if (cfgAddBool(pCfg, "queryPlannerTrace", tsQueryPlannerTrace, true) != 0) return -1;
+ if (cfgAddInt32(pCfg, "queryNodeChunkSize", tsQueryNodeChunkSize, 1024, 128 * 1024, true) != 0) return -1;
+ if (cfgAddBool(pCfg, "queryUseNodeAllocator", tsQueryUseNodeAllocator, true) != 0) return -1;
if (cfgAddString(pCfg, "smlChildTableName", "", 1) != 0) return -1;
if (cfgAddString(pCfg, "smlTagName", tsSmlTagName, 1) != 0) return -1;
if (cfgAddBool(pCfg, "smlDataFormat", tsSmlDataFormat, 1) != 0) return -1;
@@ -647,6 +651,8 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
tsQueryPolicy = cfgGetItem(pCfg, "queryPolicy")->i32;
tsQuerySmaOptimize = cfgGetItem(pCfg, "querySmaOptimize")->i32;
tsQueryPlannerTrace = cfgGetItem(pCfg, "queryPlannerTrace")->bval;
+ tsQueryNodeChunkSize = cfgGetItem(pCfg, "queryNodeChunkSize")->i32;
+ tsQueryUseNodeAllocator = cfgGetItem(pCfg, "queryUseNodeAllocator")->bval;
return 0;
}
@@ -982,6 +988,10 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
qDebugFlag = cfgGetItem(pCfg, "qDebugFlag")->i32;
} else if (strcasecmp("queryPlannerTrace", name) == 0) {
tsQueryPlannerTrace = cfgGetItem(pCfg, "queryPlannerTrace")->bval;
+ } else if (strcasecmp("queryNodeChunkSize", name) == 0) {
+ tsQueryNodeChunkSize = cfgGetItem(pCfg, "queryNodeChunkSize")->i32;
+ } else if (strcasecmp("queryUseNodeAllocator", name) == 0) {
+ tsQueryUseNodeAllocator = cfgGetItem(pCfg, "queryUseNodeAllocator")->bval;
} else if (strcasecmp("queryRsmaTolerance", name) == 0) {
tsQueryRsmaTolerance = cfgGetItem(pCfg, "queryRsmaTolerance")->i32;
}
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index ce2a1c4b89336dcd48ae481960e614978f532030..932afe89371433d16b162d9fc52a3c74fb2b0561 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -362,8 +362,9 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
// update uid index
metaUpdateUidIdx(pMeta, &nStbEntry);
- if (oStbEntry.pBuf) taosMemoryFree(oStbEntry.pBuf);
metaULock(pMeta);
+
+ if (oStbEntry.pBuf) taosMemoryFree(oStbEntry.pBuf);
tDecoderClear(&dc);
tdbTbcClose(pTbDbc);
tdbTbcClose(pUidIdxc);
@@ -922,6 +923,8 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
taosArrayDestroy(pTagArray);
}
+ metaWLock(pMeta);
+
// save to table.db
metaSaveToTbDb(pMeta, &ctbEntry);
@@ -936,6 +939,8 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
tdbTbUpsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), ctbEntry.ctbEntry.pTags,
((STag *)(ctbEntry.ctbEntry.pTags))->len, &pMeta->txn);
+ metaULock(pMeta);
+
tDecoderClear(&dc1);
tDecoderClear(&dc2);
if (ctbEntry.ctbEntry.pTags) taosMemoryFree((void *)ctbEntry.ctbEntry.pTags);
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index 98b7dd7163089e5a7e067ca069fb5f61959a74d3..29217e29a4f9cf5b5a2f70db77a931b88eded27b 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -59,7 +59,7 @@ static void destroySTqHandle(void* data) {
tqCloseReader(pData->execHandle.pExecReader);
walCloseReader(pData->pWalReader);
taosHashCleanup(pData->execHandle.execDb.pFilterOutTbUid);
- } else if (pData->execHandle.subType == TOPIC_SUB_TYPE__TABLE){
+ } else if (pData->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
walCloseReader(pData->pWalReader);
tqCloseReader(pData->execHandle.pExecReader);
}
@@ -664,7 +664,10 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
tqError("vgId:%d, build new consumer handle %s for consumer %d, but old consumerId is %ld", req.vgId, req.subKey,
req.newConsumerId, req.oldConsumerId);
}
- ASSERT(req.newConsumerId != -1);
+ if (req.newConsumerId == -1) {
+ tqError("vgId:%d, tq invalid rebalance request, new consumerId %ld", req.vgId, req.newConsumerId);
+ return 0;
+ }
STqHandle tqHandle = {0};
pHandle = &tqHandle;
/*taosInitRWLatch(&pExec->lock);*/
@@ -876,6 +879,9 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
taosArrayDestroy(pRes->uidList);
+ int32_t* pRef = taosMemoryMalloc(sizeof(int32_t));
+ *pRef = 1;
+
void* pIter = NULL;
while (1) {
pIter = taosHashIterate(pTq->pStreamMeta->pTasks, pIter);
@@ -885,6 +891,33 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
qDebug("delete req enqueue stream task: %d, ver: %" PRId64, pTask->taskId, ver);
+ if (!failed) {
+ SStreamRefDataBlock* pRefBlock = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM);
+ pRefBlock->type = STREAM_INPUT__REF_DATA_BLOCK;
+ pRefBlock->pBlock = pDelBlock;
+ pRefBlock->dataRef = pRef;
+ atomic_add_fetch_32(pRefBlock->dataRef, 1);
+
+ if (streamTaskInput(pTask, (SStreamQueueItem*)pRefBlock) < 0) {
+ qError("stream task input del failed, task id %d", pTask->taskId);
+ continue;
+ }
+ if (streamSchedExec(pTask) < 0) {
+ qError("stream task launch failed, task id %d", pTask->taskId);
+ continue;
+ }
+ } else {
+ streamTaskInputFail(pTask);
+ }
+ }
+ int32_t ref = atomic_sub_fetch_32(pRef, 1);
+ ASSERT(ref >= 0);
+ if (ref == 0) {
+ taosMemoryFree(pDelBlock);
+ taosMemoryFree(pRef);
+ }
+
+#if 0
SStreamDataBlock* pStreamBlock = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
pStreamBlock->type = STREAM_INPUT__DATA_BLOCK;
pStreamBlock->blocks = taosArrayInit(0, sizeof(SSDataBlock));
@@ -908,6 +941,7 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
}
}
blockDataDestroy(pDelBlock);
+#endif
return 0;
}
@@ -1045,6 +1079,7 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) {
SDecoder decoder;
tDecoderInit(&decoder, msgBody, msgLen);
tDecodeStreamRetrieveReq(&decoder, &req);
+ tDecoderClear(&decoder);
int32_t taskId = req.dstTaskId;
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId);
if (pTask) {
@@ -1053,6 +1088,7 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) {
.code = 0,
};
streamProcessRetrieveReq(pTask, &req, &rsp);
+ tDeleteStreamRetrieveReq(&req);
return 0;
} else {
return -1;
diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c
index 65d4e9aaf10cf9c0ee723eb70b94166cde1fba0d..a941b5955ca0e466cd511ef347b655a4bdee3673 100644
--- a/source/dnode/vnode/src/vnd/vnodeSync.c
+++ b/source/dnode/vnode/src/vnd/vnodeSync.c
@@ -676,6 +676,18 @@ static void vnodeLeaderTransfer(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsm
static void vnodeRestoreFinish(struct SSyncFSM *pFsm) {
SVnode *pVnode = pFsm->data;
+
+ do {
+ int32_t itemSize = tmsgGetQueueSize(&pVnode->msgCb, pVnode->config.vgId, APPLY_QUEUE);
+ if (itemSize == 0) {
+ vInfo("vgId:%d, apply queue is empty, restore finish", pVnode->config.vgId);
+ break;
+ } else {
+ vInfo("vgId:%d, restore not finish since %d items in apply queue", pVnode->config.vgId);
+ taosMsleep(10);
+ }
+ } while (true);
+
pVnode->restored = true;
vDebug("vgId:%d, sync restore finished", pVnode->config.vgId);
}
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 8f9a78db83994e0d4252ecca3a5fd9ec76855aa8..18a75f0d04b66cb64fbd5bf5dbab4e5c74716d3a 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -1480,6 +1480,40 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
}
}
+static int32_t filterDelBlockByUid(SSDataBlock* pDst, const SSDataBlock* pSrc, SStreamScanInfo* pInfo) {
+ STqReader* pReader = pInfo->tqReader;
+ int32_t rows = pSrc->info.rows;
+ blockDataEnsureCapacity(pDst, rows);
+
+ SColumnInfoData* pSrcStartCol = taosArrayGet(pSrc->pDataBlock, START_TS_COLUMN_INDEX);
+ uint64_t* startCol = (uint64_t*)pSrcStartCol->pData;
+ SColumnInfoData* pSrcEndCol = taosArrayGet(pSrc->pDataBlock, END_TS_COLUMN_INDEX);
+ uint64_t* endCol = (uint64_t*)pSrcEndCol->pData;
+ SColumnInfoData* pSrcUidCol = taosArrayGet(pSrc->pDataBlock, UID_COLUMN_INDEX);
+ uint64_t* uidCol = (uint64_t*)pSrcUidCol->pData;
+
+ SColumnInfoData* pDstStartCol = taosArrayGet(pDst->pDataBlock, START_TS_COLUMN_INDEX);
+ SColumnInfoData* pDstEndCol = taosArrayGet(pDst->pDataBlock, END_TS_COLUMN_INDEX);
+ SColumnInfoData* pDstUidCol = taosArrayGet(pDst->pDataBlock, UID_COLUMN_INDEX);
+ int32_t j = 0;
+ for (int32_t i = 0; i < rows; i++) {
+ if (taosHashGet(pReader->tbIdHash, &uidCol[i], sizeof(uint64_t))) {
+ colDataAppend(pDstStartCol, j, (const char*)&startCol[i], false);
+ colDataAppend(pDstEndCol, j, (const char*)&endCol[i], false);
+ colDataAppend(pDstUidCol, j, (const char*)&uidCol[i], false);
+
+ colDataAppendNULL(taosArrayGet(pDst->pDataBlock, GROUPID_COLUMN_INDEX), j);
+ colDataAppendNULL(taosArrayGet(pDst->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX), j);
+ colDataAppendNULL(taosArrayGet(pDst->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX), j);
+ j++;
+ }
+ }
+ pDst->info = pSrc->info;
+ pDst->info.rows = j;
+
+ return 0;
+}
+
static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
// NOTE: this operator does never check if current status is done or not
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@@ -1568,6 +1602,12 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
} break;
case STREAM_DELETE_DATA: {
printDataBlock(pBlock, "stream scan delete recv");
+ if (pInfo->tqReader) {
+ SSDataBlock* pDelBlock = createSpecialDataBlock(STREAM_DELETE_DATA);
+ filterDelBlockByUid(pDelBlock, pBlock, pInfo);
+ pBlock = pDelBlock;
+ }
+ printDataBlock(pBlock, "stream scan delete recv filtered");
if (!isIntervalWindow(pInfo) && !isSessionWindow(pInfo) && !isStateWindow(pInfo)) {
generateDeleteResultBlock(pInfo, pBlock, pInfo->pDeleteDataRes);
pInfo->pDeleteDataRes->info.type = STREAM_DELETE_RESULT;
@@ -1647,6 +1687,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
while (1) {
if (pInfo->tqReader->pMsg == NULL) {
if (pInfo->validBlockIndex >= totBlockNum) {
+ updateInfoDestoryColseWinSBF(pInfo->pUpdateInfo);
return NULL;
}
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index aed4ae9b2668e76d36f759696441dd42996b5a93..971f3618a008e8c20537d50ad60ee240e51d5c9c 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -1695,6 +1695,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) {
}
nodesDestroyNode((SNode*)pInfo->pPhyNode);
colDataDestroy(&pInfo->twAggSup.timeWindowData);
+ cleanupGroupResInfo(&pInfo->groupResInfo);
taosMemoryFreeClear(param);
}
@@ -3073,6 +3074,7 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) {
taosArrayRemove(chArray, index);
if (taosArrayGetSize(chArray) == 0) {
// pull data is over
+ taosArrayDestroy(chArray);
taosHashRemove(pMap, &winRes, sizeof(SWinKey));
}
}
@@ -3109,9 +3111,6 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info;
SOperatorInfo* downstream = pOperator->pDownstream[0];
- SArray* pUpdated = taosArrayInit(4, POINTER_BYTES);
- _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
- SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
TSKEY maxTs = INT64_MIN;
TSKEY minTs = INT64_MAX;
@@ -3175,6 +3174,9 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
}
}
+ SArray* pUpdated = taosArrayInit(4, POINTER_BYTES);
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
while (1) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
@@ -5755,8 +5757,6 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
- SStreamState* pState = pTaskInfo->streamInfo.pState;
-
while (1) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
@@ -5805,36 +5805,6 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
}
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
pInfo->twAggSup.minTs = TMIN(pInfo->twAggSup.minTs, minTs);
-
-#if 0
- if (pState) {
- printf(">>>>>>>> stream read backend\n");
- SWinKey key = {
- .ts = 1,
- .groupId = 2,
- };
- char* val = NULL;
- int32_t sz;
- if (streamStateGet(pState, &key, (void**)&val, &sz) < 0) {
- ASSERT(0);
- }
- printf("stream read %s %d\n", val, sz);
- streamFreeVal(val);
-
- SStreamStateCur* pCur = streamStateGetCur(pState, &key);
- ASSERT(pCur);
- while (streamStateCurNext(pState, pCur) == 0) {
- SWinKey key1;
- const void* val1;
- if (streamStateGetKVByCur(pCur, &key1, &val1, &sz) < 0) {
- break;
- }
- printf("stream iter key groupId:%d ts:%d, value %s %d\n", key1.groupId, key1.ts, val1, sz);
- }
- streamStateFreeCur(pCur);
- }
-#endif
-
pOperator->status = OP_RES_TO_RETURN;
closeStreamIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdatedMap,
pOperator);
diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c
index 805ddb9e422db5a42c6aed1594985d5a233c4ccd..2e5a0d935b40ecb28899bdf6d4826b3db2a1b1cb 100644
--- a/source/libs/nodes/src/nodesUtilFuncs.c
+++ b/source/libs/nodes/src/nodesUtilFuncs.c
@@ -21,9 +21,209 @@
#include "taoserror.h"
#include "tdatablock.h"
#include "thash.h"
+#include "tref.h"
+
+typedef struct SNodeMemChunk {
+ int32_t availableSize;
+ int32_t usedSize;
+ char* pBuf;
+ struct SNodeMemChunk* pNext;
+} SNodeMemChunk;
+
+typedef struct SNodeAllocator {
+ int64_t self;
+ int64_t queryId;
+ int32_t chunkSize;
+ int32_t chunkNum;
+ SNodeMemChunk* pCurrChunk;
+ SNodeMemChunk* pChunks;
+ TdThreadMutex mutex;
+} SNodeAllocator;
+
+static threadlocal SNodeAllocator* g_pNodeAllocator;
+static int32_t g_allocatorReqRefPool = -1;
+
+static SNodeMemChunk* callocNodeChunk(SNodeAllocator* pAllocator) {
+ SNodeMemChunk* pNewChunk = taosMemoryCalloc(1, sizeof(SNodeMemChunk) + pAllocator->chunkSize);
+ if (NULL == pNewChunk) {
+ return NULL;
+ }
+ pNewChunk->pBuf = (char*)(pNewChunk + 1);
+ pNewChunk->availableSize = pAllocator->chunkSize;
+ pNewChunk->usedSize = 0;
+ pNewChunk->pNext = NULL;
+ if (NULL != pAllocator->pCurrChunk) {
+ pAllocator->pCurrChunk->pNext = pNewChunk;
+ }
+ pAllocator->pCurrChunk = pNewChunk;
+ if (NULL == pAllocator->pChunks) {
+ pAllocator->pChunks = pNewChunk;
+ }
+ ++(pAllocator->chunkNum);
+ return pNewChunk;
+}
+
+static void* nodesCallocImpl(int32_t size) {
+ if (NULL == g_pNodeAllocator) {
+ return taosMemoryCalloc(1, size);
+ }
+
+ if (g_pNodeAllocator->pCurrChunk->usedSize + size > g_pNodeAllocator->pCurrChunk->availableSize) {
+ if (NULL == callocNodeChunk(g_pNodeAllocator)) {
+ return NULL;
+ }
+ }
+ void* p = g_pNodeAllocator->pCurrChunk->pBuf + g_pNodeAllocator->pCurrChunk->usedSize;
+ g_pNodeAllocator->pCurrChunk->usedSize += size;
+ return p;
+}
+
+static void* nodesCalloc(int32_t num, int32_t size) {
+ void* p = nodesCallocImpl(num * size + 1);
+ if (NULL == p) {
+ return NULL;
+ }
+ *(char*)p = (NULL != g_pNodeAllocator) ? 1 : 0;
+ return (char*)p + 1;
+}
+
+static void nodesFree(void* p) {
+ char* ptr = (char*)p - 1;
+ if (0 == *ptr) {
+ taosMemoryFree(ptr);
+ }
+ return;
+}
+
+static int32_t createNodeAllocator(int32_t chunkSize, SNodeAllocator** pAllocator) {
+ *pAllocator = taosMemoryCalloc(1, sizeof(SNodeAllocator));
+ if (NULL == *pAllocator) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ (*pAllocator)->chunkSize = chunkSize;
+ if (NULL == callocNodeChunk(*pAllocator)) {
+ taosMemoryFreeClear(*pAllocator);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ taosThreadMutexInit(&(*pAllocator)->mutex, NULL);
+ return TSDB_CODE_SUCCESS;
+}
+
+static void destroyNodeAllocator(void* p) {
+ if (NULL == p) {
+ return;
+ }
+
+ SNodeAllocator* pAllocator = p;
+
+ nodesDebug("query id %" PRIx64 " allocator id %" PRIx64 " alloc chunkNum: %d, chunkTotakSize: %d",
+ pAllocator->queryId, pAllocator->self, pAllocator->chunkNum, pAllocator->chunkNum * pAllocator->chunkSize);
+
+ SNodeMemChunk* pChunk = pAllocator->pChunks;
+ while (NULL != pChunk) {
+ SNodeMemChunk* pTemp = pChunk->pNext;
+ taosMemoryFree(pChunk);
+ pChunk = pTemp;
+ }
+ taosThreadMutexDestroy(&pAllocator->mutex);
+ taosMemoryFree(pAllocator);
+}
+
+int32_t nodesInitAllocatorSet() {
+ if (g_allocatorReqRefPool >= 0) {
+ nodesWarn("nodes already initialized");
+ return TSDB_CODE_SUCCESS;
+ }
+
+ g_allocatorReqRefPool = taosOpenRef(1024, destroyNodeAllocator);
+ if (g_allocatorReqRefPool < 0) {
+ nodesError("init nodes failed");
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+void nodesDestroyAllocatorSet() {
+ if (g_allocatorReqRefPool >= 0) {
+ SNodeAllocator* pAllocator = taosIterateRef(g_allocatorReqRefPool, 0);
+ int64_t refId = 0;
+ while (NULL != pAllocator) {
+ refId = pAllocator->self;
+ taosRemoveRef(g_allocatorReqRefPool, refId);
+ pAllocator = taosIterateRef(g_allocatorReqRefPool, refId);
+ }
+ taosCloseRef(g_allocatorReqRefPool);
+ }
+}
+
+int32_t nodesCreateAllocator(int64_t queryId, int32_t chunkSize, int64_t* pAllocatorId) {
+ SNodeAllocator* pAllocator = NULL;
+ int32_t code = createNodeAllocator(chunkSize, &pAllocator);
+ if (TSDB_CODE_SUCCESS == code) {
+ pAllocator->self = taosAddRef(g_allocatorReqRefPool, pAllocator);
+ if (pAllocator->self <= 0) {
+ return terrno;
+ }
+ pAllocator->queryId = queryId;
+ *pAllocatorId = pAllocator->self;
+ }
+ return code;
+}
+
+int32_t nodesAcquireAllocator(int64_t allocatorId) {
+ if (allocatorId <= 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SNodeAllocator* pAllocator = taosAcquireRef(g_allocatorReqRefPool, allocatorId);
+ if (NULL == pAllocator) {
+ return terrno;
+ }
+ taosThreadMutexLock(&pAllocator->mutex);
+ g_pNodeAllocator = pAllocator;
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t nodesReleaseAllocator(int64_t allocatorId) {
+ if (allocatorId <= 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (NULL == g_pNodeAllocator) {
+ nodesError("allocator id %" PRIx64
+ " release failed: The nodesReleaseAllocator function needs to be called after the nodesAcquireAllocator "
+ "function is called!",
+ allocatorId);
+ return TSDB_CODE_FAILED;
+ }
+ SNodeAllocator* pAllocator = g_pNodeAllocator;
+ g_pNodeAllocator = NULL;
+ taosThreadMutexUnlock(&pAllocator->mutex);
+ return taosReleaseRef(g_allocatorReqRefPool, allocatorId);
+}
+
+int64_t nodesMakeAllocatorWeakRef(int64_t allocatorId) {
+ if (allocatorId <= 0) {
+ return 0;
+ }
+
+ SNodeAllocator* pAllocator = taosAcquireRef(g_allocatorReqRefPool, allocatorId);
+ return pAllocator->self;
+}
+
+int64_t nodesReleaseAllocatorWeakRef(int64_t allocatorId) { return taosReleaseRef(g_allocatorReqRefPool, allocatorId); }
+
+void nodesDestroyAllocator(int64_t allocatorId) {
+ if (allocatorId <= 0) {
+ return;
+ }
+
+ taosRemoveRef(g_allocatorReqRefPool, allocatorId);
+}
-static SNode* makeNode(ENodeType type, size_t size) {
- SNode* p = taosMemoryCalloc(1, size);
+static SNode* makeNode(ENodeType type, int32_t size) {
+ SNode* p = nodesCalloc(1, size);
if (NULL == p) {
return NULL;
}
@@ -824,6 +1024,7 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pLogicNode->pWStartTs);
nodesDestroyNode(pLogicNode->pValues);
nodesDestroyList(pLogicNode->pFillExprs);
+ nodesDestroyList(pLogicNode->pNotFillExprs);
break;
}
case QUERY_NODE_LOGIC_PLAN_SORT: {
@@ -1021,12 +1222,12 @@ void nodesDestroyNode(SNode* pNode) {
default:
break;
}
- taosMemoryFreeClear(pNode);
+ nodesFree(pNode);
return;
}
SNodeList* nodesMakeList() {
- SNodeList* p = taosMemoryCalloc(1, sizeof(SNodeList));
+ SNodeList* p = nodesCalloc(1, sizeof(SNodeList));
if (NULL == p) {
return NULL;
}
@@ -1037,7 +1238,7 @@ int32_t nodesListAppend(SNodeList* pList, SNode* pNode) {
if (NULL == pList || NULL == pNode) {
return TSDB_CODE_FAILED;
}
- SListCell* p = taosMemoryCalloc(1, sizeof(SListCell));
+ SListCell* p = nodesCalloc(1, sizeof(SListCell));
if (NULL == p) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_OUT_OF_MEMORY;
@@ -1104,7 +1305,7 @@ int32_t nodesListAppendList(SNodeList* pTarget, SNodeList* pSrc) {
}
pTarget->pTail = pSrc->pTail;
pTarget->length += pSrc->length;
- taosMemoryFreeClear(pSrc);
+ nodesFree(pSrc);
return TSDB_CODE_SUCCESS;
}
@@ -1124,7 +1325,7 @@ int32_t nodesListPushFront(SNodeList* pList, SNode* pNode) {
if (NULL == pList || NULL == pNode) {
return TSDB_CODE_FAILED;
}
- SListCell* p = taosMemoryCalloc(1, sizeof(SListCell));
+ SListCell* p = nodesCalloc(1, sizeof(SListCell));
if (NULL == p) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_OUT_OF_MEMORY;
@@ -1152,7 +1353,7 @@ SListCell* nodesListErase(SNodeList* pList, SListCell* pCell) {
}
SListCell* pNext = pCell->pNext;
nodesDestroyNode(pCell->pNode);
- taosMemoryFreeClear(pCell);
+ nodesFree(pCell);
--(pList->length);
return pNext;
}
@@ -1172,7 +1373,7 @@ void nodesListInsertList(SNodeList* pTarget, SListCell* pPos, SNodeList* pSrc) {
pPos->pPrev = pSrc->pTail;
pTarget->length += pSrc->length;
- taosMemoryFreeClear(pSrc);
+ nodesFree(pSrc);
}
SNode* nodesListGetNode(SNodeList* pList, int32_t index) {
@@ -1204,7 +1405,7 @@ void nodesDestroyList(SNodeList* pList) {
while (NULL != pNext) {
pNext = nodesListErase(pList, pNext);
}
- taosMemoryFreeClear(pList);
+ nodesFree(pList);
}
void nodesClearList(SNodeList* pList) {
@@ -1216,9 +1417,9 @@ void nodesClearList(SNodeList* pList) {
while (NULL != pNext) {
SListCell* tmp = pNext;
pNext = pNext->pNext;
- taosMemoryFreeClear(tmp);
+ nodesFree(tmp);
}
- taosMemoryFreeClear(pList);
+ nodesFree(pList);
}
void* nodesGetValueFromNode(SValueNode* pNode) {
diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c
index 6f11c653a4a853744922c9ad17464aad71d77142..379bd975b4ba9c0783b8adcf83ed571a9b3b8ec6 100644
--- a/source/libs/parser/src/parAstCreater.c
+++ b/source/libs/parser/src/parAstCreater.c
@@ -247,7 +247,8 @@ SNode* releaseRawExprNode(SAstCreateContext* pCxt, SNode* pNode) {
pExpr->userAlias[len] = '\0';
}
}
- taosMemoryFreeClear(pNode);
+ pRawExpr->pNode = NULL;
+ nodesDestroyNode(pNode);
return pRealizedExpr;
}
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index 7ee6a5b2236b24a676214c3538ed182aa52f427a..2fe6ebfb79447653731ff907e5128dfededdf111 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -177,15 +177,18 @@ int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery) {
int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq) {
SParseMetaCache metaCache = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- if (qIsInsertValuesSql(pCxt->pSql, pCxt->sqlLen)) {
- code = parseInsertSyntax(pCxt, pQuery, &metaCache);
- } else {
- code = parseSqlSyntax(pCxt, pQuery, &metaCache);
+ int32_t code = nodesAcquireAllocator(pCxt->allocatorId);
+ if (TSDB_CODE_SUCCESS == code) {
+ if (qIsInsertValuesSql(pCxt->pSql, pCxt->sqlLen)) {
+ code = parseInsertSyntax(pCxt, pQuery, &metaCache);
+ } else {
+ code = parseSqlSyntax(pCxt, pQuery, &metaCache);
+ }
}
if (TSDB_CODE_SUCCESS == code) {
code = buildCatalogReq(pCxt, &metaCache, pCatalogReq);
}
+ nodesReleaseAllocator(pCxt->allocatorId);
destoryParseMetaCache(&metaCache, true);
terrno = code;
return code;
@@ -194,7 +197,10 @@ int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq
int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
const struct SMetaData* pMetaData, SQuery* pQuery) {
SParseMetaCache metaCache = {0};
- int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, &metaCache, NULL == pQuery->pRoot);
+ int32_t code = nodesAcquireAllocator(pCxt->allocatorId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = putMetaDataToCache(pCatalogReq, pMetaData, &metaCache, NULL == pQuery->pRoot);
+ }
if (TSDB_CODE_SUCCESS == code) {
if (NULL == pQuery->pRoot) {
code = parseInsertSql(pCxt, &pQuery, &metaCache);
@@ -202,6 +208,7 @@ int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCata
code = analyseSemantic(pCxt, pQuery, &metaCache);
}
}
+ nodesReleaseAllocator(pCxt->allocatorId);
destoryParseMetaCache(&metaCache, false);
terrno = code;
return code;
diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp
index 360b904c170e50682b17d9c99a8ec1cd679a6db0..14c991917bf534fb2719b2c9d90aff9325042615 100644
--- a/source/libs/parser/test/parTestUtil.cpp
+++ b/source/libs/parser/test/parTestUtil.cpp
@@ -119,12 +119,18 @@ class ParserTestBaseImpl {
TEST_INTERFACE_ASYNC_API
};
- static void _destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request) {
+ static void destoryParseContext(SParseContext* pCxt) {
+ taosArrayDestroy(pCxt->pTableMetaPos);
+ taosArrayDestroy(pCxt->pTableVgroupPos);
+ delete pCxt;
+ }
+
+ static void destoryParseMetaCacheWarpper(SParseMetaCache* pMetaCache, bool request) {
destoryParseMetaCache(pMetaCache, request);
delete pMetaCache;
}
- static void _destroyQuery(SQuery** pQuery) {
+ static void destroyQuery(SQuery** pQuery) {
if (nullptr == pQuery) {
return;
}
@@ -303,10 +309,10 @@ class ParserTestBaseImpl {
setParseContext(sql, &cxt);
if (qIsInsertValuesSql(cxt.pSql, cxt.sqlLen)) {
- unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), destroyQuery);
doParseInsertSql(&cxt, query.get(), nullptr);
} else {
- unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), destroyQuery);
doParse(&cxt, query.get());
SQuery* pQuery = *(query.get());
@@ -335,7 +341,7 @@ class ParserTestBaseImpl {
SParseContext cxt = {0};
setParseContext(sql, &cxt);
- unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), destroyQuery);
doParseSql(&cxt, query.get());
SQuery* pQuery = *(query.get());
@@ -354,26 +360,26 @@ class ParserTestBaseImpl {
void runAsyncInternalFuncs(const string& sql, int32_t expect, ParserStage checkStage) {
reset(expect, checkStage, TEST_INTERFACE_ASYNC_INTERNAL);
try {
- SParseContext cxt = {0};
- setParseContext(sql, &cxt, true);
+ unique_ptr > cxt(new SParseContext(), destoryParseContext);
+ setParseContext(sql, cxt.get(), true);
- unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), destroyQuery);
bool request = true;
unique_ptr > metaCache(
- new SParseMetaCache(), bind(_destoryParseMetaCache, _1, cref(request)));
- bool isInsertValues = qIsInsertValuesSql(cxt.pSql, cxt.sqlLen);
+ new SParseMetaCache(), bind(destoryParseMetaCacheWarpper, _1, cref(request)));
+ bool isInsertValues = qIsInsertValuesSql(cxt->pSql, cxt->sqlLen);
if (isInsertValues) {
- doParseInsertSyntax(&cxt, query.get(), metaCache.get());
+ doParseInsertSyntax(cxt.get(), query.get(), metaCache.get());
} else {
- doParse(&cxt, query.get());
- doCollectMetaKey(&cxt, *(query.get()), metaCache.get());
+ doParse(cxt.get(), query.get());
+ doCollectMetaKey(cxt.get(), *(query.get()), metaCache.get());
}
SQuery* pQuery = *(query.get());
unique_ptr catalogReq(new SCatalogReq(),
MockCatalogService::destoryCatalogReq);
- doBuildCatalogReq(&cxt, metaCache.get(), catalogReq.get());
+ doBuildCatalogReq(cxt.get(), metaCache.get(), catalogReq.get());
string err;
thread t1([&]() {
@@ -386,13 +392,13 @@ class ParserTestBaseImpl {
doPutMetaDataToCache(catalogReq.get(), metaData.get(), metaCache.get(), isInsertValues);
if (isInsertValues) {
- doParseInsertSql(&cxt, query.get(), metaCache.get());
+ doParseInsertSql(cxt.get(), query.get(), metaCache.get());
} else {
- doAuthenticate(&cxt, pQuery, metaCache.get());
+ doAuthenticate(cxt.get(), pQuery, metaCache.get());
- doTranslate(&cxt, pQuery, metaCache.get());
+ doTranslate(cxt.get(), pQuery, metaCache.get());
- doCalculateConstant(&cxt, pQuery);
+ doCalculateConstant(cxt.get(), pQuery);
}
} catch (const TerminateFlag& e) {
// success and terminate
@@ -423,13 +429,13 @@ class ParserTestBaseImpl {
void runAsyncApis(const string& sql, int32_t expect, ParserStage checkStage) {
reset(expect, checkStage, TEST_INTERFACE_ASYNC_API);
try {
- SParseContext cxt = {0};
- setParseContext(sql, &cxt);
+ unique_ptr > cxt(new SParseContext(), destoryParseContext);
+ setParseContext(sql, cxt.get());
unique_ptr catalogReq(new SCatalogReq(),
MockCatalogService::destoryCatalogReq);
- unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
- doParseSqlSyntax(&cxt, query.get(), catalogReq.get());
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), destroyQuery);
+ doParseSqlSyntax(cxt.get(), query.get(), catalogReq.get());
SQuery* pQuery = *(query.get());
string err;
@@ -438,7 +444,7 @@ class ParserTestBaseImpl {
unique_ptr metaData(new SMetaData(), MockCatalogService::destoryMetaData);
doGetAllMeta(catalogReq.get(), metaData.get());
- doAnalyseSqlSemantic(&cxt, catalogReq.get(), metaData.get(), pQuery);
+ doAnalyseSqlSemantic(cxt.get(), catalogReq.get(), metaData.get(), pQuery);
} catch (const TerminateFlag& e) {
// success and terminate
} catch (const runtime_error& e) {
diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c
index a3cb0c265420fcd385aacd1dbc979fe2a4d24e59..106e6741522bba0a91f0a7763f313f7211c24f46 100644
--- a/source/libs/planner/src/planSpliter.c
+++ b/source/libs/planner/src/planSpliter.c
@@ -1007,6 +1007,7 @@ static int32_t stbSplSplitMergeScanNode(SSplitContext* pCxt, SLogicSubplan* pSub
code = stbSplCreateMergeNode(pCxt, pSubplan, (SLogicNode*)pScan, pMergeKeys, pMergeScan, groupSort);
}
if (TSDB_CODE_SUCCESS == code) {
+ nodesDestroyNode((SNode*)pScan);
code = nodesListMakeStrictAppend(&pSubplan->pChildren,
(SNode*)splCreateScanSubplan(pCxt, pMergeScan, SPLIT_FLAG_STABLE_SPLIT));
}
diff --git a/source/libs/planner/src/planner.c b/source/libs/planner/src/planner.c
index 35903d45b16175c1f9c21904b96ced434178ec51..e4f02f12e698768f559bad30ef2a951022bb998e 100644
--- a/source/libs/planner/src/planner.c
+++ b/source/libs/planner/src/planner.c
@@ -33,7 +33,10 @@ int32_t qCreateQueryPlan(SPlanContext* pCxt, SQueryPlan** pPlan, SArray* pExecNo
SLogicSubplan* pLogicSubplan = NULL;
SQueryLogicPlan* pLogicPlan = NULL;
- int32_t code = createLogicPlan(pCxt, &pLogicSubplan);
+ int32_t code = nodesAcquireAllocator(pCxt->allocatorId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = createLogicPlan(pCxt, &pLogicSubplan);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = optimizeLogicPlan(pCxt, pLogicSubplan);
}
@@ -49,6 +52,7 @@ int32_t qCreateQueryPlan(SPlanContext* pCxt, SQueryPlan** pPlan, SArray* pExecNo
if (TSDB_CODE_SUCCESS == code) {
dumpQueryPlan(*pPlan);
}
+ nodesReleaseAllocator(pCxt->allocatorId);
nodesDestroyNode((SNode*)pLogicSubplan);
nodesDestroyNode((SNode*)pLogicPlan);
diff --git a/source/libs/planner/test/planTestMain.cpp b/source/libs/planner/test/planTestMain.cpp
index 8f6fc832a2d8c4722c02781d2a357606a1eb481b..df6e72ce46e0aad05f62e45ba66e38c8f0c9fc96 100644
--- a/source/libs/planner/test/planTestMain.cpp
+++ b/source/libs/planner/test/planTestMain.cpp
@@ -22,6 +22,7 @@
#include "mockCatalog.h"
#include "parser.h"
#include "planTestUtil.h"
+#include "tglobal.h"
class PlannerEnv : public testing::Environment {
public:
@@ -30,6 +31,8 @@ class PlannerEnv : public testing::Environment {
initMetaDataEnv();
generateMetaData();
initLog(TD_TMP_DIR_PATH "td");
+ initCfg();
+ nodesInitAllocatorSet();
}
virtual void TearDown() {
@@ -37,6 +40,7 @@ class PlannerEnv : public testing::Environment {
qCleanupKeywordsTable();
fmFuncMgtDestroy();
taosCloseLog();
+ nodesDestroyAllocatorSet();
}
PlannerEnv() {}
@@ -67,6 +71,8 @@ class PlannerEnv : public testing::Environment {
std::cout << "failed to init log file" << std::endl;
}
}
+
+ void initCfg() { tsQueryPlannerTrace = true; }
};
static void parseArg(int argc, char* argv[]) {
@@ -79,6 +85,7 @@ static void parseArg(int argc, char* argv[]) {
{"limitSql", required_argument, NULL, 'i'},
{"log", required_argument, NULL, 'l'},
{"queryPolicy", required_argument, NULL, 'q'},
+ {"useNodeAllocator", required_argument, NULL, 'a'},
{0, 0, 0, 0}
};
// clang-format on
@@ -99,6 +106,9 @@ static void parseArg(int argc, char* argv[]) {
case 'q':
setQueryPolicy(optarg);
break;
+ case 'a':
+ setUseNodeAllocator(optarg);
+ break;
default:
break;
}
diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp
index 2b8e3d98644fcfb1595642ab9cc4a3b8f078366a..73d695195cab5b1d5257c1b783e0c3a5dfe05840 100644
--- a/source/libs/planner/test/planTestUtil.cpp
+++ b/source/libs/planner/test/planTestUtil.cpp
@@ -41,6 +41,7 @@ using namespace testing;
enum DumpModule {
DUMP_MODULE_NOTHING = 1,
+ DUMP_MODULE_SQL,
DUMP_MODULE_PARSER,
DUMP_MODULE_LOGIC,
DUMP_MODULE_OPTIMIZED,
@@ -56,10 +57,13 @@ int32_t g_skipSql = 0;
int32_t g_limitSql = 0;
int32_t g_logLevel = 131;
int32_t g_queryPolicy = QUERY_POLICY_VNODE;
+bool g_useNodeAllocator = false;
void setDumpModule(const char* pModule) {
if (NULL == pModule) {
g_dumpModule = DUMP_MODULE_ALL;
+ } else if (0 == strncasecmp(pModule, "sql", strlen(pModule))) {
+ g_dumpModule = DUMP_MODULE_SQL;
} else if (0 == strncasecmp(pModule, "parser", strlen(pModule))) {
g_dumpModule = DUMP_MODULE_PARSER;
} else if (0 == strncasecmp(pModule, "logic", strlen(pModule))) {
@@ -79,10 +83,11 @@ void setDumpModule(const char* pModule) {
}
}
-void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); }
-void setLimitSqlNum(const char* pNum) { g_limitSql = stoi(pNum); }
-void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); }
-void setQueryPolicy(const char* pQueryPolicy) { g_queryPolicy = stoi(pQueryPolicy); }
+void setSkipSqlNum(const char* pArg) { g_skipSql = stoi(pArg); }
+void setLimitSqlNum(const char* pArg) { g_limitSql = stoi(pArg); }
+void setLogLevel(const char* pArg) { g_logLevel = stoi(pArg); }
+void setQueryPolicy(const char* pArg) { g_queryPolicy = stoi(pArg); }
+void setUseNodeAllocator(const char* pArg) { g_useNodeAllocator = stoi(pArg); }
int32_t getLogLevel() { return g_logLevel; }
@@ -124,6 +129,12 @@ class PlannerTestBaseImpl {
}
void runImpl(const string& sql, int32_t queryPolicy) {
+ int64_t allocatorId = 0;
+ if (g_useNodeAllocator) {
+ nodesCreateAllocator(sqlNo_, 32 * 1024, &allocatorId);
+ nodesAcquireAllocator(allocatorId);
+ }
+
reset();
tsQueryPolicy = queryPolicy;
try {
@@ -155,8 +166,13 @@ class PlannerTestBaseImpl {
dump(g_dumpModule);
} catch (...) {
dump(DUMP_MODULE_ALL);
+ nodesReleaseAllocator(allocatorId);
+ nodesDestroyAllocator(allocatorId);
throw;
}
+
+ nodesReleaseAllocator(allocatorId);
+ nodesDestroyAllocator(allocatorId);
}
void prepare(const string& sql) {
@@ -216,6 +232,8 @@ class PlannerTestBaseImpl {
doCreatePhysiPlan(&cxt, pLogicPlan, &pPlan);
unique_ptr plan(pPlan, (void (*)(SQueryPlan*))nodesDestroyNode);
+ checkPlanMsg((SNode*)pPlan);
+
dump(g_dumpModule);
} catch (...) {
dump(DUMP_MODULE_ALL);
@@ -252,7 +270,6 @@ class PlannerTestBaseImpl {
string splitLogicPlan_;
string scaledLogicPlan_;
string physiPlan_;
- string physiPlanMsg_;
vector physiSubplans_;
};
@@ -276,17 +293,16 @@ class PlannerTestBaseImpl {
res_.splitLogicPlan_.clear();
res_.scaledLogicPlan_.clear();
res_.physiPlan_.clear();
- res_.physiPlanMsg_.clear();
res_.physiSubplans_.clear();
}
void dump(DumpModule module) {
- cout << "========================================== " << sqlNo_ << " sql : [" << stmtEnv_.sql_ << "]" << endl;
-
if (DUMP_MODULE_NOTHING == module) {
return;
}
+ cout << "========================================== " << sqlNo_ << " sql : [" << stmtEnv_.sql_ << "]" << endl;
+
if (DUMP_MODULE_ALL == module || DUMP_MODULE_PARSER == module) {
if (res_.prepareAst_.empty()) {
cout << "+++++++++++++++++++++syntax tree : " << endl;
@@ -411,8 +427,6 @@ class PlannerTestBaseImpl {
SNode* pSubplan;
FOREACH(pSubplan, ((SNodeListNode*)pNode)->pNodeList) { res_.physiSubplans_.push_back(toString(pSubplan)); }
}
- res_.physiPlanMsg_ = toMsg((SNode*)(*pPlan));
- cout << "json len: " << res_.physiPlan_.length() << ", msg len: " << res_.physiPlanMsg_.length() << endl;
}
void setPlanContext(SQuery* pQuery, SPlanContext* pCxt) {
@@ -451,27 +465,16 @@ class PlannerTestBaseImpl {
string toString(const SNode* pRoot) {
char* pStr = NULL;
int32_t len = 0;
-
- auto start = chrono::steady_clock::now();
DO_WITH_THROW(nodesNodeToString, pRoot, false, &pStr, &len)
- if (QUERY_NODE_PHYSICAL_PLAN == nodeType(pRoot)) {
- cout << "nodesNodeToString: "
- << chrono::duration_cast(chrono::steady_clock::now() - start).count() << "us" << endl;
- }
-
string str(pStr);
taosMemoryFreeClear(pStr);
return str;
}
- string toMsg(const SNode* pRoot) {
+ void checkPlanMsg(const SNode* pRoot) {
char* pStr = NULL;
int32_t len = 0;
-
- auto start = chrono::steady_clock::now();
DO_WITH_THROW(nodesNodeToMsg, pRoot, &pStr, &len)
- cout << "nodesNodeToMsg: "
- << chrono::duration_cast(chrono::steady_clock::now() - start).count() << "us" << endl;
string copyStr(pStr, len);
SNode* pNode = NULL;
@@ -491,9 +494,7 @@ class PlannerTestBaseImpl {
nodesDestroyNode(pNode);
taosMemoryFreeClear(pNewStr);
- string str(pStr, len);
taosMemoryFreeClear(pStr);
- return str;
}
caseEnv caseEnv_;
diff --git a/source/libs/planner/test/planTestUtil.h b/source/libs/planner/test/planTestUtil.h
index b0ddd726a6d4c0cf3b9294990e593cf67839823b..be8b51f769d4764048bdf9ded777dfb1f3dd6e56 100644
--- a/source/libs/planner/test/planTestUtil.h
+++ b/source/libs/planner/test/planTestUtil.h
@@ -41,11 +41,12 @@ class PlannerTestBase : public testing::Test {
std::unique_ptr impl_;
};
-extern void setDumpModule(const char* pModule);
-extern void setSkipSqlNum(const char* pNum);
-extern void setLimitSqlNum(const char* pNum);
-extern void setLogLevel(const char* pLogLevel);
-extern void setQueryPolicy(const char* pQueryPolicy);
+extern void setDumpModule(const char* pArg);
+extern void setSkipSqlNum(const char* pArg);
+extern void setLimitSqlNum(const char* pArg);
+extern void setLogLevel(const char* pArg);
+extern void setQueryPolicy(const char* pArg);
+extern void setUseNodeAllocator(const char* pArg);
extern int32_t getLogLevel();
#endif // PLAN_TEST_UTIL_H
diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c
index cd1f6624bdf83e4fe143c1a648e5e30947bcdd65..05730c62acb79129468c09bc1097727bde8b05fc 100644
--- a/source/libs/scalar/src/scalar.c
+++ b/source/libs/scalar/src/scalar.c
@@ -847,7 +847,7 @@ EDealRes sclRewriteFunction(SNode** pNode, SScalarCtx *ctx) {
memcpy(res->datum.p, output.columnData->pData, len);
} else if (IS_VAR_DATA_TYPE(type)) {
//res->datum.p = taosMemoryCalloc(res->node.resType.bytes + VARSTR_HEADER_SIZE + 1, 1);
- res->datum.p = taosMemoryCalloc(varDataTLen(output.columnData->pData), 1);
+ res->datum.p = taosMemoryCalloc(varDataTLen(output.columnData->pData) + 1, 1);
res->node.resType.bytes = varDataTLen(output.columnData->pData);
memcpy(res->datum.p, output.columnData->pData, varDataTLen(output.columnData->pData));
} else {
diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h
index 7fea2867323f9d5f119740d94a80da4f1a0b5b51..a62531a8757f272f2ada32be4966e349ec46e769 100644
--- a/source/libs/scheduler/inc/schInt.h
+++ b/source/libs/scheduler/inc/schInt.h
@@ -254,7 +254,8 @@ typedef struct SSchJob {
SRequestConnInfo conn;
SArray *nodeList; // qnode/vnode list, SArray
SArray *levels; // starting from 0. SArray
- SQueryPlan *pDag;
+ SQueryPlan *pDag;
+ int64_t allocatorRefId;
SArray *dataSrcTasks; // SArray
int32_t levelIdx;
diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c
index 98501427ab7b006daa78bc5d1c6c7c8d377572a0..988049059461e6c88087881028076fa0be02aa74 100644
--- a/source/libs/scheduler/src/schJob.c
+++ b/source/libs/scheduler/src/schJob.c
@@ -673,6 +673,7 @@ void schFreeJobImpl(void *job) {
destroyQueryExecRes(&pJob->execRes);
qDestroyQueryPlan(pJob->pDag);
+ nodesReleaseAllocatorWeakRef(pJob->allocatorRefId);
taosMemoryFreeClear(pJob->userRes.execRes);
taosMemoryFreeClear(pJob->fetchRes);
@@ -724,6 +725,7 @@ int32_t schInitJob(int64_t *pJobId, SSchedulerReq *pReq) {
pJob->sql = strdup(pReq->sql);
}
pJob->pDag = pReq->pDag;
+ pJob->allocatorRefId = nodesMakeAllocatorWeakRef(pReq->allocatorRefId);
pJob->chkKillFp = pReq->chkKillFp;
pJob->chkKillParam = pReq->chkKillParam;
pJob->userRes.execFp = pReq->execFp;
diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c
index d6e87c27366da27dda96a41c7a9d2fda92c652a9..4a63cd3bb28cdbb31ad4f2ca7531787fccb7e7d4 100644
--- a/source/libs/stream/src/stream.c
+++ b/source/libs/stream/src/stream.c
@@ -182,7 +182,7 @@ int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, S
pReq->upstreamTaskId);
streamTaskEnqueue(pTask, pReq, pRsp);
- tFreeStreamDispatchReq(pReq);
+ tDeleteStreamDispatchReq(pReq);
if (exec) {
if (streamTryExec(pTask) < 0) {
diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c
index cd5f499c34a4db63e6e9c4820f5abeb1076c394a..e6705a77b28a07e04e5439ed1c9bbf561ff7c3df 100644
--- a/source/libs/stream/src/streamData.c
+++ b/source/libs/stream/src/streamData.c
@@ -179,5 +179,15 @@ void streamFreeQitem(SStreamQueueItem* data) {
taosArrayDestroy(pMerge->reqs);
taosArrayDestroy(pMerge->dataRefs);
taosFreeQitem(pMerge);
+ } else if (type == STREAM_INPUT__REF_DATA_BLOCK) {
+ SStreamRefDataBlock* pRefBlock = (SStreamRefDataBlock*)data;
+
+ int32_t ref = atomic_sub_fetch_32(pRefBlock->dataRef, 1);
+ ASSERT(ref >= 0);
+ if (ref == 0) {
+ blockDataDestroy(pRefBlock->pBlock);
+ taosMemoryFree(pRefBlock->dataRef);
+ }
+ taosFreeQitem(pRefBlock);
}
}
diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c
index 7cdb7c0db95cd582fad03174d0fa6927cb1fd668..e6960ae35086c471f7891e551a8fd17ec4776ef1 100644
--- a/source/libs/stream/src/streamDispatch.c
+++ b/source/libs/stream/src/streamDispatch.c
@@ -62,7 +62,7 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) {
return 0;
}
-void tFreeStreamDispatchReq(SStreamDispatchReq* pReq) {
+void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq) {
taosArrayDestroyP(pReq->data, taosMemoryFree);
taosArrayDestroy(pReq->dataLen);
}
@@ -95,7 +95,10 @@ int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq) {
return 0;
}
+void tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq) { taosMemoryFree(pReq->pRetrieve); }
+
int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock) {
+ int32_t code = -1;
SRetrieveTableRsp* pRetrieve = NULL;
void* buf = NULL;
int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock);
@@ -143,7 +146,7 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock)
buf = rpcMallocCont(sizeof(SMsgHead) + len);
if (buf == NULL) {
- goto FAIL;
+ goto CLEAR;
}
((SMsgHead*)buf)->vgId = htonl(pEpInfo->nodeId);
@@ -151,6 +154,7 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock)
SEncoder encoder;
tEncoderInit(&encoder, abuf, len);
tEncodeStreamRetrieveReq(&encoder, &req);
+ tEncoderClear(&encoder);
SRpcMsg rpcMsg = {
.code = 0,
@@ -161,17 +165,18 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock)
if (tmsgSendReq(&pEpInfo->epSet, &rpcMsg) < 0) {
ASSERT(0);
- return -1;
+ goto CLEAR;
}
+ buf = NULL;
qDebug("task %d(child %d) send retrieve req to task %d at node %d, reqId %" PRId64, pTask->taskId,
pTask->selfChildId, pEpInfo->taskId, pEpInfo->nodeId, req.reqId);
}
- return 0;
-FAIL:
- if (pRetrieve) taosMemoryFree(pRetrieve);
- if (buf) taosMemoryFree(buf);
- return -1;
+ code = 0;
+CLEAR:
+ taosMemoryFree(pRetrieve);
+ rpcFreeCont(buf);
+ return code;
}
static int32_t streamAddBlockToDispatchMsg(const SSDataBlock* pBlock, SStreamDispatchReq* pReq) {
diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c
index 102bad742652005df440b5d4d7a87bcef34ba636..5ad5aa549d28a6b8c4835177dcb11df5418fe57c 100644
--- a/source/libs/stream/src/streamExec.c
+++ b/source/libs/stream/src/streamExec.c
@@ -38,6 +38,9 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray*
SArray* blocks = pMerged->reqs;
qDebug("task %d %p set submit input (merged), batch num: %d", pTask->taskId, pTask, (int32_t)blocks->size);
qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_INPUT__MERGED_SUBMIT);
+ } else if (pItem->type == STREAM_INPUT__REF_DATA_BLOCK) {
+ const SStreamRefDataBlock* pRefBlock = (const SStreamRefDataBlock*)data;
+ qSetMultiStreamInput(exec, pRefBlock->pBlock, 1, STREAM_INPUT__DATA_BLOCK);
} else {
ASSERT(0);
}
diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c
index 1e68fe346c71d65e0594643da5b94e2dd1ab204d..b604d25816e300560571458fde7153196e77eee5 100644
--- a/source/libs/sync/src/syncCommit.c
+++ b/source/libs/sync/src/syncCommit.c
@@ -69,15 +69,26 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
if (agree) {
// term
- SSyncRaftEntry* pEntry = pSyncNode->pLogStore->getEntry(pSyncNode->pLogStore, index);
- ASSERT(pEntry != NULL);
-
+ SSyncRaftEntry* pEntry = NULL;
+ SLRUCache* pCache = pSyncNode->pLogStore->pCache;
+ LRUHandle* h = taosLRUCacheLookup(pCache, &index, sizeof(index));
+ if (h) {
+ pEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, h);
+ } else {
+ pEntry = pSyncNode->pLogStore->getEntry(pSyncNode->pLogStore, index);
+ ASSERT(pEntry != NULL);
+ }
// cannot commit, even if quorum agree. need check term!
if (pEntry->term <= pSyncNode->pRaftStore->currentTerm) {
// update commit index
newCommitIndex = index;
- syncEntryDestory(pEntry);
+ if (h) {
+ taosLRUCacheRelease(pCache, h, false);
+ } else {
+ syncEntryDestory(pEntry);
+ }
+
break;
} else {
do {
@@ -88,7 +99,11 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
} while (0);
}
- syncEntryDestory(pEntry);
+ if (h) {
+ taosLRUCacheRelease(pCache, h, false);
+ } else {
+ syncEntryDestory(pEntry);
+ }
}
}
diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c
index 6f29b54f806f1113ec69dface7bdcbb4b0c42afc..17157fbd2327ce126b4e63e509c7f0935539860e 100644
--- a/source/libs/sync/src/syncMain.c
+++ b/source/libs/sync/src/syncMain.c
@@ -2581,6 +2581,20 @@ static int32_t syncNodeEqNoop(SSyncNode* ths) {
return ret;
}
+static void deleteCacheEntry(const void* key, size_t keyLen, void* value) { taosMemoryFree(value); }
+
+static int32_t syncCacheEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, LRUHandle** h) {
+ int code = 0;
+ int entryLen = sizeof(*pEntry) + pEntry->dataLen;
+ LRUStatus status = taosLRUCacheInsert(pLogStore->pCache, &pEntry->index, sizeof(pEntry->index), pEntry, entryLen,
+ deleteCacheEntry, h, TAOS_LRU_PRIORITY_LOW);
+ if (status != TAOS_LRU_STATUS_OK) {
+ code = -1;
+ }
+
+ return code;
+}
+
static int32_t syncNodeAppendNoop(SSyncNode* ths) {
int32_t ret = 0;
@@ -2589,13 +2603,21 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths) {
SSyncRaftEntry* pEntry = syncEntryBuildNoop(term, index, ths->vgId);
ASSERT(pEntry != NULL);
+ LRUHandle* h = NULL;
+ syncCacheEntry(ths->pLogStore, pEntry, &h);
+
if (ths->state == TAOS_SYNC_STATE_LEADER) {
int32_t code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pEntry);
ASSERT(code == 0);
syncNodeReplicate(ths, false);
}
- syncEntryDestory(pEntry);
+ if (h) {
+ taosLRUCacheRelease(ths->pLogStore->pCache, h, false);
+ } else {
+ syncEntryDestory(pEntry);
+ }
+
return ret;
}
@@ -2654,6 +2676,9 @@ int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg, SyncI
SSyncRaftEntry* pEntry = syncEntryBuild2((SyncClientRequest*)pMsg, term, index);
ASSERT(pEntry != NULL);
+ LRUHandle* h = NULL;
+ syncCacheEntry(ths->pLogStore, pEntry, &h);
+
if (ths->state == TAOS_SYNC_STATE_LEADER) {
// append entry
code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pEntry);
@@ -2685,7 +2710,12 @@ int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg, SyncI
}
}
- syncEntryDestory(pEntry);
+ if (h) {
+ taosLRUCacheRelease(ths->pLogStore->pCache, h, false);
+ } else {
+ syncEntryDestory(pEntry);
+ }
+
return ret;
}
@@ -2973,9 +3003,15 @@ int32_t syncNodeCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex,
for (SyncIndex i = beginIndex; i <= endIndex; ++i) {
if (i != SYNC_INDEX_INVALID) {
SSyncRaftEntry* pEntry;
- code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, i, &pEntry);
- ASSERT(code == 0);
- ASSERT(pEntry != NULL);
+ SLRUCache* pCache = ths->pLogStore->pCache;
+ LRUHandle* h = taosLRUCacheLookup(pCache, &i, sizeof(i));
+ if (h) {
+ pEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, h);
+ } else {
+ code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, i, &pEntry);
+ ASSERT(code == 0);
+ ASSERT(pEntry != NULL);
+ }
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pEntry, &rpcMsg);
@@ -3058,7 +3094,11 @@ int32_t syncNodeCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex,
}
rpcFreeCont(rpcMsg.pCont);
- syncEntryDestory(pEntry);
+ if (h) {
+ taosLRUCacheRelease(pCache, h, false);
+ } else {
+ syncEntryDestory(pEntry);
+ }
}
}
}
diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c
index 0649e064e45391cfe9082c24264a33b762d1a279..496c8419de7f56a96d544997f989ef6d16de4de3 100644
--- a/source/libs/sync/src/syncRaftLog.c
+++ b/source/libs/sync/src/syncRaftLog.c
@@ -53,6 +53,15 @@ SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) {
SSyncLogStore* pLogStore = taosMemoryMalloc(sizeof(SSyncLogStore));
ASSERT(pLogStore != NULL);
+ pLogStore->pCache = taosLRUCacheInit(10 * 1024 * 1024, 1, .5);
+ if (pLogStore->pCache == NULL) {
+ terrno = TSDB_CODE_WAL_OUT_OF_MEMORY;
+ taosMemoryFree(pLogStore);
+ return NULL;
+ }
+
+ taosLRUCacheSetStrictCapacity(pLogStore->pCache, false);
+
pLogStore->data = taosMemoryMalloc(sizeof(SSyncLogStoreData));
ASSERT(pLogStore->data != NULL);
@@ -102,6 +111,10 @@ void logStoreDestory(SSyncLogStore* pLogStore) {
taosThreadMutexDestroy(&(pData->mutex));
taosMemoryFree(pLogStore->data);
+
+ taosLRUCacheEraseUnrefEntries(pLogStore->pCache);
+ taosLRUCacheCleanup(pLogStore->pCache);
+
taosMemoryFree(pLogStore);
}
}
@@ -243,7 +256,7 @@ static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntr
static int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index, SSyncRaftEntry** ppEntry) {
SSyncLogStoreData* pData = pLogStore->data;
SWal* pWal = pData->pWal;
- int32_t code;
+ int32_t code = 0;
*ppEntry = NULL;
@@ -257,6 +270,7 @@ static int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index,
taosThreadMutexLock(&(pData->mutex));
code = walReadVer(pWalHandle, index);
+ // code = walReadVerCached(pWalHandle, index);
if (code != 0) {
int32_t err = terrno;
const char* errStr = tstrerror(err);
@@ -412,6 +426,7 @@ SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index) {
ASSERT(pWalHandle != NULL);
int32_t code = walReadVer(pWalHandle, index);
+ // int32_t code = walReadVerCached(pWalHandle, index);
if (code != 0) {
int32_t err = terrno;
const char* errStr = tstrerror(err);
diff --git a/source/libs/tdb/src/db/tdbPage.c b/source/libs/tdb/src/db/tdbPage.c
index 1e2eefabf42bd97644baf38e19b77595529aab38..f4878ea861b342724896e844d4796d4bdd598c01 100644
--- a/source/libs/tdb/src/db/tdbPage.c
+++ b/source/libs/tdb/src/db/tdbPage.c
@@ -80,6 +80,7 @@ int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg)
ASSERT(xFree);
for (int iOvfl = 0; iOvfl < pPage->nOverflow; iOvfl++) {
+ tdbDebug("tdbPage/destroy/free ovfl cell: %p/%p", pPage->apOvfl[iOvfl], pPage);
tdbOsFree(pPage->apOvfl[iOvfl]);
}
@@ -152,7 +153,7 @@ int tdbPageInsertCell(SPage *pPage, int idx, SCell *pCell, int szCell, u8 asOvfl
pNewCell = (SCell *)tdbOsMalloc(szCell);
memcpy(pNewCell, pCell, szCell);
- tdbDebug("tdbPage/new ovfl cell: %p", pNewCell);
+ tdbDebug("tdbPage/insert/new ovfl cell: %p/%p", pNewCell, pPage);
pPage->apOvfl[iOvfl] = pNewCell;
pPage->aiOvfl[iOvfl] = idx;
@@ -202,7 +203,7 @@ int tdbPageDropCell(SPage *pPage, int idx, TXN *pTxn, SBTree *pBt) {
if (pPage->aiOvfl[iOvfl] == idx) {
// remove the over flow cell
tdbOsFree(pPage->apOvfl[iOvfl]);
- tdbDebug("tdbPage/free ovfl cell: %p", pPage->apOvfl[iOvfl]);
+ tdbDebug("tdbPage/drop/free ovfl cell: %p", pPage->apOvfl[iOvfl]);
for (; (++iOvfl) < pPage->nOverflow;) {
pPage->aiOvfl[iOvfl - 1] = pPage->aiOvfl[iOvfl] - 1;
pPage->apOvfl[iOvfl - 1] = pPage->apOvfl[iOvfl];
@@ -255,6 +256,7 @@ void tdbPageCopy(SPage *pFromPage, SPage *pToPage, int deepCopyOvfl) {
int szCell = (*pFromPage->xCellSize)(pFromPage, pFromPage->apOvfl[iOvfl], 0, NULL, NULL);
pNewCell = (SCell *)tdbOsMalloc(szCell);
memcpy(pNewCell, pFromPage->apOvfl[iOvfl], szCell);
+ tdbDebug("tdbPage/copy/new ovfl cell: %p/%p/%p", pNewCell, pToPage, pFromPage);
}
pToPage->apOvfl[iOvfl] = pNewCell;
diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c
index 5284aeff779f6b26f6f5a39aba61c781264108fd..c69046f707a1fddb7a593771ad15535a70615ff8 100644
--- a/source/libs/wal/src/walMeta.c
+++ b/source/libs/wal/src/walMeta.c
@@ -116,7 +116,6 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) {
}
#endif
}
- // TODO truncate file
if (found == NULL) {
// file corrupted, no complete log
@@ -125,8 +124,20 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) {
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
return -1;
}
+
+ // truncate file
SWalCkHead* lastEntry = (SWalCkHead*)found;
int64_t retVer = lastEntry->head.version;
+ int64_t lastEntryBeginOffset = offset + (int64_t)((char*)found - (char*)buf);
+ int64_t lastEntryEndOffset = lastEntryBeginOffset + sizeof(SWalCkHead) + lastEntry->head.bodyLen;
+ if (lastEntryEndOffset != fileSize) {
+ wWarn("vgId:%d repair meta truncate file %s to %ld, orig size %ld", pWal->cfg.vgId, fnameStr, lastEntryEndOffset,
+ fileSize);
+ taosFtruncateFile(pFile, lastEntryEndOffset);
+ ((SWalFileInfo*)taosArrayGetLast(pWal->fileInfoSet))->fileSize = lastEntryEndOffset;
+ pWal->totSize -= (fileSize - lastEntryEndOffset);
+ }
+
taosCloseFile(&pFile);
taosMemoryFree(buf);
@@ -226,16 +237,92 @@ int walCheckAndRepairMeta(SWal* pWal) {
}
}
- // TODO: set fileSize and lastVer if necessary
-
return 0;
}
int walCheckAndRepairIdx(SWal* pWal) {
- // TODO: iterate all log files
- // if idx not found, scan log and write idx
- // if found, check complete by first and last entry of each idx file
- // if idx incomplete, binary search last valid entry, and then build other part
+ int32_t sz = taosArrayGetSize(pWal->fileInfoSet);
+ for (int32_t i = 0; i < sz; i++) {
+ SWalFileInfo* pFileInfo = taosArrayGet(pWal->fileInfoSet, i);
+
+ char fnameStr[WAL_FILE_LEN];
+ walBuildIdxName(pWal, pFileInfo->firstVer, fnameStr);
+ int64_t fsize;
+ TdFilePtr pIdxFile = taosOpenFile(fnameStr, TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE);
+ if (pIdxFile == NULL) {
+ ASSERT(0);
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ wError("vgId:%d, cannot open file %s, since %s", pWal->cfg.vgId, fnameStr, terrstr());
+ return -1;
+ }
+
+ taosFStatFile(pIdxFile, &fsize, NULL);
+ if (fsize == (pFileInfo->lastVer - pFileInfo->firstVer + 1) * sizeof(SWalIdxEntry)) {
+ taosCloseFile(&pIdxFile);
+ continue;
+ }
+
+ int32_t left = fsize % sizeof(SWalIdxEntry);
+ int64_t offset = taosLSeekFile(pIdxFile, -left, SEEK_END);
+ if (left != 0) {
+ taosFtruncateFile(pIdxFile, offset);
+ wWarn("vgId:%d wal truncate file %s to offset %ld since size invalid, file size %ld", pWal->cfg.vgId, fnameStr,
+ offset, fsize);
+ }
+ offset -= sizeof(SWalIdxEntry);
+
+ SWalIdxEntry idxEntry = {.ver = pFileInfo->firstVer};
+ while (1) {
+ if (offset < 0) {
+ taosLSeekFile(pIdxFile, 0, SEEK_SET);
+ taosWriteFile(pIdxFile, &idxEntry, sizeof(SWalIdxEntry));
+ break;
+ }
+ taosLSeekFile(pIdxFile, offset, SEEK_SET);
+ int64_t contLen = taosReadFile(pIdxFile, &idxEntry, sizeof(SWalIdxEntry));
+ if (contLen < 0 || contLen != sizeof(SWalIdxEntry)) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
+ }
+ if ((idxEntry.ver - pFileInfo->firstVer) * sizeof(SWalIdxEntry) != offset) {
+ taosFtruncateFile(pIdxFile, offset);
+ wWarn("vgId:%d wal truncate file %s to offset %ld since entry invalid, entry ver %ld, entry offset %ld",
+ pWal->cfg.vgId, fnameStr, offset, idxEntry.ver, idxEntry.offset);
+ offset -= sizeof(SWalIdxEntry);
+ } else {
+ break;
+ }
+ }
+
+ if (idxEntry.ver < pFileInfo->lastVer) {
+ char fLogNameStr[WAL_FILE_LEN];
+ walBuildLogName(pWal, pFileInfo->firstVer, fLogNameStr);
+ TdFilePtr pLogFile = taosOpenFile(fLogNameStr, TD_FILE_READ);
+ if (pLogFile == NULL) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ wError("vgId:%d, cannot open file %s, since %s", pWal->cfg.vgId, fLogNameStr, terrstr());
+ return -1;
+ }
+ while (idxEntry.ver < pFileInfo->lastVer) {
+ taosLSeekFile(pLogFile, idxEntry.offset, SEEK_SET);
+ SWalCkHead ckHead;
+ taosReadFile(pLogFile, &ckHead, sizeof(SWalCkHead));
+ if (idxEntry.ver != ckHead.head.version) {
+ // todo truncate this idx also
+ taosCloseFile(&pLogFile);
+ wError("vgId:%d, invalid repair case, log seek to %ld to find ver %ld, actual ver %ld", pWal->cfg.vgId,
+ idxEntry.offset, idxEntry.ver, ckHead.head.version);
+ return -1;
+ }
+ idxEntry.ver = ckHead.head.version + 1;
+ idxEntry.offset = idxEntry.offset + sizeof(SWalCkHead) + ckHead.head.bodyLen;
+ wWarn("vgId:%d wal idx append new entry %ld %ld", pWal->cfg.vgId, idxEntry.ver, idxEntry.offset);
+ taosWriteFile(pIdxFile, &idxEntry, sizeof(SWalIdxEntry));
+ }
+ taosCloseFile(&pLogFile);
+ }
+ taosCloseFile(&pIdxFile);
+ }
return 0;
}
diff --git a/source/libs/wal/src/walMgmt.c b/source/libs/wal/src/walMgmt.c
index c939c8c43685ef3dc6ea0b4fde1cd5fbfb33a8d1..a55f00d27702294f6bf996690c80ca5e3765428a 100644
--- a/source/libs/wal/src/walMgmt.c
+++ b/source/libs/wal/src/walMgmt.c
@@ -149,15 +149,21 @@ SWal *walOpen(const char *path, SWalCfg *pCfg) {
walLoadMeta(pWal);
if (walCheckAndRepairMeta(pWal) < 0) {
+ wError("vgId:%d cannot open wal since repair meta file failed", pWal->cfg.vgId);
taosHashCleanup(pWal->pRefHash);
taosRemoveRef(tsWal.refSetId, pWal->refId);
taosThreadMutexDestroy(&pWal->mutex);
taosArrayDestroy(pWal->fileInfoSet);
- taosMemoryFree(pWal);
return NULL;
}
if (walCheckAndRepairIdx(pWal) < 0) {
+ wError("vgId:%d cannot open wal since repair idx file failed", pWal->cfg.vgId);
+ taosHashCleanup(pWal->pRefHash);
+ taosRemoveRef(tsWal.refSetId, pWal->refId);
+ taosThreadMutexDestroy(&pWal->mutex);
+ taosArrayDestroy(pWal->fileInfoSet);
+ return NULL;
}
wDebug("vgId:%d, wal:%p is opened, level:%d fsyncPeriod:%d", pWal->cfg.vgId, pWal, pWal->cfg.level,
diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c
index 2767fed9373aa47ebdbea39b07f28c238db14c7d..e1a8697ad53519b3f0db59f28ee6f82355ac82dd 100644
--- a/source/util/src/tpagedbuf.c
+++ b/source/util/src/tpagedbuf.c
@@ -284,7 +284,6 @@ static char* evacOneDataPage(SDiskbasedBuf* pBuf) {
// all pages are referenced by user, try to allocate new space
if (pn == NULL) {
- assert(0);
int32_t prev = pBuf->inMemPages;
// increase by 50% of previous mem pages
@@ -304,7 +303,6 @@ static char* evacOneDataPage(SDiskbasedBuf* pBuf) {
bufPage = flushPageToDisk(pBuf, d);
}
- ASSERT((bufPage != NULL) || terrno != TSDB_CODE_SUCCESS);
return bufPage;
}
@@ -377,12 +375,6 @@ void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) {
char* availablePage = NULL;
if (NO_IN_MEM_AVAILABLE_PAGES(pBuf)) {
availablePage = evacOneDataPage(pBuf);
-
- // Failed to allocate a new buffer page, and there is an error occurs.
- if (availablePage == NULL) {
- assert(0);
- return NULL;
- }
}
SPageInfo* pi = NULL;
@@ -652,4 +644,4 @@ void clearDiskbasedBuf(SDiskbasedBuf* pBuf) {
pBuf->totalBufSize = 0;
pBuf->allocateId = -1;
pBuf->fileSize = 0;
-}
\ No newline at end of file
+}
diff --git a/tests/docs-examples-test/csharp.sh b/tests/docs-examples-test/csharp.sh
index a8f1ce4119f0f41d8372c9ceb8fef6053caa1563..d7f2670478fa593e1d4c81ccc50c04248a4693cc 100644
--- a/tests/docs-examples-test/csharp.sh
+++ b/tests/docs-examples-test/csharp.sh
@@ -6,23 +6,24 @@ pgrep taosd || taosd >> /dev/null 2>&1 &
pgrep taosadapter || taosadapter >> /dev/null 2>&1 &
cd ../../docs/examples/csharp
-dotnet run --project connect.csproj
+#dotnet run --project connect.csproj
-taos -s "drop database if exists power"
-dotnet run --project sqlinsert.csproj
-dotnet run --project query.csproj
-dotnet run --project asyncquery.csproj
-dotnet run --project subscribe.csproj
+#taos -s "drop database if exists power"
+#dotnet run --project sqlinsert.csproj
+#dotnet run --project query.csproj
+#dotnet run --project asyncquery.csproj
+#dotnet run --project subscribe.csproj
-taos -s "drop topic if exists topic_example"
-taos -s "drop database if exists power"
-dotnet run --project stmtinsert.csproj
+#taos -s "drop topic if exists topic_example"
+#taos -s "drop database if exists power"
+#dotnet run --project stmtinsert.csproj
-taos -s "drop database if exists test"
-dotnet run --project influxdbline.csproj
+#taos -s "drop database if exists test"
+#dotnet run --project influxdbline.csproj
-taos -s "drop database if exists test"
-dotnet run --project optstelnet.csproj
+#taos -s "drop database if exists test"
+#dotnet run --project optstelnet.csproj
-taos -s "drop database if exists test"
-dotnet run --project optsjson.csproj
\ No newline at end of file
+#taos -s "drop database if exists test"
+#dotnet run --project optsjson.csproji
+echo "uncomment temporily"
diff --git a/tests/parallel_test/run_container.sh b/tests/parallel_test/run_container.sh
index f0ee9be46fa5c3f399cde738cad29aa3f03ea7b8..bb57f238f029e2098f70fc553a8e6280ff46e5c4 100755
--- a/tests/parallel_test/run_container.sh
+++ b/tests/parallel_test/run_container.sh
@@ -79,9 +79,11 @@ fi
ulimit -c unlimited
TMP_DIR=$WORKDIR/tmp
-
+SOURCEDIR=$WORKDIR/src
MOUNT_DIR=""
+packageName="TDengine-server-3.0.1.0-Linux-x64.tar.gz"
rm -rf ${TMP_DIR}/thread_volume/$thread_no/sim
+mkdir -p $SOURCEDIR
mkdir -p ${TMP_DIR}/thread_volume/$thread_no/sim/tsim
mkdir -p ${TMP_DIR}/thread_volume/$thread_no/coredump
rm -rf ${TMP_DIR}/thread_volume/$thread_no/coredump/*
@@ -90,6 +92,11 @@ if [ ! -d "${TMP_DIR}/thread_volume/$thread_no/$exec_dir" ]; then
echo "cp -rf ${REPDIR}/tests/$subdir ${TMP_DIR}/thread_volume/$thread_no/"
cp -rf ${REPDIR}/tests/$subdir ${TMP_DIR}/thread_volume/$thread_no/
fi
+
+if [ ! -f "${SOURCEDIR}/${packageName}" ]; then
+ wget -P ${SOURCEDIR} https://taosdata.com/assets-download/3.0/${packageName}
+fi
+
MOUNT_DIR="$TMP_DIR/thread_volume/$thread_no/$exec_dir:$CONTAINER_TESTDIR/tests/$exec_dir"
echo "$thread_no -> ${exec_dir}:$cmd"
coredump_dir=`cat /proc/sys/kernel/core_pattern | xargs dirname`
@@ -97,6 +104,7 @@ coredump_dir=`cat /proc/sys/kernel/core_pattern | xargs dirname`
docker run \
-v $REP_MOUNT_PARAM \
-v $MOUNT_DIR \
+ -v ${SOURCEDIR}:/usr/local/src/ \
-v "$TMP_DIR/thread_volume/$thread_no/sim:${SIM_DIR}" \
-v ${TMP_DIR}/thread_volume/$thread_no/coredump:$coredump_dir \
-v $WORKDIR/taos-connector-python/taos:/usr/local/lib/python3.8/site-packages/taos:ro \
diff --git a/tests/system-test/0-others/compatibility.py b/tests/system-test/0-others/compatibility.py
new file mode 100644
index 0000000000000000000000000000000000000000..25b023bb76e9e9f34f288624a5f33daeca78d717
--- /dev/null
+++ b/tests/system-test/0-others/compatibility.py
@@ -0,0 +1,157 @@
+from urllib.parse import uses_relative
+import taos
+import sys
+import os
+import time
+
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+from util.dnodes import TDDnodes
+from util.dnodes import TDDnode
+from util.cluster import *
+
+
+class TDTestCase:
+ def caseDescription(self):
+ '''
+ 3.0 data compatibility test
+ case1: basedata version is 3.0.1.0
+ '''
+ return
+
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor())
+
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files or "taosd.exe" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ def getCfgPath(self):
+ buildPath = self.getBuildPath()
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ cfgPath = buildPath + "/../sim/dnode1/cfg/"
+ else:
+ cfgPath = buildPath + "/../sim/dnode1/cfg/"
+
+ return cfgPath
+
+ def installTaosd(self,bPath,cPath):
+ # os.system(f"rmtaos && mkdir -p {self.getBuildPath()}/build/lib/temp && mv {self.getBuildPath()}/build/lib/libtaos.so* {self.getBuildPath()}/build/lib/temp/ ")
+ # os.system(f" mv {bPath}/build {bPath}/build_bak ")
+ # os.system(f"mv {self.getBuildPath()}/build/lib/libtaos.so {self.getBuildPath()}/build/lib/libtaos.so_bak ")
+ # os.system(f"mv {self.getBuildPath()}/build/lib/libtaos.so.1 {self.getBuildPath()}/build/lib/libtaos.so.1_bak ")
+
+ packagePath="/usr/local/src/"
+ packageName="TDengine-server-3.0.1.0-Linux-x64.tar.gz"
+ os.system(f"cd {packagePath} && tar xvf TDengine-server-3.0.1.0-Linux-x64.tar.gz && cd TDengine-server-3.0.1.0 && ./install.sh -e no " )
+ tdDnodes.stop(1)
+ print(f"start taosd: nohup taosd -c {cPath} & ")
+ os.system(f" nohup taosd -c {cPath} & " )
+ sleep(1)
+
+
+
+ def buildTaosd(self,bPath):
+ # os.system(f"mv {bPath}/build_bak {bPath}/build ")
+ os.system(f" cd {bPath} && make install ")
+
+
+ def run(self):
+ bPath=self.getBuildPath()
+ cPath=self.getCfgPath()
+ dbname = "test"
+ stb = f"{dbname}.meters"
+ self.installTaosd(bPath,cPath)
+ tableNumbers=100
+ recordNumbers1=100
+ recordNumbers2=1000
+ tdsqlF=tdCom.newTdSql()
+ print(tdsqlF)
+ tdsqlF.query(f"SELECT SERVER_VERSION();")
+ print(tdsqlF.query(f"SELECT SERVER_VERSION();"))
+ oldServerVersion=tdsqlF.queryResult[0][0]
+ tdLog.info(f"Base server version is {oldServerVersion}")
+ tdsqlF.query(f"SELECT CLIENT_VERSION();")
+ # the oldClientVersion can't be updated in the same python process,so the version is new compiled verison
+ oldClientVersion=tdsqlF.queryResult[0][0]
+ tdLog.info(f"Base client version is {oldClientVersion}")
+
+ tdLog.printNoPrefix(f"==========step1:prepare and check data in old version-{oldServerVersion}")
+ tdLog.info(f"taosBenchmark -t {tableNumbers} -n {recordNumbers1} -y ")
+ os.system(f"taosBenchmark -t {tableNumbers} -n {recordNumbers1} -y ")
+ sleep(3)
+
+ # tdsqlF.query(f"select count(*) from {stb}")
+ # tdsqlF.checkData(0,0,tableNumbers*recordNumbers1)
+ os.system("pkill taosd")
+ sleep(1)
+
+ tdLog.printNoPrefix("==========step2:update new version ")
+ self.buildTaosd(bPath)
+ tdDnodes.start(1)
+ sleep(1)
+ tdsql=tdCom.newTdSql()
+ print(tdsql)
+
+
+ tdsql.query(f"SELECT SERVER_VERSION();")
+ nowServerVersion=tdsql.queryResult[0][0]
+ tdLog.info(f"New server version is {nowServerVersion}")
+ tdsql.query(f"SELECT CLIENT_VERSION();")
+ nowClientVersion=tdsql.queryResult[0][0]
+ tdLog.info(f"New client version is {nowClientVersion}")
+
+ tdLog.printNoPrefix(f"==========step3:prepare and check data in new version-{nowServerVersion}")
+ tdsql.query(f"select count(*) from {stb}")
+ tdsql.checkData(0,0,tableNumbers*recordNumbers1)
+ os.system(f"taosBenchmark -t {tableNumbers} -n {recordNumbers2} -y ")
+ tdsql.query(f"select count(*) from {stb}")
+ tdsql.checkData(0,0,tableNumbers*recordNumbers2)
+
+ tdsql=tdCom.newTdSql()
+ tdLog.printNoPrefix(f"==========step4:verify backticks in taos Sql-TD18542")
+ tdsql.execute("drop database if exists db")
+ tdsql.execute("create database db")
+ tdsql.execute("use db")
+ tdsql.execute("create stable db.stb1 (ts timestamp, c1 int) tags (t1 int);")
+ tdsql.execute("insert into db.ct1 using db.stb1 TAGS(1) values(now(),11);")
+ tdsql.error(" insert into `db.ct2` using db.stb1 TAGS(9) values(now(),11);")
+ tdsql.error(" insert into db.`db.ct2` using db.stb1 TAGS(9) values(now(),11);")
+ tdsql.execute("insert into `db`.ct3 using db.stb1 TAGS(3) values(now(),13);")
+ tdsql.query("select * from db.ct3")
+ tdsql.checkData(0,1,13)
+ tdsql.execute("insert into db.`ct4` using db.stb1 TAGS(4) values(now(),14);")
+ tdsql.query("select * from db.ct4")
+ tdsql.checkData(0,1,14)
+ tdsql.query("describe information_schema.ins_databases;")
+ qRows=tdsql.queryRows
+ for i in range(qRows) :
+ if tdsql.queryResult[i][0]=="retentions" :
+ return True
+ else:
+ return False
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/database_pre_suf.py b/tests/system-test/1-insert/database_pre_suf.py
new file mode 100755
index 0000000000000000000000000000000000000000..fe788af0db43d39fe38c5d2fb5ffe21e8dfafeb4
--- /dev/null
+++ b/tests/system-test/1-insert/database_pre_suf.py
@@ -0,0 +1,377 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import random
+import os
+import time
+import taos
+import subprocess
+from faker import Faker
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.dnodes import tdDnodes
+from util.dnodes import *
+
+class TDTestCase:
+ updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+ "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
+ "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143}
+
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.testcasePath = os.path.split(__file__)[0]
+ self.testcaseFilename = os.path.split(__file__)[-1]
+ os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+ self.db = "pre_suf"
+
+ def dropandcreateDB_random(self,database,n,vgroups,table_prefix,table_suffix,check_result):
+ ts = 1630000000000
+ num_random = 100
+ fake = Faker('zh_CN')
+ tdSql.execute('''drop database if exists %s ;''' %database)
+ tdSql.execute('''create database %s keep 36500 vgroups %d table_prefix %d table_suffix %d;'''%(database,vgroups,table_prefix,table_suffix))
+ tdSql.execute('''use %s;'''%database)
+
+ tdSql.execute('''create stable stable_1 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+ q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+ q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+ q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
+ tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''')
+ tdSql.execute('''create stable stable_2 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+ q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
+ tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);''')
+
+ for i in range(10*n):
+ tdSql.execute('''create table bj_%d (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp ) ;'''%i)
+ tdSql.execute('''create table sh_%d (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp ) ;'''%i)
+ tdSql.execute('''create table bj_table_%d_r (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp ) ;'''%i)
+ tdSql.execute('''create table sh_table_%d_r (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp ) ;'''%i)
+ tdSql.execute('''create table hn_table_%d_r \
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+ q_binary1 binary(100) , q_nchar1 nchar(100) ,q_binary2 binary(100) , q_nchar2 nchar(100) ,q_binary3 binary(100) , q_nchar3 nchar(100) ,q_binary4 binary(100) , q_nchar4 nchar(100) ,\
+ q_binary5 binary(100) , q_nchar5 nchar(100) ,q_binary6 binary(100) , q_nchar6 nchar(100) ,q_binary7 binary(100) , q_nchar7 nchar(100) ,q_binary8 binary(100) , q_nchar8 nchar(100) ,\
+ q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) ;'''%i)
+ tdSql.execute('''create table bj_stable_1_%d using stable_1 tags('bj_stable_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+ tdSql.execute('''create table sh_table_%d_a using stable_1 tags('sh_a_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+ tdSql.execute('''create table sh_table_%d_b using stable_1 tags('sh_b_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+ tdSql.execute('''create table sh_table_%d_c using stable_1 tags('sh_c_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+
+ tdSql.execute('''create table bj_table_%d_a using stable_1 tags('bj_a_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+ tdSql.execute('''create table bj_table_%d_b using stable_1 tags('bj_b_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+ tdSql.execute('''create table bj_table_%d_c using stable_1 tags('bj_c_table_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+
+
+ tdSql.execute('''create table tj_table_%d_a using stable_2 tags('tj_a_table_2_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+ tdSql.execute('''create table tj_table_%d_b using stable_2 tags('tj_b_table_2_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(i,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+
+
+ # create stream
+ tdSql.execute('''create stream current_stream into stream_max_stable_1 as select _wstart as start, _wend as end, max(q_int) as max_int, min(q_bigint) as min_int from stable_1 where ts is not null interval (5s);''')
+
+ # insert data
+ for i in range(num_random*n):
+ tdSql.execute('''insert into bj_stable_1_1 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double , q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1),
+ fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
+ fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
+ tdSql.execute('''insert into hn_table_1_r (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000, fake.random_int(min=-2147483647, max=2147483647, step=1) ,
+ fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1) ,
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
+ fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
+
+ tdSql.execute('''insert into bj_stable_1_2 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8)\
+ values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000, fake.random_int(min=0, max=2147483647, step=1),
+ fake.random_int(min=0, max=9223372036854775807, step=1),
+ fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
+ fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
+ tdSql.execute('''insert into hn_table_2_r (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000, fake.random_int(min=0, max=2147483647, step=1),
+ fake.random_int(min=0, max=9223372036854775807, step=1),
+ fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
+ fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
+
+ tdSql.execute('''insert into bj_stable_1_3 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000, fake.random_int(min=-0, max=2147483647, step=1),
+ fake.random_int(min=-0, max=9223372036854775807, step=1),
+ fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
+ fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
+
+ tdSql.execute('''insert into bj_stable_1_4 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000 +1, fake.random_int(min=-0, max=2147483647, step=1),
+ fake.random_int(min=-0, max=9223372036854775807, step=1),
+ fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
+ fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
+
+ tdSql.execute('''insert into bj_stable_1_5 (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts,\
+ q_binary1 , q_nchar1 , q_binary2 , q_nchar2 , q_binary3 , q_nchar3 , q_binary4 , q_nchar4 , q_binary5 , q_nchar5 , q_binary6 , q_nchar6 , q_binary7 , q_nchar7, q_binary8 , q_nchar8) \
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d, 'binary1.%s', 'nchar1.%s', 'binary2.%s', 'nchar2.%s', 'binary3.%s', 'nchar3.%s', \
+ 'binary4.%s', 'nchar4.%s', 'binary5.%s', 'nchar5.%s', 'binary6.%s', 'nchar6.%s', 'binary7.%s', 'nchar7.%s', 'binary8.%s', 'nchar8.%s') ;'''
+ % (ts + i*1000 +10, fake.random_int(min=-0, max=2147483647, step=1),
+ fake.random_int(min=-0, max=9223372036854775807, step=1),
+ fake.random_int(min=-0, max=32767, step=1) , fake.random_int(min=-0, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i, fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() ,
+ fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr() , fake.pystr()))
+
+ tdSql.query("select count(*) from stable_1;")
+ tdSql.checkData(0,0,5*num_random*n)
+ tdSql.query("select count(*) from hn_table_1_r;")
+ tdSql.checkData(0,0,num_random*n)
+
+ # stream data check
+ tdSql.query("select start,end,max_int from stream_max_stable_1 ;")
+ tdSql.checkRows(20)
+ tdSql.query("select sum(max_int) from stream_max_stable_1 ;")
+ stream_data_1 = tdSql.queryResult[0][0]
+ tdSql.query("select sum(min_int) from stream_max_stable_1 ;")
+ stream_data_2 = tdSql.queryResult[0][0]
+ tdSql.query("select sum(max_int),sum(min_int) from (select _wstart as start, _wend as end, max(q_int) as max_int, min(q_bigint) as min_int from stable_1 where ts is not null interval (5s));")
+ sql_data_1 = tdSql.queryResult[0][0]
+ sql_data_2 = tdSql.queryResult[0][1]
+
+ self.stream_value_check(stream_data_1,sql_data_1)
+ self.stream_value_check(stream_data_2,sql_data_2)
+
+ tdSql.query("select sum(max_int),sum(min_int) from (select _wstart as start, _wend as end, max(q_int) as max_int, min(q_bigint) as min_int from stable_1 interval (5s));")
+ sql_data_1 = tdSql.queryResult[0][0]
+ sql_data_2 = tdSql.queryResult[0][1]
+
+ self.stream_value_check(stream_data_1,sql_data_1)
+ self.stream_value_check(stream_data_2,sql_data_2)
+
+ tdSql.query("select max(max_int) from stream_max_stable_1 ;")
+ stream_data_1 = tdSql.queryResult[0][0]
+ tdSql.query("select min(min_int) from stream_max_stable_1 ;")
+ stream_data_2 = tdSql.queryResult[0][0]
+ tdSql.query("select max(q_int) as max_int, min(q_bigint) as min_int from stable_1;")
+ sql_data_1 = tdSql.queryResult[0][0]
+ sql_data_2 = tdSql.queryResult[0][1]
+
+ self.stream_value_check(stream_data_1,sql_data_1)
+ self.stream_value_check(stream_data_2,sql_data_2)
+
+
+ tdSql.query(" select * from information_schema.ins_databases where name = '%s';" %database)
+ print(tdSql.queryResult)
+
+ # tdSql.query(" select table_prefix,table_suffix from information_schema.ins_databases where name = '%s';" %database)
+ # print(tdSql.queryResult)
+ #TD-19082
+
+ #tdSql.query(" select * from information_schema.ins_tables where db_name = '%s';" %database)
+ #print(tdSql.queryResult)
+
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s';" %database)
+ queryRows = len(tdSql.queryResult)
+ for i in range(queryRows):
+ print("row=%d, vgroup_id=%s, tbname=%s " %(i,tdSql.queryResult[i][1],tdSql.queryResult[i][0]))
+
+ tdLog.info("\n=============flush database ====================\n")
+
+ tdSql.execute(" flush database %s;" %database)
+
+ tdSql.query(" select * from information_schema.ins_databases where name = '%s';" %database)
+ print(tdSql.queryResult)
+
+ # tdSql.query(" select table_prefix,table_suffix from information_schema.ins_databases where name = '%s';" %database)
+ # print(tdSql.queryResult)
+ #TD-19082
+
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s';" %database)
+ queryRows = len(tdSql.queryResult)
+ for i in range(queryRows):
+ print("row=%d, vgroup_id=%s, tbname=%s " %(i,tdSql.queryResult[i][1],tdSql.queryResult[i][0]))
+
+
+ # check in one vgroup
+ if check_result == 'Y':
+ #base table : sh_table_0_a
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='sh_table_0_a';" %(database))
+ base_value_table_name = tdSql.queryResult[0][0]
+ base_value_table_vgroup = tdSql.queryResult[0][1]
+
+ #check table :sh_table_i_a
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'sh_table_%%_a';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='sh_table_%d_a';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :sh_table_i_b
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'sh_table_%%_b';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='sh_table_%d_b';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :sh_table_i_c
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'sh_table_%%_c';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='sh_table_%d_c';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :sh_table_i_r
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'sh_table_%%_r';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='sh_table_%d_r';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :bj_table_i_a
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'bj_table_%%_a';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='bj_table_%d_a';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :bj_table_i_b
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'bj_table_%%_b';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='bj_table_%d_b';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :bj_table_i_c
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'bj_table_%%_c';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='bj_table_%d_c';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :bj_table_i_r
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'bj_table_%%_r';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='bj_table_%d_r';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :hn_table_i_r
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'hn_table_%%_r';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='hn_table_%d_r';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :tj_table_i_a
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'tj_table_%%_a';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='tj_table_%d_a';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ #check table :tj_table_i_b
+ check_rows = tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name like 'tj_table_%%_b';" %(database))
+ for i in range(check_rows):
+ tdSql.query(" select table_name,vgroup_id from information_schema.ins_tables where db_name = '%s' and table_name='tj_table_%d_b';" %(database,i))
+ self.value_check(base_value_table_name,base_value_table_vgroup)
+
+ else:
+ pass
+
+ def value_check(self,base_value_table_name,base_value_table_vgroup):
+ check_value_table_name = tdSql.queryResult[0][0]
+ check_value_table_vgroup = tdSql.queryResult[0][1]
+ #tdLog.info(f"{base_value_table_name},{base_value_table_vgroup},{check_value_table_name},{check_value_table_vgroup}")
+
+ if base_value_table_vgroup==check_value_table_vgroup:
+ tdLog.info(f"checkEqual success, base_table_name={base_value_table_name},base_table_host={base_value_table_vgroup} ,check_table_name={check_value_table_name},check_table_host={check_value_table_vgroup}")
+ else :
+ tdLog.exit(f"checkEqual error, base_table_name=={base_value_table_name},base_table_host={base_value_table_vgroup} ,check_table_name={check_value_table_name},check_table_host={check_value_table_vgroup}")
+
+ def stream_value_check(self,stream_data,sql_data):
+ if stream_data==sql_data:
+ tdLog.info(f"checkEqual success, stream_data={stream_data},sql_data={sql_data}")
+ else :
+ tdLog.exit(f"checkEqual error, stream_data=={stream_data},sql_data={sql_data}")
+
+ def run(self):
+ startTime = time.time()
+
+ os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+ #(self,database,n,vgroups,table_prefix,table_suffix)
+ self.dropandcreateDB_random("%s" %self.db, 1,2,0,0,'N')
+ self.dropandcreateDB_random("%s" %self.db, 1,2,0,2,'N')
+ self.dropandcreateDB_random("%s" %self.db, 1,2,2,0,'N')
+ self.dropandcreateDB_random("%s" %self.db, 1,2,3,3,'Y')
+ self.dropandcreateDB_random("%s" %self.db, 1,3,3,3,'Y')
+ self.dropandcreateDB_random("%s" %self.db, 1,4,4,4,'Y')
+ self.dropandcreateDB_random("%s" %self.db, 1,5,5,5,'Y')
+
+
+ #taos -f sql
+ print("taos -f sql start!")
+ taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename)
+ _ = subprocess.check_output(taos_cmd1, shell=True)
+ print("taos -f sql over!")
+
+
+ endTime = time.time()
+ print("total time %ds" % (endTime - startTime))
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index 604add0eb3053bce78f70a824142a8e1263c58ac..684d6a16e20f8d7556e74b34d3578738d265f411 100755
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -17,6 +17,7 @@ python3 ./test.py -f 0-others/udf_cfg2.py
python3 ./test.py -f 0-others/sysinfo.py
python3 ./test.py -f 0-others/user_control.py
python3 ./test.py -f 0-others/fsync.py
+python3 ./test.py -f 0-others/compatibility.py
python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py
python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py
@@ -35,6 +36,7 @@ python3 ./test.py -f 1-insert/mutil_stage.py
python3 ./test.py -f 1-insert/table_param_ttl.py -R
python3 ./test.py -f 1-insert/update_data_muti_rows.py
python3 ./test.py -f 1-insert/db_tb_name_check.py
+python3 ./test.py -f 1-insert/database_pre_suf.py
python3 ./test.py -f 2-query/abs.py
python3 ./test.py -f 2-query/abs.py -R
@@ -156,8 +158,8 @@ python3 ./test.py -f 2-query/sin.py
python3 ./test.py -f 2-query/sin.py -R
python3 ./test.py -f 2-query/smaTest.py
python3 ./test.py -f 2-query/smaTest.py -R
-#python3 ./test.py -f 2-query/sml.py
-#python3 ./test.py -f 2-query/sml.py -R
+python3 ./test.py -f 2-query/sml.py
+python3 ./test.py -f 2-query/sml.py -R
python3 ./test.py -f 2-query/spread.py
python3 ./test.py -f 2-query/spread.py -R
python3 ./test.py -f 2-query/sqrt.py
@@ -512,6 +514,6 @@ python3 ./test.py -f 2-query/count_partition.py -Q 3
python3 ./test.py -f 2-query/max_partition.py -Q 3
python3 ./test.py -f 2-query/last_row.py -Q 3
python3 ./test.py -f 2-query/tsbsQuery.py -Q 3
-#python3 ./test.py -f 2-query/sml.py -Q 3
+python3 ./test.py -f 2-query/sml.py -Q 3
python3 ./test.py -f 2-query/interp.py -Q 3
diff --git a/tools/shell/src/shellUtil.c b/tools/shell/src/shellUtil.c
index 0430428c381bbcee3f4abf51a44e61bc849f094d..8c47d165557317dbcf710dda7d72b77037340692 100644
--- a/tools/shell/src/shellUtil.c
+++ b/tools/shell/src/shellUtil.c
@@ -143,7 +143,7 @@ void shellCheckConnectMode() {
shell.args.port = 6041;
}
shell.args.dsn = taosMemoryCalloc(1, 1024);
- snprintf(shell.args.dsn, 1024, "ws://%s:%d/rest/ws",
+ snprintf(shell.args.dsn, 1024, "ws://%s:%d",
shell.args.host, shell.args.port);
}
shell.args.cloud = false;
diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c
index b8b8392b961a92263f791ee4b480e61a8c148efd..94bb909e296cb791538301abae42e01fb704b448 100644
--- a/tools/shell/src/shellWebsocket.c
+++ b/tools/shell/src/shellWebsocket.c
@@ -206,26 +206,31 @@ void shellRunSingleCommandWebsocketImp(char *command) {
printMode = true; // When output to a file, the switch does not work.
}
- if (!shell.ws_conn && shell_conn_ws_server(0)) {
- return;
- }
-
shell.stop_query = false;
- st = taosGetTimestampUs();
+ WS_RES* res;
- WS_RES* res = ws_query_timeout(shell.ws_conn, command, shell.args.timeout);
- int code = ws_errno(res);
- if (code != 0) {
- et = taosGetTimestampUs();
- fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6);
- if (code == TSDB_CODE_WS_SEND_TIMEOUT || code == TSDB_CODE_WS_RECV_TIMEOUT) {
- fprintf(stderr, "Hint: use -t to increase the timeout in seconds\n");
- } else if (code == TSDB_CODE_WS_INTERNAL_ERRO || code == TSDB_CODE_WS_CLOSED) {
- fprintf(stderr, "TDengine server is down, will try to reconnect\n");
- shell.ws_conn = NULL;
+ for (int reconnectNum = 0; reconnectNum < 2; reconnectNum++) {
+ if (!shell.ws_conn && shell_conn_ws_server(0)) {
+ return;
}
- ws_free_result(res);
- return;
+ st = taosGetTimestampUs();
+
+ res = ws_query_timeout(shell.ws_conn, command, shell.args.timeout);
+ int code = ws_errno(res);
+ if (code != 0 && !shell.stop_query) {
+ et = taosGetTimestampUs();
+ fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6);
+ if (code == TSDB_CODE_WS_SEND_TIMEOUT || code == TSDB_CODE_WS_RECV_TIMEOUT) {
+ fprintf(stderr, "Hint: use -t to increase the timeout in seconds\n");
+ } else if (code == TSDB_CODE_WS_INTERNAL_ERRO || code == TSDB_CODE_WS_CLOSED) {
+ fprintf(stderr, "TDengine server is down, will try to reconnect\n");
+ shell.ws_conn = NULL;
+ }
+ ws_free_result(res);
+ if (reconnectNum == 0) continue;
+ return;
+ }
+ break;
}
double execute_time = ws_take_timing(res)/1E6;