diff --git a/.clang-format b/.clang-format
index e58d518b3b8cacdd1e13dd965805fa364a996eb2..56ca83e724ad0b804a10b9be0dd42aa7a05eeaf7 100644
--- a/.clang-format
+++ b/.clang-format
@@ -88,4 +88,3 @@ Standard: Auto
TabWidth: 8
UseTab: Never
...
-
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..912b302ad23d47c46708d672175a908f2dbc74e8
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+*.py linguist-detectable=false
diff --git a/cmake/cmake.platform b/cmake/cmake.platform
index 887fbd86d55d782cdf3c1d7c95dfee2dc2ec446d..3aa1ffc07e73acdf480a21b478d55e05153694f8 100644
--- a/cmake/cmake.platform
+++ b/cmake/cmake.platform
@@ -46,7 +46,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
MESSAGE("Current system processor is ${CMAKE_SYSTEM_PROCESSOR}.")
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64")
- MESSAGE("Current system arch is arm64")
+ MESSAGE("Current system arch is 64")
SET(TD_DARWIN_64 TRUE)
ADD_DEFINITIONS("-D_TD_DARWIN_64")
ENDIF ()
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index 1751549680e2f4daa0be4b4c5e0bc51ccd151334..74c2dbca30f1b0cf2d0d209e5ccc8a19b71e9980 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG d237772
+ GIT_TAG 212c34d
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/cmake/taosws_CMakeLists.txt.in b/cmake/taosws_CMakeLists.txt.in
index 506559a245ee7a3e506e8481a12a3fe7f01dd5ac..09a762e7cc8536c353421f324819723f53e5d9b7 100644
--- a/cmake/taosws_CMakeLists.txt.in
+++ b/cmake/taosws_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taosws-rs
ExternalProject_Add(taosws-rs
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
- GIT_TAG 7a54d21
+ GIT_TAG b91b39c
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/docs/en/01-index.md b/docs/en/01-index.md
index 363fa1101ccb0509805977154c04aaef8e052468..5265be42f81c4f43fa73e5b7d603d8989c2a5671 100644
--- a/docs/en/01-index.md
+++ b/docs/en/01-index.md
@@ -4,25 +4,24 @@ sidebar_label: Documentation Home
slug: /
---
-
-TDengine is an open source, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators.
+TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators.
To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.
-TDengine greatly improves the efficiency of data ingestion, querying and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [“Concepts”](./concept) thoroughly.
+TDengine greatly improves the efficiency of data ingestion, querying, and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [Concepts](./concept) thoroughly.
-If you are a developer, please read the [“Developer Guide”](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work.
+If you are a developer, please read the [Developer Guide](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, and make a few changes to accommodate your application, and it will work.
-We live in the era of big data, and scale-up is unable to meet the growing needs of business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to ["cluster deployment"](../deployment).
+We live in the era of big data, and scale-up is unable to meet the growing needs of the business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to [Cluster Deployment](../deployment).
-TDengine uses ubiquitious SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll up, interpolation and time weighted average, among many others. The ["SQL Reference"](./taos-sql) chapter describes the SQL syntax in detail, and lists the various supported commands and functions.
+TDengine uses ubiquitous SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll-up, interpolation, and time-weighted average, among many others. The [SQL Reference](./taos-sql) chapter describes the SQL syntax in detail and lists the various supported commands and functions.
-If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the ["Administration"](./operation) section.
+If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the [Administration](./operation) section.
-If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the ["Reference"](./reference) chapter.
+If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the [Reference](./reference) chapter.
-If you are very interested in the internal design of TDengine, please read the chapter ["Inside TDengine”](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
+If you are very interested in the internal design of TDengine, please read the chapter [Inside TDengine](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
-TDengine is an open source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation, or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
+TDengine is an open-source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
-Together, we make a difference.
+Together, we make a difference!
diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md
index 8aa40aed091d0dd28348600fd8e56eb0af510fb9..d385845d7c57203d6e1cc8ddb8d53307f2655914 100644
--- a/docs/en/02-intro/index.md
+++ b/docs/en/02-intro/index.md
@@ -3,7 +3,7 @@ title: Introduction
toc_max_heading_level: 2
---
-TDengine is an open source, high-performance, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
+TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine.
@@ -11,48 +11,69 @@ This section introduces the major features, competitive advantages, typical use-
The major features are listed below:
-1. While TDengine supports [using SQL to insert](/develop/insert-data/sql-writing), it also supports [Schemaless writing](/reference/schemaless/) just like NoSQL databases. TDengine also supports standard protocols like [InfluxDB LINE](/develop/insert-data/influxdb-line),[OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](/develop/insert-data/opentsdb-json) among others.
-2. TDengine supports seamless integration with third-party data collection agents like [Telegraf](/third-party/telegraf),[Prometheus](/third-party/prometheus),[StatsD](/third-party/statsd),[collectd](/third-party/collectd),[icinga2](/third-party/icinga2), [TCollector](/third-party/tcollector), [EMQX](/third-party/emq-broker), [HiveMQ](/third-party/hive-mq-broker). These agents can write data into TDengine with simple configuration and without a single line of code.
-3. Support for [all kinds of queries](/develop/query-data), including aggregation, nested query, downsampling, interpolation and others.
-4. Support for [user defined functions](/develop/udf).
-5. Support for [caching](/develop/cache). TDengine always saves the last data point in cache, so Redis is not needed in some scenarios.
-6. Support for [continuous query](../develop/stream).
-7. Support for [data subscription](../develop/tmq with the capability to specify filter conditions.
-8. Support for [cluster](../deployment/), with the capability of increasing processing power by adding more nodes. High availability is supported by replication.
-9. Provides an interactive [command-line interface](/reference/taos-shell) for management, maintenance and ad-hoc queries.
-10. Provides many ways to [import](/operation/import) and [export](/operation/export) data.
-11. Provides [monitoring](/operation/monitor) on running instances of TDengine.
-12. Provides [connectors](/reference/connector/) for [C/C++](/reference/connector/cpp), [Java](/reference/connector/java), [Python](/reference/connector/python), [Go](/reference/connector/go), [Rust](/reference/connector/rust), [Node.js](/reference/connector/node) and other programming languages.
-13. Provides a [REST API](/reference/rest-api/).
-14. Supports seamless integration with [Grafana](/third-party/grafana) for visualization.
-15. Supports seamless integration with Google Data Studio.
-
-For more details on features, please read through the entire documentation.
+1. Insert data
+ - Supports [using SQL to insert](../develop/insert-data/sql-writing).
+   - Supports [schemaless writing](../reference/schemaless/) just like NoSQL databases. It also supports standard protocols like [InfluxDB Line](../develop/insert-data/influxdb-line), [OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON](../develop/insert-data/opentsdb-json) among others.
+   - Supports seamless integration with third-party tools like [Telegraf](../third-party/telegraf/), [Prometheus](../third-party/prometheus/), [collectd](../third-party/collectd/), [StatsD](../third-party/statsd/), [TCollector](../third-party/tcollector/), [EMQX](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker), and [Icinga2](../third-party/icinga2/). These tools can write data into TDengine with simple configuration and without a single line of code.
+2. Query data
+ - Supports standard [SQL](../taos-sql/), including nested query.
+ - Supports [time series specific functions](../taos-sql/function/#time-series-extensions) and [time series specific queries](../taos-sql/distinguished), like downsampling, interpolation, cumulated sum, time weighted average, state window, session window and many others.
+ - Supports [User Defined Functions (UDF)](../taos-sql/udf).
+3. [Caching](../develop/cache/): TDengine always saves the last data point in cache, so Redis is not needed for time-series data processing.
+4. [Stream Processing](../develop/stream/): Not only is continuous query supported, but TDengine also supports event-driven stream processing, so Flink or Spark is not needed for time-series data processing.
+5. [Data Subscription](../develop/tmq/): Applications can subscribe to a table or a set of tables. The API is the same as Kafka, but you can specify filter conditions.
+6. Visualization
+ - Supports seamless integration with [Grafana](../third-party/grafana/) for visualization.
+ - Supports seamless integration with Google Data Studio.
+7. Cluster
+ - Supports [cluster](../deployment/) with the capability of increasing processing power by adding more nodes.
+ - Supports [deployment on Kubernetes](../deployment/k8s/).
+ - Supports high availability via data replication.
+8. Administration
+ - Provides [monitoring](../operation/monitor) on running instances of TDengine.
+ - Provides many ways to [import](../operation/import) and [export](../operation/export) data.
+9. Tools
+ - Provides an interactive [Command-line Interface (CLI)](../reference/taos-shell) for management, maintenance and ad-hoc queries.
+ - Provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine.
+10. Programming
+ - Provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages.
+ - Provides a [REST API](../reference/rest-api/).
+
+For more details on features, please read through the entire documentation.
## Competitive Advantages
By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other time series databases, with the following advantages.
-- **High-Performance**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
+- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
-- **Simplified Solution**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
+- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
-- **Cloud Native**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
+- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for Kubernetes deployment and full observability, TDengine is a cloud native Time-series Database and can be deployed on public, private or hybrid clouds.
-- **Ease of Use**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
+- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to
+  deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
-- **Easy Data Analytics**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
+- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
-- **Open Source**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide.
+- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
-With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly;2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly;3: With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
+With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
+
+1. With its superior performance, the computing and storage resources are reduced significantly.
+2. With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly.
+3. With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
## Technical Ecosystem
+
This is how TDengine would be situated, in a typical time-series data processing platform:
+
+
![TDengine Database Technical Ecosystem ](eco_system.webp)
-Figure 1. TDengine Technical Ecosystem
+Figure 1. TDengine Technical Ecosystem
+
On the left-hand side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right-hand side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides an interactive command-line interface and a web interface for management and maintenance.
@@ -62,48 +83,47 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
### Characteristics and Requirements of Data Sources
-| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| -------------------------------------------------------- | ------------------ | ----------------------- | ------------------- | :----------------------------------------------------------- |
-| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry.|
-| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
-| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. |
+| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------ | ------------------ | ----------------------- | ------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry. |
+| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
+| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. |
### System Architecture Requirements
-| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
+| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ----------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| A simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions. There is no need to integrate any additional third-party products. |
-| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. |
-| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. |
+| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. |
+| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. |
### System Function Requirements
-| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
-| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level.|
-| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. |
+| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| -------------------------------------------- | ------------------ | ----------------------- | ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level. |
+| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. |
### System Performance Requirements
-| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
-| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
-| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products.|
-| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
+| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------- |
+| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
+| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
+| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
### System Maintenance Requirements
-| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
-| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
-| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the Taos shell for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs.|
-| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine.|
+| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| --------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
+| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the TDengine CLI for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs. |
+| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine. |
## Comparison with other databases
- [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/2022/02/23/4975.html)
- [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/2022/02/24/5120.html)
-- [TDengine vs InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse](https://www.tdengine.com/downloads/TDengine_Testing_Report_en.pdf)
- [TDengine vs OpenTSDB](https://tdengine.com/2019/09/12/710.html)
- [TDengine vs Cassandra](https://tdengine.com/2019/09/12/708.html)
- [TDengine vs InfluxDB](https://tdengine.com/2019/09/12/706.html)
diff --git a/docs/en/04-concept/index.md b/docs/en/04-concept/index.md
index 44dcad82fc8e77bdb63ed3f8d5a36b9542c72aea..b0a0c25d85e99244858a461317ff54359d1ceff8 100644
--- a/docs/en/04-concept/index.md
+++ b/docs/en/04-concept/index.md
@@ -104,15 +104,15 @@ Each row contains the device ID, time stamp, collected metrics (current, voltage
## Metric
-Metric refers to the physical quantity collected by sensors, equipment or other types of data collection devices, such as current, voltage, temperature, pressure, GPS position, etc., which change with time, and the data type can be integer, float, Boolean, or strings. As time goes by, the amount of collected metric data stored increases.
+Metric refers to the physical quantity collected by sensors, equipment or other types of data collection devices, such as current, voltage, temperature, pressure, GPS position, etc., which change with time, and the data type can be integer, float, Boolean, or strings. As time goes by, the amount of collected metric data stored increases. In the smart meters example, current, voltage and phase are the metrics.
## Label/Tag
-Label/Tag refers to the static properties of sensors, equipment or other types of data collection devices, which do not change with time, such as device model, color, fixed location of the device, etc. The data type can be any type. Although static, TDengine allows users to add, delete or update tag values at any time. Unlike the collected metric data, the amount of tag data stored does not change over time.
+Label/Tag refers to the static properties of sensors, equipment or other types of data collection devices, which do not change with time, such as device model, color, fixed location of the device, etc. The data type can be any type. Although static, TDengine allows users to add, delete or update tag values at any time. Unlike the collected metric data, the amount of tag data stored does not change over time. In the meters example, `location` and `groupid` are the tags.
## Data Collection Point
-Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points.
+Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. In the smart meters example, d1001, d1002, d1003, and d1004 are the data collection points.
## Table
@@ -137,7 +137,7 @@ The design of one table for one data collection point will require a huge number
STable is a template for a type of data collection point. A STable contains a set of data collection points (tables) that have the same schema or data structure, but with different static attributes (tags). To describe a STable, in addition to defining the table structure of the metrics, it is also necessary to define the schema of its tags. The data type of tags can be int, float, string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established.
-In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**.
+In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. In the smart meters example, we can create a super table named `meters`.
## Subtable
@@ -156,11 +156,13 @@ The relationship between a STable and the subtables created based on this STable
Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operation, which reduces the number of data sets to be scanned which in turn greatly improves the performance of data aggregation across multiple DCPs. In essence, querying a supertable is a very efficient aggregate query on multiple DCPs of the same type.
-In TDengine, it is recommended to use a subtable instead of a regular table for a DCP.
+In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. In the smart meters example, we can create subtables like d1001, d1002, d1003, and d1004 under super table meters.
+
+To better understand the data model using metrics, tags, super table and subtable, please refer to the diagram below which demonstrates the data model of the smart meters example. ![Meters Data Model Diagram](./supertable.webp)
## Database
-A database is a collection of tables. TDengine allows a running instance to have multiple databases, and each database can be configured with different storage policies. The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. In order for TDengine to work with maximum efficiency in various scenarios, TDengine recommends that STables with different data characteristics be created in different databases.
+A database is a collection of tables. TDengine allows a running instance to have multiple databases, and each database can be configured with different storage policies. The [characteristics of time-series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. In order for TDengine to work with maximum efficiency in various scenarios, TDengine recommends that STables with different data characteristics be created in different databases.
In a database, there can be one or more STables, but a STable belongs to only one database. All tables owned by a STable are stored in only one database.
diff --git a/docs/en/04-concept/supertable.webp b/docs/en/04-concept/supertable.webp
new file mode 100644
index 0000000000000000000000000000000000000000..764b8f3de7ee92a103b2fcd0e75c03773af5ee37
Binary files /dev/null and b/docs/en/04-concept/supertable.webp differ
diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md
index 869390cd6c30e8ddc4f0b11ef2402bd7ffb6cf7d..de5b620a779557a8a3b8422a14caf67b354d1e7a 100644
--- a/docs/en/05-get-started/01-docker.md
+++ b/docs/en/05-get-started/01-docker.md
@@ -31,17 +31,6 @@ You can now access TDengine or run other Linux commands.
Note: For information about installing docker, see the [official documentation](https://docs.docker.com/get-docker/).
-## Open the TDengine CLI
-
-On the container, run the following command to open the TDengine CLI:
-
-```
-$ taos
-
-taos>
-
-```
-
## Insert Data into TDengine
You can use the `taosBenchmark` tool included with TDengine to write test data into your deployment.
@@ -53,45 +42,57 @@ To do so, run the following command:
```
- This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to ten and a `location` tag of either `California.SanFrancisco` or `California.SanDiego`.
+This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.
The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required depends on the hardware specifications of the local system.
You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](/reference/taosbenchmark).
+## Open the TDengine CLI
+
+On the container, run the following command to open the TDengine CLI:
+
+```
+$ taos
+
+taos>
+
+```
+
## Query Data in TDengine
After using taosBenchmark to create your test deployment, you can run queries in the TDengine CLI to test its performance. For example:
-Query the number of rows in the `meters` supertable:
+From the TDengine CLI, query the number of rows in the `meters` supertable:
```sql
-taos> select count(*) from test.meters;
+select count(*) from test.meters;
```
Query the average, maximum, and minimum values of all 100 million rows of data:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.meters;
+select avg(current), max(voltage), min(phase) from test.meters;
```
-Query the number of rows whose `location` tag is `California.SanFrancisco`:
+Query the number of rows whose `location` tag is `San Francisco`:
```sql
-taos> select count(*) from test.meters where location="San Francisco";
+select count(*) from test.meters where location="San Francisco";
```
Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
+select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
```
-Query the average, maximum, and minimum values for table `d10` in 10 second intervals:
+Query the average, maximum, and minimum values for table `d10` in 1 second intervals:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
+select first(ts), avg(current), max(voltage), min(phase) from test.d10 interval(1s);
```
+In the query above you are selecting the first timestamp (ts) in the interval; another way of selecting this would be `_wstart`, which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
## Additional Information
diff --git a/docs/en/05-get-started/03-package.md b/docs/en/05-get-started/03-package.md
index 6f6a5087fa7e73f49ed97c5ea5f7db1668cebe9f..88096a759c58529d4150c0a750a4354a88da988f 100644
--- a/docs/en/05-get-started/03-package.md
+++ b/docs/en/05-get-started/03-package.md
@@ -67,13 +67,6 @@ Users will be prompted to enter some configuration information when install.sh i
-
-
-1. Download the Windows installation package.
-
-2. Run the downloaded package to install TDengine.
-
-
You can use `apt-get` to install TDengine from the official package repository.
@@ -102,6 +95,15 @@ sudo apt-get install tdengine
:::tip
This installation method is supported only for Debian and Ubuntu.
::::
+
+
+
+Note: TDengine only supports Windows Server 2016/2019 and Windows 10/11 system versions on the Windows platform.
+
+1. Download the Windows installation package.
+
+2. Run the downloaded package to install TDengine.
+
@@ -172,6 +174,20 @@ After the installation is complete, run `C:\TDengine\taosd.exe` to start TDengin
+## Test data insert performance
+
+After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
+
+```bash
+taosBenchmark
+```
+
+This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.
+
+The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in less than a minute.
+
+You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
+
## Command Line Interface
You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, run the following command:
@@ -203,51 +219,38 @@ Query OK, 2 row(s) in set (0.003128s)
```
You can also can monitor the deployment status, add and remove user accounts, and manage running instances. You can run the TDengine CLI on either Linux or Windows machines. For more information, see [TDengine CLI](../../reference/taos-shell/).
-
-## Test data insert performance
-
-After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
-
-```bash
-taosBenchmark
-```
-
-This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to ten and a `location` tag of either `California.SanFrancisco` or `California.LosAngeles`.
-
-The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in less than a minute.
-
-You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
-
+
## Test data query performance
After using taosBenchmark to create your test deployment, you can run queries in the TDengine CLI to test its performance:
-Query the number of rows in the `meters` supertable:
+From the TDengine CLI, query the number of rows in the `meters` supertable:
```sql
-taos> select count(*) from test.meters;
+select count(*) from test.meters;
```
Query the average, maximum, and minimum values of all 100 million rows of data:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.meters;
+select avg(current), max(voltage), min(phase) from test.meters;
```
-Query the number of rows whose `location` tag is `California.SanFrancisco`:
+Query the number of rows whose `location` tag is `San Francisco`:
```sql
-taos> select count(*) from test.meters where location="California.SanFrancisco";
+select count(*) from test.meters where location="San Francisco";
```
Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
+select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
```
-Query the average, maximum, and minimum values for table `d10` in 10 second intervals:
+Query the average, maximum, and minimum values for table `d10` in 1 second intervals:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
+select first(ts), avg(current), max(voltage), min(phase) from test.d10 interval(1s);
```
+In the query above you are selecting the first timestamp (ts) in the interval; another way of selecting this would be `_wstart`, which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
diff --git a/docs/en/07-develop/01-connect/index.md b/docs/en/07-develop/01-connect/index.md
index 20537064216f812990414ffd7260dbda64c56251..61eb8f04eb3fb8cea5096b321105fa9e88722bda 100644
--- a/docs/en/07-develop/01-connect/index.md
+++ b/docs/en/07-develop/01-connect/index.md
@@ -1,6 +1,7 @@
---
-title: Connect
-description: "This document explains how to establish connections to TDengine and how to install and use TDengine connectors."
+sidebar_label: Connect
+title: Connect to TDengine
+description: "How to establish connections to TDengine and how to install and use TDengine connectors."
---
import Tabs from "@theme/Tabs";
@@ -279,6 +280,6 @@ Prior to establishing connection, please make sure TDengine is already running a
:::tip
-If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.taosdata.com/train-faq/faq).
+If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.tdengine.com/train-faq/faq).
:::
diff --git a/docs/en/07-develop/03-insert-data/05-high-volume.md b/docs/en/07-develop/03-insert-data/05-high-volume.md
new file mode 100644
index 0000000000000000000000000000000000000000..9ea0c884473e670d0624cb3be737830f46bedc38
--- /dev/null
+++ b/docs/en/07-develop/03-insert-data/05-high-volume.md
@@ -0,0 +1,441 @@
+---
+sidebar_label: High Performance Writing
+title: High Performance Writing
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+This chapter introduces how to write data into TDengine with high throughput.
+
+## How to achieve high performance data writing
+
+To achieve high performance writing, there are a few aspects to consider. In the following sections we will describe these important factors in achieving high performance writing.
+
+### Application Program
+
+From the perspective of application program, you need to consider:
+
+1. The data size of each single write, also known as batch size. Generally speaking, higher batch size generates better writing performance. However, once the batch size is over a specific value, you will not get any additional benefit anymore. When using SQL to write into TDengine, it's better to put as much data as possible in a single SQL statement. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB.
+
+2. The number of concurrent connections. Normally more connections can get better result. However, once the number of connections exceeds the processing ability of the server side, the performance may downgrade.
+
+3. The distribution of data to be written across tables or sub-tables. Writing to single table in one batch is more efficient than writing to multiple tables in one batch.
+
+4. Data Writing Protocol.
+   - Parameter binding mode is more efficient than SQL because it doesn't have the cost of parsing SQL.
+   - Writing to known existing tables is more efficient than writing to uncertain tables in automatic creating mode because the latter needs to check whether the table exists or not before actually writing data into it
+   - Writing in SQL is more efficient than writing in schemaless mode because schemaless writing creates tables automatically and may alter table schema
+
+Application programs need to take care of the above factors and try to take advantage of them. The application program should write to single table in each write batch. The batch size needs to be tuned to a proper value on a specific system. The number of concurrent connections needs to be tuned to a proper value too to achieve the best writing throughput.
+
+### Data Source
+
+Application programs need to read data from data source then write into TDengine. If you meet one or more of below situations, you need to setup message queues between the threads for reading from data source and the threads for writing into TDengine.
+
+1. There are multiple data sources, the data generation speed of each data source is much slower than the speed of single writing thread. In this case, the purpose of message queues is to consolidate the data from multiple data sources together to increase the batch size of single write.
+2. The speed of data generation from single data source is much higher than the speed of single writing thread. The purpose of message queue in this case is to provide buffer so that data is not lost and multiple writing threads can get data from the buffer.
+3. The data for single table are from multiple data source. In this case the purpose of message queues is to combine the data for single table together to improve the write efficiency.
+
+If the data source is Kafka, then the application program is a consumer of Kafka, and you can benefit from some Kafka features to achieve high performance writing:
+
+1. Put the data for a table in single partition of single topic so that it's easier to put the data for each table together and write in batch
+2. Subscribe multiple topics to accumulate data together.
+3. Add more consumers to gain more concurrency and throughput.
+4. Increase the size of single fetch to increase the size of write batch.
+
+### Tune TDengine
+
+On the server side, database configuration parameter `vgroups` needs to be set carefully to maximize the system performance. If it's set too low, the system capability can't be utilized fully; if it's set too big, unnecessary resource competition may be produced. A normal recommendation for `vgroups` parameter is 2 times of the number of CPU cores. However, depending on the actual system resources, it may still need to be tuned.
+
+For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config).
+
+## Sample Programs
+
+This section will introduce the sample programs to demonstrate how to write into TDengine with high performance.
+
+### Scenario
+
+Below is the scenario for the sample programs of high performance writing.
+
+- Application program reads data from data source, the sample program simulates a data source by generating data
+- The speed of single writing thread is much slower than the speed of generating data, so the program starts multiple writing threads while each thread establishes a connection to TDengine and each thread has a message queue of fixed size.
+- Application program maps the received data to different writing threads based on table name to make sure all the data for each table is always processed by a specific writing thread.
+- Each writing thread writes the received data into TDengine once the message queue becomes empty or the read data meets a threshold.
+
+![Thread Model of High Performance Writing into TDengine](highvolume.webp)
+
+### Sample Programs
+
+The sample programs listed in this section are based on the scenario described previously. If your scenario is different, please try to adjust the code based on the principles described in this chapter.
+
+The sample programs assume the source data is for all the different sub tables in same super table (meters). The super table has been created before the sample program starts writing data. Sub tables are created automatically according to received data. If there are multiple super tables in your case, please try to adjust the part of creating table automatically.
+
+
+
+
+**Program Inventory**
+
+| Class | Description |
+| ---------------- | ----------------------------------------------------------------------------------------------------- |
+| FastWriteExample | Main Program |
+| ReadTask | Read data from simulated data source and put into a queue according to the hash value of table name |
+| WriteTask        | Read data from Queue, compose a write batch and write into TDengine                                   |
+| MockDataSource | Generate data for some sub tables of super table meters |
+| SQLWriter | WriteTask uses this class to compose SQL, create table automatically, check SQL length and write data |
+| StmtWriter | Write in Parameter binding mode (Not finished yet) |
+| DataBaseMonitor | Calculate the writing speed and output on console every 10 seconds |
+
+Below is the list of complete code of the classes in above table and more detailed description.
+
+
+FastWriteExample
+The main Program is responsible for:
+
+1. Create message queues
+2. Start writing threads
+3. Start reading threads
+4. Output writing speed every 10 seconds
+
+The main program provides 4 parameters for tuning:
+
+1. The number of reading threads, default value is 1
+2. The number of writing threads, default value is 2
+3. The total number of tables in the generated data, default value is 1000. These tables are distributed evenly across all writing threads. If the number of tables is very big, it will cost much time to firstly create these tables.
+4. The batch size of single write, default value is 3,000
+
+The capacity of message queue also impacts performance and can be tuned by modifying program. Normally it's always better to have a larger message queue. A larger message queue means lower possibility of being blocked when enqueueing and higher throughput. But a larger message queue consumes more memory space. The default value used in the sample programs is already big enough.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}}
+```
+
+
+
+
+ReadTask
+
+ReadTask reads data from data source. Each ReadTask is associated with a simulated data source, each data source generates data for a group of specific tables, and the data of any table is only generated from a single specific data source.
+
+ReadTask puts data in message queue in blocking mode. That means, the putting operation is blocked if the message queue is full.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java}}
+```
+
+
+
+
+WriteTask
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java}}
+```
+
+
+
+
+
+MockDataSource
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java}}
+```
+
+
+
+
+
+SQLWriter
+
+SQLWriter class encapsulates the logic of composing SQL and writing data. Please note that the tables have not been created before writing, but are created automatically when catching the exception of table doesn't exist. For other exceptions caught, the SQL which caused the exception is logged for you to debug.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java}}
+```
+
+
+
+
+
+DataBaseMonitor
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java}}
+```
+
+
+
+**Steps to Launch**
+
+
+Launch Java Sample Program
+
+You need to set environment variable `TDENGINE_JDBC_URL` before launching the program. If TDengine Server is setup on localhost, then the default value for user name, password and port can be used, like below:
+
+```
+TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+```
+
+**Launch in IDE**
+
+1. Clone TDengine repository
+ ```
+ git clone git@github.com:taosdata/TDengine.git --depth 1
+ ```
+2. Use IDE to open `docs/examples/java` directory
+3. Configure environment variable `TDENGINE_JDBC_URL`, you can also configure it before launching the IDE, if so you can skip this step.
+4. Run class `com.taos.example.highvolume.FastWriteExample`
+
+**Launch on server**
+
+If you want to launch the sample program on a remote server, please follow below steps:
+
+1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java` :
+ ```
+ mvn package
+ ```
+2. Create `examples/java` directory on the server
+ ```
+ mkdir -p examples/java
+ ```
+3. Copy dependencies (below commands assume you are working on a local Windows host and try to launch on a remote Linux host)
+ - Copy dependent packages
+ ```
+ scp -r .\target\lib @:~/examples/java
+ ```
+ - Copy the jar of sample programs
+ ```
+ scp -r .\target\javaexample-1.0.jar @:~/examples/java
+ ```
+4. Configure environment variable
+ Edit `~/.bash_profile` or `~/.bashrc` and add below:
+
+ ```
+ export TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+ ```
+
+ If your TDengine server is not deployed on localhost or doesn't use default port, you need to change the above URL to correct value in your environment.
+
+5. Launch the sample program
+
+ ```
+ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample
+ ```
+
+6. The sample program doesn't exit unless you press CTRL + C to terminate it.
+ Below is the output of running on a server of 16 cores, 64GB memory and SSD hard disk.
+
+ ```
+ root@vm85$ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample 2 12
+ 18:56:35.896 [main] INFO c.t.e.highvolume.FastWriteExample - readTaskCount=2, writeTaskCount=12 tableCount=1000 maxBatchSize=3000
+ 18:56:36.011 [WriteThread-0] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.015 [WriteThread-0] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.021 [WriteThread-1] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.022 [WriteThread-1] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.031 [WriteThread-2] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.032 [WriteThread-2] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.041 [WriteThread-3] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.042 [WriteThread-3] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.093 [WriteThread-4] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.094 [WriteThread-4] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.099 [WriteThread-5] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.100 [WriteThread-5] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.100 [WriteThread-6] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.101 [WriteThread-6] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.103 [WriteThread-7] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.104 [WriteThread-7] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.105 [WriteThread-8] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.107 [WriteThread-8] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.108 [WriteThread-9] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.109 [WriteThread-9] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.156 [WriteThread-10] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.157 [WriteThread-11] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.158 [WriteThread-10] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.158 [ReadThread-0] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [ReadThread-1] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [WriteThread-11] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:46.369 [main] INFO c.t.e.highvolume.FastWriteExample - count=18554448 speed=1855444
+ 18:56:56.946 [main] INFO c.t.e.highvolume.FastWriteExample - count=39059660 speed=2050521
+ 18:57:07.322 [main] INFO c.t.e.highvolume.FastWriteExample - count=59403604 speed=2034394
+ 18:57:18.032 [main] INFO c.t.e.highvolume.FastWriteExample - count=80262938 speed=2085933
+ 18:57:28.432 [main] INFO c.t.e.highvolume.FastWriteExample - count=101139906 speed=2087696
+ 18:57:38.921 [main] INFO c.t.e.highvolume.FastWriteExample - count=121807202 speed=2066729
+ 18:57:49.375 [main] INFO c.t.e.highvolume.FastWriteExample - count=142952417 speed=2114521
+ 18:58:00.689 [main] INFO c.t.e.highvolume.FastWriteExample - count=163650306 speed=2069788
+ 18:58:11.646 [main] INFO c.t.e.highvolume.FastWriteExample - count=185019808 speed=2136950
+ ```
+
+
+
+
+
+
+**Program Inventory**
+
+Sample programs in Python use multi-process and cross-process message queues.
+
+| Function/Class               | Description                                                                 |
+| ---------------------------- | --------------------------------------------------------------------------- |
+| main Function | Program entry point, create child processes and message queues |
+| run_monitor_process Function | Create database, super table, calculate writing speed and output to console |
+| run_read_task Function | Read data and distribute to message queues |
+| MockDataSource Class | Simulate data source, return next 1,000 rows of each table |
+| run_write_task Function | Read as much as possible data from message queue and write in batch |
+| SQLWriter Class              | Write in SQL and create table automatically                                 |
+| StmtWriter Class | Write in parameter binding mode (not finished yet) |
+
+
+main function
+
+`main` function is responsible for creating message queues and forking child processes, there are 3 kinds of child processes:
+
+1. Monitoring process, initializes database and calculates writing speed
+2. Reading process (n), reads data from data source
+3. Writing process (m), writes data into TDengine
+
+`main` function accepts 5 parameters:
+
+1. The number of reading tasks, default value is 1
+2. The number of writing tasks, default value is 1
+3. The number of tables, default value is 1,000
+4. The capacity of message queue, default value is 1,000,000 bytes
+5. The batch size in single write, default value is 3000
+
+```python
+{{#include docs/examples/python/fast_write_example.py:main}}
+```
+
+
+
+
+run_monitor_process
+
+Monitoring process initializes database and monitors writing speed.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:monitor}}
+```
+
+
+
+
+
+run_read_task function
+
+Reading process reads data from other data systems and distributes it to the message queue allocated for it.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:read}}
+```
+
+
+
+
+
+MockDataSource
+
+Below is the simulated data source; we assume each generated record contains the target table name.
+
+```python
+{{#include docs/examples/python/mockdatasource.py}}
+```
+
+
+
+
+run_write_task function
+
+Writing process tries to read as much data as possible from the message queue and writes it in batch.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:write}}
+```
+
+
+
+
+
+SQLWriter class encapsulates the logic of composing SQL and writing data. Please note that the tables have not been created before writing, but are created automatically when catching the exception of table doesn't exist. For other exceptions caught, the SQL which caused the exception is logged for you to debug. This class also checks the SQL length, and passes the maximum SQL length by parameter maxSQLLength according to actual TDengine limit.
+
+SQLWriter
+
+```python
+{{#include docs/examples/python/sql_writer.py}}
+```
+
+
+
+**Steps to Launch**
+
+
+
+Launch Sample Program in Python
+
+1. Prerequisites
+
+ - TDengine client driver has been installed
+   - Python3 has been installed, and the version is 3.8 or above
+ - TDengine Python connector `taospy` has been installed
+
+2. Install faster-fifo to replace python builtin multiprocessing.Queue
+
+ ```
+ pip3 install faster-fifo
+ ```
+
+3. Click the "Copy" in the above sample programs to copy `fast_write_example.py`, `sql_writer.py` and `mockdatasource.py`.
+
+4. Execute the program
+
+ ```
+ python3 fast_write_example.py
+ ```
+
+ Below is the output of running on a server of 16 cores, 64GB memory and SSD hard disk.
+
+ ```
+ root@vm85$ python3 fast_write_example.py 8 8
+ 2022-07-14 19:13:45,869 [root] - READ_TASK_COUNT=8, WRITE_TASK_COUNT=8, TABLE_COUNT=1000, QUEUE_SIZE=1000000, MAX_BATCH_SIZE=3000
+ 2022-07-14 19:13:48,882 [root] - WriteTask-0 started with pid 718347
+ 2022-07-14 19:13:48,883 [root] - WriteTask-1 started with pid 718348
+ 2022-07-14 19:13:48,884 [root] - WriteTask-2 started with pid 718349
+ 2022-07-14 19:13:48,884 [root] - WriteTask-3 started with pid 718350
+ 2022-07-14 19:13:48,885 [root] - WriteTask-4 started with pid 718351
+ 2022-07-14 19:13:48,885 [root] - WriteTask-5 started with pid 718352
+ 2022-07-14 19:13:48,886 [root] - WriteTask-6 started with pid 718353
+ 2022-07-14 19:13:48,886 [root] - WriteTask-7 started with pid 718354
+ 2022-07-14 19:13:48,887 [root] - ReadTask-0 started with pid 718355
+ 2022-07-14 19:13:48,888 [root] - ReadTask-1 started with pid 718356
+ 2022-07-14 19:13:48,889 [root] - ReadTask-2 started with pid 718357
+ 2022-07-14 19:13:48,889 [root] - ReadTask-3 started with pid 718358
+ 2022-07-14 19:13:48,890 [root] - ReadTask-4 started with pid 718359
+ 2022-07-14 19:13:48,891 [root] - ReadTask-5 started with pid 718361
+ 2022-07-14 19:13:48,892 [root] - ReadTask-6 started with pid 718364
+ 2022-07-14 19:13:48,893 [root] - ReadTask-7 started with pid 718365
+ 2022-07-14 19:13:56,042 [DataBaseMonitor] - count=6676310 speed=667631.0
+ 2022-07-14 19:14:06,196 [DataBaseMonitor] - count=20004310 speed=1332800.0
+ 2022-07-14 19:14:16,366 [DataBaseMonitor] - count=32290310 speed=1228600.0
+ 2022-07-14 19:14:26,527 [DataBaseMonitor] - count=44438310 speed=1214800.0
+ 2022-07-14 19:14:36,673 [DataBaseMonitor] - count=56608310 speed=1217000.0
+ 2022-07-14 19:14:46,834 [DataBaseMonitor] - count=68757310 speed=1214900.0
+ 2022-07-14 19:14:57,280 [DataBaseMonitor] - count=80992310 speed=1223500.0
+ 2022-07-14 19:15:07,689 [DataBaseMonitor] - count=93805310 speed=1281300.0
+ 2022-07-14 19:15:18,020 [DataBaseMonitor] - count=106111310 speed=1230600.0
+ 2022-07-14 19:15:28,356 [DataBaseMonitor] - count=118394310 speed=1228300.0
+ 2022-07-14 19:15:38,690 [DataBaseMonitor] - count=130742310 speed=1234800.0
+ 2022-07-14 19:15:49,000 [DataBaseMonitor] - count=143051310 speed=1230900.0
+ 2022-07-14 19:15:59,323 [DataBaseMonitor] - count=155276310 speed=1222500.0
+ 2022-07-14 19:16:09,649 [DataBaseMonitor] - count=167603310 speed=1232700.0
+ 2022-07-14 19:16:19,995 [DataBaseMonitor] - count=179976310 speed=1237300.0
+ ```
+
+
+
+:::note
+Don't establish a connection to TDengine in the parent process if using the Python connector in multi-process mode, otherwise all the connections in child processes will be blocked permanently. This is a known issue.
+
+:::
+
+
+
diff --git a/docs/en/07-develop/03-insert-data/highvolume.webp b/docs/en/07-develop/03-insert-data/highvolume.webp
new file mode 100644
index 0000000000000000000000000000000000000000..46dfc74ae3b0043c591ff930c62251da49cae7ad
Binary files /dev/null and b/docs/en/07-develop/03-insert-data/highvolume.webp differ
diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx
index ceeea64fca91473ea62de404fb9e92c179f7e6d4..17b3f5caa062eaacb4216b7153e899040e702cc1 100644
--- a/docs/en/07-develop/07-tmq.mdx
+++ b/docs/en/07-develop/07-tmq.mdx
@@ -16,7 +16,7 @@ import CDemo from "./_sub_c.mdx";
TDengine provides data subscription and consumption interfaces similar to message queue products. These interfaces make it easier for applications to obtain data written to TDengine either in real time and to process data in the order that events occurred. This simplifies your time-series data processing systems and reduces your costs because it is no longer necessary to deploy a message queue product such as Kafka.
-To use TDengine data subscription, you define topics like in Kafka. However, a topic in TDengine is based on query conditions for an existing supertable, standard table, or subtable - in other words, a SELECT statement. You can use SQL to filter data by tag, table name, column, or expression and then perform a scalar function or user-defined function on the data. Aggregate functions are not supported. This gives TDengine data subscription more flexibility than similar products. The granularity of data can be controlled on demand by applications, while filtering and preprocessing are handled by TDengine instead of the application layer. This implementation reduces the amount of data transmitted and the complexity of applications.
+To use TDengine data subscription, you define topics like in Kafka. However, a topic in TDengine is based on query conditions for an existing supertable, table, or subtable - in other words, a SELECT statement. You can use SQL to filter data by tag, table name, column, or expression and then perform a scalar function or user-defined function on the data. Aggregate functions are not supported. This gives TDengine data subscription more flexibility than similar products. The granularity of data can be controlled on demand by applications, while filtering and preprocessing are handled by TDengine instead of the application layer. This implementation reduces the amount of data transmitted and the complexity of applications.
By subscribing to a topic, a consumer can obtain the latest data in that topic in real time. Multiple consumers can be formed into a consumer group that consumes messages together. Consumer groups enable faster speed through multi-threaded, distributed data consumption. Note that consumers in different groups that are subscribed to the same topic do not consume messages together. A single consumer can subscribe to multiple topics. If the data in a supertable is sharded across multiple vnodes, consumer groups can consume it much more efficiently than single consumers. TDengine also includes an acknowledgement mechanism that ensures at-least-once delivery in complicated environments where machines may crash or restart.
diff --git a/docs/en/07-develop/08-cache.md b/docs/en/07-develop/08-cache.md
index 4892c21c9ddb97b3f967053ee64be24f8cb78c85..82a4787016f608f8e32e89b1747443b7cd164551 100644
--- a/docs/en/07-develop/08-cache.md
+++ b/docs/en/07-develop/08-cache.md
@@ -20,11 +20,11 @@ In theory, larger cache sizes are always better. However, at a certain point, it
## Read Cache
-When you create a database, you can configure whether the latest data from every subtable is cached. To do so, set the *cachelast* parameter as follows:
-- 0: Caching is disabled.
-- 1: The latest row of data in each subtable is cached. This option significantly improves the performance of the `LAST_ROW` function
-- 2: The latest non-null value in each column of each subtable is cached. This option significantly improves the performance of the `LAST` function in normal situations, such as WHERE, ORDER BY, GROUP BY, and INTERVAL statements.
-- 3: Rows and columns are both cached. This option is equivalent to simultaneously enabling options 1 and 2.
+When you create a database, you can configure whether the latest data from every subtable is cached. To do so, set the *cachemodel* parameter as follows:
+- none: Caching is disabled.
+- last_row: The latest row of data in each subtable is cached. This option significantly improves the performance of the `LAST_ROW` function
+- last_value: The latest non-null value in each column of each subtable is cached. This option significantly improves the performance of the `LAST` function in normal situations, such as WHERE, ORDER BY, GROUP BY, and INTERVAL statements.
+- both: Rows and columns are both cached. This option is equivalent to simultaneously enabling option last_row and last_value.
## Metadata Cache
diff --git a/docs/en/10-deployment/01-deploy.md b/docs/en/10-deployment/01-deploy.md
index bfbb547bd4177cba369ec9d3d2541bceed853ef0..5dfcd3108d8b10cf24cdd5c852c4225ced0f16b2 100644
--- a/docs/en/10-deployment/01-deploy.md
+++ b/docs/en/10-deployment/01-deploy.md
@@ -39,18 +39,18 @@ To get the hostname on any host, the command `hostname -f` can be executed.
On the physical machine running the application, ping the dnode that is running taosd. If the dnode is not accessible, the application cannot connect to taosd. In this case, verify the DNS and hosts settings on the physical node running the application.
-The end point of each dnode is the output hostname and port, such as h1.taosdata.com:6030.
+The end point of each dnode is the output hostname and port, such as h1.tdengine.com:6030.
### Step 5
-Modify the TDengine configuration file `/etc/taos/taos.cfg` on each node. Assuming the first dnode of TDengine cluster is "h1.taosdata.com:6030", its `taos.cfg` is configured as following.
+Modify the TDengine configuration file `/etc/taos/taos.cfg` on each node. Assuming the first dnode of TDengine cluster is "h1.tdengine.com:6030", its `taos.cfg` is configured as following.
```c
// firstEp is the end point to connect to when any dnode starts
-firstEp h1.taosdata.com:6030
+firstEp h1.tdengine.com:6030
// must be configured to the FQDN of the host where the dnode is launched
-fqdn h1.taosdata.com
+fqdn h1.tdengine.com
// the port used by the dnode, default is 6030
serverPort 6030
@@ -76,13 +76,13 @@ The first dnode can be started following the instructions in [Get Started](/get-
taos> show dnodes;
id | endpoint | vnodes | support_vnodes | status | create_time | note |
============================================================================================================================================
-1 | h1.taosdata.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
+1 | h1.tdengine.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
Query OK, 1 rows affected (0.007984s)
```
-From the above output, it is shown that the end point of the started dnode is "h1.taosdata.com:6030", which is the `firstEp` of the cluster.
+From the above output, it is shown that the end point of the started dnode is "h1.tdengine.com:6030", which is the `firstEp` of the cluster.
## Add DNODE
@@ -90,7 +90,7 @@ There are a few steps necessary to add other dnodes in the cluster.
Second, we can start `taosd` as instructed in [Get Started](/get-started/).
-Then, on the first dnode i.e. h1.taosdata.com in our example, use TDengine CLI `taos` to execute the following command:
+Then, on the first dnode i.e. h1.tdengine.com in our example, use TDengine CLI `taos` to execute the following command:
```sql
CREATE DNODE "h2.taos.com:6030";
@@ -98,7 +98,7 @@ CREATE DNODE "h2.taos.com:6030";
This adds the end point of the new dnode (from Step 4) into the end point list of the cluster. In the command "fqdn:port" should be quoted using double quotes. Change `"h2.taos.com:6030"` to the end point of your new dnode.
-Then on the first dnode h1.taosdata.com, execute `show dnodes` in `taos`
+Then on the first dnode h1.tdengine.com, execute `show dnodes` in `taos`
```sql
SHOW DNODES;
@@ -114,7 +114,9 @@ The above process can be repeated to add more dnodes in the cluster.
Any node that is in the cluster and online can be the firstEp of new nodes.
Nodes use the firstEp parameter only when joining a cluster for the first time. After a node has joined the cluster, it stores the latest mnode in its end point list and no longer makes use of firstEp.
-However, firstEp is used by clients that connect to the cluster. For example, if you run `taos shell` without arguments, it connects to the firstEp by default.
+
+However, firstEp is used by clients that connect to the cluster. For example, if you run TDengine CLI `taos` without arguments, it connects to the firstEp by default.
+
Two dnodes that are launched without a firstEp value operate independently of each other. It is not possible to add one dnode to the other dnode and form a cluster. It is also not possible to form two independent clusters into a new cluster.
:::
diff --git a/docs/en/10-deployment/03-k8s.md b/docs/en/10-deployment/03-k8s.md
index b3f71ed5bd0e0dbaf3108cc40be6b18bdf5fb7e8..b0aa6777130864404e97dc332cf0e5ce830bf8ed 100644
--- a/docs/en/10-deployment/03-k8s.md
+++ b/docs/en/10-deployment/03-k8s.md
@@ -9,6 +9,7 @@ TDengine is a cloud-native time-series database that can be deployed on Kubernet
Before deploying TDengine on Kubernetes, perform the following:
+* Current steps are compatible with Kubernetes v1.5 and later version.
* Install and configure minikube, kubectl, and helm.
* Install and deploy Kubernetes and ensure that it can be accessed and used normally. Update any container registries or other services as necessary.
@@ -100,7 +101,7 @@ spec:
# Must set if you want a cluster.
- name: TAOS_FIRST_EP
value: "$(STS_NAME)-0.$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local:$(TAOS_SERVER_PORT)"
- # TAOS_FQND should always be setted in k8s env.
+ # TAOS_FQDN should always be set in k8s env.
- name: TAOS_FQDN
value: "$(POD_NAME).$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local"
volumeMounts:
diff --git a/docs/en/10-deployment/05-helm.md b/docs/en/10-deployment/05-helm.md
index 48cd9df32c16d346ceece01f01ee3880231427e7..a4fa68100078efe85fff5e1b078ebd07e5337d5a 100644
--- a/docs/en/10-deployment/05-helm.md
+++ b/docs/en/10-deployment/05-helm.md
@@ -152,7 +152,7 @@ clusterDomainSuffix: ""
# converting an upper-snake-cased variable like `TAOS_DEBUG_FLAG`,
# to a camelCase taos config variable `debugFlag`.
#
-# See the variable list at https://www.taosdata.com/cn/documentation/administrator .
+# See the [Configuration Variables](../../reference/config)
#
# Note:
# 1. firstEp/secondEp: should not be setted here, it's auto generated at scale-up.
@@ -170,71 +170,21 @@ taoscfg:
# number of replications, for cluster only
TAOS_REPLICA: "1"
-
- # number of days per DB file
- # TAOS_DAYS: "10"
-
- # number of days to keep DB file, default is 10 years.
- #TAOS_KEEP: "3650"
-
- # cache block size (Mbyte)
- #TAOS_CACHE: "16"
-
- # number of cache blocks per vnode
- #TAOS_BLOCKS: "6"
-
- # minimum rows of records in file block
- #TAOS_MIN_ROWS: "100"
-
- # maximum rows of records in file block
- #TAOS_MAX_ROWS: "4096"
-
#
- # TAOS_NUM_OF_THREADS_PER_CORE: number of threads per CPU core
- #TAOS_NUM_OF_THREADS_PER_CORE: "1.0"
+ # TAOS_NUM_OF_RPC_THREADS: number of threads for RPC
+ #TAOS_NUM_OF_RPC_THREADS: "2"
+
#
# TAOS_NUM_OF_COMMIT_THREADS: number of threads to commit cache data
#TAOS_NUM_OF_COMMIT_THREADS: "4"
- #
- # TAOS_RATIO_OF_QUERY_CORES:
- # the proportion of total CPU cores available for query processing
- # 2.0: the query threads will be set to double of the CPU cores.
- # 1.0: all CPU cores are available for query processing [default].
- # 0.5: only half of the CPU cores are available for query.
- # 0.0: only one core available.
- #TAOS_RATIO_OF_QUERY_CORES: "1.0"
-
- #
- # TAOS_KEEP_COLUMN_NAME:
- # the last_row/first/last aggregator will not change the original column name in the result fields
- #TAOS_KEEP_COLUMN_NAME: "0"
-
- # enable/disable backuping vnode directory when removing vnode
- #TAOS_VNODE_BAK: "1"
-
# enable/disable installation / usage report
#TAOS_TELEMETRY_REPORTING: "1"
- # enable/disable load balancing
- #TAOS_BALANCE: "1"
-
- # max timer control blocks
- #TAOS_MAX_TMR_CTRL: "512"
-
# time interval of system monitor, seconds
#TAOS_MONITOR_INTERVAL: "30"
- # number of seconds allowed for a dnode to be offline, for cluster only
- #TAOS_OFFLINE_THRESHOLD: "8640000"
-
- # RPC re-try timer, millisecond
- #TAOS_RPC_TIMER: "1000"
-
- # RPC maximum time for ack, seconds.
- #TAOS_RPC_MAX_TIME: "600"
-
# time interval of dnode status reporting to mnode, seconds, for cluster only
#TAOS_STATUS_INTERVAL: "1"
@@ -245,37 +195,7 @@ taoscfg:
#TAOS_MIN_SLIDING_TIME: "10"
# minimum time window, milli-second
- #TAOS_MIN_INTERVAL_TIME: "10"
-
- # maximum delay before launching a stream computation, milli-second
- #TAOS_MAX_STREAM_COMP_DELAY: "20000"
-
- # maximum delay before launching a stream computation for the first time, milli-second
- #TAOS_MAX_FIRST_STREAM_COMP_DELAY: "10000"
-
- # retry delay when a stream computation fails, milli-second
- #TAOS_RETRY_STREAM_COMP_DELAY: "10"
-
- # the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
- #TAOS_STREAM_COMP_DELAY_RATIO: "0.1"
-
- # max number of vgroups per db, 0 means configured automatically
- #TAOS_MAX_VGROUPS_PER_DB: "0"
-
- # max number of tables per vnode
- #TAOS_MAX_TABLES_PER_VNODE: "1000000"
-
- # the number of acknowledgments required for successful data writing
- #TAOS_QUORUM: "1"
-
- # enable/disable compression
- #TAOS_COMP: "2"
-
- # write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync
- #TAOS_WAL_LEVEL: "1"
-
- # if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
- #TAOS_FSYNC: "3000"
+ #TAOS_MIN_INTERVAL_TIME: "1"
# the compressed rpc message, option:
# -1 (no compression)
@@ -283,17 +203,8 @@ taoscfg:
# > 0 (rpc message body which larger than this value will be compressed)
#TAOS_COMPRESS_MSG_SIZE: "-1"
- # max length of an SQL
- #TAOS_MAX_SQL_LENGTH: "1048576"
-
- # the maximum number of records allowed for super table time sorting
- #TAOS_MAX_NUM_OF_ORDERED_RES: "100000"
-
# max number of connections allowed in dnode
- #TAOS_MAX_SHELL_CONNS: "5000"
-
- # max number of connections allowed in client
- #TAOS_MAX_CONNECTIONS: "5000"
+ #TAOS_MAX_SHELL_CONNS: "50000"
# stop writing logs when the disk size of the log folder is less than this value
#TAOS_MINIMAL_LOG_DIR_G_B: "0.1"
@@ -313,21 +224,8 @@ taoscfg:
# enable/disable system monitor
#TAOS_MONITOR: "1"
- # enable/disable recording the SQL statements via restful interface
- #TAOS_HTTP_ENABLE_RECORD_SQL: "0"
-
- # number of threads used to process http requests
- #TAOS_HTTP_MAX_THREADS: "2"
-
- # maximum number of rows returned by the restful interface
- #TAOS_RESTFUL_ROW_LIMIT: "10240"
-
- # The following parameter is used to limit the maximum number of lines in log files.
- # max number of lines per log filters
- # numOfLogLines 10000000
-
# enable/disable async log
- #TAOS_ASYNC_LOG: "0"
+ #TAOS_ASYNC_LOG: "1"
#
# time of keeping log files, days
@@ -344,25 +242,8 @@ taoscfg:
# debug flag for all log type, take effect when non-zero value\
#TAOS_DEBUG_FLAG: "143"
- # enable/disable recording the SQL in taos client
- #TAOS_ENABLE_RECORD_SQL: "0"
-
# generate core file when service crash
#TAOS_ENABLE_CORE_FILE: "1"
-
- # maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
- #TAOS_MAX_BINARY_DISPLAY_WIDTH: "30"
-
- # enable/disable stream (continuous query)
- #TAOS_STREAM: "1"
-
- # in retrieve blocking model, only in 50% query threads will be used in query processing in dnode
- #TAOS_RETRIEVE_BLOCKING_MODEL: "0"
-
- # the maximum allowed query buffer size in MB during query processing for each data node
- # -1 no limit (default)
- # 0 no query allowed, queries are disabled
- #TAOS_QUERY_BUFFER_SIZE: "-1"
```
## Scaling Out
diff --git a/docs/en/12-taos-sql/01-data-type.md b/docs/en/12-taos-sql/01-data-type.md
index b830994ac9323f85d9ca68a40366edd9f2da1432..876de50f35ee3ba533bd7d5916632de853a84c0e 100644
--- a/docs/en/12-taos-sql/01-data-type.md
+++ b/docs/en/12-taos-sql/01-data-type.md
@@ -11,7 +11,7 @@ When using TDengine to store and query data, the most important part of the data
- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
- Internal function `now` can be used to get the current timestamp on the client side
- The current timestamp of the client side is applied when `now` is used to insert data
-- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from 1970-01-01 00:00:00.000 (UTC/GMT)
+- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
- Add/subtract operations can be carried out on timestamps. For example `now-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanonseconds.
diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md
index d9dadae976bf07bbf6cfb49401d55bb0bf18da49..5a84bbf3709ff2355157409ae11d5f85191a8271 100644
--- a/docs/en/12-taos-sql/02-database.md
+++ b/docs/en/12-taos-sql/02-database.md
@@ -71,9 +71,9 @@ database_option: {
- SINGLE_STABLE: specifies whether the database can contain more than one supertable.
- 0: The database can contain multiple supertables.
- 1: The database can contain only one supertable.
-- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted.
-- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted.
-- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk.
+- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is 4 days.
+- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is -1.
+- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value of single copy is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. The default values of multiple copy is 1 day.
- WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. After the current WAL file reaches this size, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk.
### Example Statement
diff --git a/docs/en/12-taos-sql/03-table.md b/docs/en/12-taos-sql/03-table.md
index bf32cf171bbeea23ada946d5011a73dd70ddd6ca..5a2c8ed6ee4a5ea129023fec68fa97d577832f60 100644
--- a/docs/en/12-taos-sql/03-table.md
+++ b/docs/en/12-taos-sql/03-table.md
@@ -57,7 +57,7 @@ table_option: {
3. MAX_DELAY: specifies the maximum latency for pushing computation results. The default value is 15 minutes or the value of the INTERVAL parameter, whichever is smaller. Enter a value between 0 and 15 minutes in milliseconds, seconds, or minutes. You can enter multiple values separated by commas (,). Note: Retain the default value if possible. Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database.
4. ROLLUP: specifies aggregate functions to roll up. Rolling up a function provides downsampled results based on multiple axes. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database. You can specify only one function to roll up. The rollup takes effect on all columns except TS. Enter one of the following values: avg, sum, min, max, last, or first.
5. SMA: specifies functions on which to enable small materialized aggregates (SMA). SMA is user-defined precomputation of aggregates based on data blocks. Enter one of the following values: max, min, or sum This parameter can be used with supertables and standard tables.
-6. TTL: specifies the time to live (TTL) for the table. If the period specified by the TTL parameter elapses without any data being written to the table, TDengine will automatically delete the table. Note: The system may not delete the table at the exact moment that the TTL expires. Enter a value in days. The default value is 0. Note: The TTL parameter has a higher priority than the KEEP parameter. If a table is marked for deletion because the TTL has expired, it will be deleted even if the time specified by the KEEP parameter has not elapsed. This parameter can be used with standard tables and subtables.
+6. TTL: specifies the time to live (TTL) for the table. If TTL is specified when creating a table, TDengine will automatically delete the table once its age exceeds the TTL. Please note that the system may not delete the table at the exact moment that the TTL expires, but it is guaranteed that the table will eventually be deleted. The unit of TTL is days. The default value is 0, i.e. never expire.
## Create Subtables
diff --git a/docs/en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md
index 439205696b3012c17865898d50635ee0fed580ab..1dd0caed38235d3d10813b2cd74fec6446c5ec24 100644
--- a/docs/en/12-taos-sql/06-select.md
+++ b/docs/en/12-taos-sql/06-select.md
@@ -171,8 +171,8 @@ The \_QSTART and \_QEND pseudocolumns contain the beginning and end of the time
The \_QSTART and \_QEND pseudocolumns cannot be used in a WHERE clause.
-**\_WSTART, \_WEND, and \_DURATION**
-\_WSTART, \_WEND, and \_WDURATION pseudocolumns
+**\_WSTART, \_WEND, and \_WDURATION**
+
The \_WSTART, \_WEND, and \_WDURATION pseudocolumns indicate the beginning, end, and duration of a window.
These pseudocolumns can be used only in time window-based aggregations and must occur after the aggregation clause.
diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md
index dbbee0d0fd520fe4bca7bee5ae4fa859b3ec98d9..f74d0dbe5c6ae2019b266df4c55a13a49630bf71 100644
--- a/docs/en/12-taos-sql/10-function.md
+++ b/docs/en/12-taos-sql/10-function.md
@@ -613,6 +613,7 @@ SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHER
**Explanations**:
- _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX.
- `algo_type` can only be input as `default` or `t-digest` Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
+- The approximation result of the `t-digest` algorithm is sensitive to input data order. For example, when querying a STable with different input data order there might be minor differences in the calculated results.
### AVG
@@ -846,7 +847,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
### INTERP
```sql
-SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
+SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] RANGE(timestamp1,timestamp2) EVERY(interval) FILL({ VALUE | PREV | NULL | LINEAR | NEXT});
```
**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned.
@@ -861,11 +862,10 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
-- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. If `RANGE` is not specified, then the timestamp of the first row that matches the filter condition is treated as timestamp1, the timestamp of the last row that matches the filter condition is treated as timestamp2.
-- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter. If `EVERY` parameter is not used, the time windows will be considered as no ending timestamp, i.e. there is only one time window from timestamp1.
-- Interpolation is performed based on `FILL` parameter. No interpolation is performed if `FILL` is not used, that means either the original data that matches is returned or nothing is returned.
-- `INTERP` can only be used to interpolate in single timeline. So it must be used with `group by tbname` when it's used on a STable. It can't be used with `GROUP BY` when it's used in the inner query of a nested query.
-- The result of `INTERP` is not influenced by `ORDER BY TIMESTAMP`, which impacts the output order only..
+- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
+- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter.
+- Interpolation is performed based on `FILL` parameter.
+- `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable.
### LAST
@@ -1140,7 +1140,7 @@ SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clau
**Applicable parameter values**:
-- oper : Can be one of `LT` (lower than), `GT` (greater than), `LE` (lower than or equal to), `GE` (greater than or equal to), `NE` (not equal to), `EQ` (equal to), the value is case insensitive
+- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
- val : Numeric types
**Return value type**: Integer
@@ -1167,7 +1167,7 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W
**Applicable parameter values**:
-- oper : Can be one of `LT` (lower than), `GT` (greater than), `LE` (lower than or equal to), `GE` (greater than or equal to), `NE` (not equal to), `EQ` (equal to), the value is case insensitive
+- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
- val : Numeric types
- unit: The unit of time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) If you do not enter a unit of time, the precision of the current database is used by default.
@@ -1232,7 +1232,7 @@ SELECT SERVER_VERSION();
### SERVER_STATUS
```sql
-SELECT SERVER_VERSION();
+SELECT SERVER_STATUS();
```
**Description**: The server status.
diff --git a/docs/en/12-taos-sql/12-distinguished.md b/docs/en/12-taos-sql/12-distinguished.md
index 7390fe983f0dff7476f606f9989aae8f73c52c0e..707089abe54fc12bb09de47c1c51af1a32b8cbcd 100644
--- a/docs/en/12-taos-sql/12-distinguished.md
+++ b/docs/en/12-taos-sql/12-distinguished.md
@@ -58,6 +58,15 @@ The following restrictions apply:
- The window clause cannot be used with a GROUP BY clause.
- `WHERE` clause can be used to specify the starting and ending time and other filter conditions
+
+### Window Pseudocolumns
+
+**\_WSTART, \_WEND, and \_WDURATION**
+
+The \_WSTART, \_WEND, and \_WDURATION pseudocolumns indicate the beginning, end, and duration of a window.
+
+These pseudocolumns occur after the aggregation clause.
+
### FILL Clause
`FILL` clause is used to specify how to fill when there is data missing in any window, including:
diff --git a/docs/en/12-taos-sql/14-stream.md b/docs/en/12-taos-sql/14-stream.md
index fcd78765104af17285b43749969821ceb98da33b..17e4e4d1b0da6d0461c9ab478a9430855379fb12 100644
--- a/docs/en/12-taos-sql/14-stream.md
+++ b/docs/en/12-taos-sql/14-stream.md
@@ -44,13 +44,13 @@ For example, the following SQL statement creates a stream and automatically crea
```sql
CREATE STREAM avg_vol_s INTO avg_vol AS
-SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
+SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
```
## Delete a Stream
```sql
-DROP STREAM [IF NOT EXISTS] stream_name
+DROP STREAM [IF EXISTS] stream_name
```
This statement deletes the stream processing service only. The data generated by the stream is retained.
diff --git a/docs/en/12-taos-sql/19-limit.md b/docs/en/12-taos-sql/19-limit.md
index 0486ea30940cdcb5d034bb730d12c0c120a59cd1..678c38a22ea763187cd0c87dceae3bf6ca03957c 100644
--- a/docs/en/12-taos-sql/19-limit.md
+++ b/docs/en/12-taos-sql/19-limit.md
@@ -30,7 +30,7 @@ The following characters cannot occur in a password: single quotation marks ('),
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
- The maximum length of a tag name is 64 bytes
- Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values cannot exceed 16 KB.
-- Maximum length of single SQL statement is 1 MB (1048576 bytes). It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576].
+- Maximum length of single SQL statement is 1 MB (1048576 bytes).
- At most 4096 columns can be returned by `SELECT`. Functions in the query statement constitute columns. An error is returned if the limit is exceeded.
- Maximum numbers of databases, STables, tables are dependent only on the system resources.
- The number of replicas can only be 1 or 3.
diff --git a/docs/en/12-taos-sql/22-meta.md b/docs/en/12-taos-sql/22-meta.md
index 796b25dcb0a425aa0ffd76a6e9b8de45ba069357..9bda5a0a1027243ea5f50c55e303fdb7155c853b 100644
--- a/docs/en/12-taos-sql/22-meta.md
+++ b/docs/en/12-taos-sql/22-meta.md
@@ -245,3 +245,35 @@ Provides dnode configuration information.
| 1 | dnode_id | INT | Dnode ID |
| 2 | name | BINARY(32) | Parameter |
| 3 | value | BINARY(64) | Value |
+
+## INS_TOPICS
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :---------: | ------------ | ------------------------------ |
+| 1 | topic_name | BINARY(192) | Topic name |
+| 2 | db_name | BINARY(64) | Database for the topic |
+| 3 | create_time | TIMESTAMP | Creation time |
+| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
+
+## INS_SUBSCRIPTIONS
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :------------: | ------------ | ------------------------ |
+| 1 | topic_name | BINARY(204) | Subscribed topic |
+| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
+| 3 | vgroup_id | INT | Vgroup ID for the consumer |
+| 4 | consumer_id | BIGINT | Consumer ID |
+
+## INS_STREAMS
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :----------: | ------------ | --------------------------------------- |
+| 1 | stream_name | BINARY(64) | Stream name |
+| 2 | create_time | TIMESTAMP | Creation time |
+| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
+| 4 | status | BINARY(20) | Current status |
+| 5 | source_db | BINARY(64) | Source database |
+| 6 | target_db | BINARY(64) | Target database |
+| 7 | target_table | BINARY(192) | Target table |
+| 8 | watermark | BIGINT | Watermark (see stream processing documentation) |
+| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) |
diff --git a/docs/en/12-taos-sql/23-perf.md b/docs/en/12-taos-sql/23-perf.md
index 10a93380220d357261914066d2fe036b8470e224..29cf3af6abfbbc06e42ae99c78f35f33a3c7c30a 100644
--- a/docs/en/12-taos-sql/23-perf.md
+++ b/docs/en/12-taos-sql/23-perf.md
@@ -61,15 +61,6 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 12 | sub_status | BINARY(1000) | Subquery status |
| 13 | sql | BINARY(1024) | SQL statement |
-## PERF_TOPICS
-
-| # | **Column** | **Data Type** | **Description** |
-| --- | :---------: | ------------ | ------------------------------ |
-| 1 | topic_name | BINARY(192) | Topic name |
-| 2 | db_name | BINARY(64) | Database for the topic |
-| 3 | create_time | TIMESTAMP | Creation time |
-| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
-
## PERF_CONSUMERS
| # | **Column** | **Data Type** | **Description** |
@@ -83,15 +74,6 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 7 | subscribe_time | TIMESTAMP | Time of first subscription |
| 8 | rebalance_time | TIMESTAMP | Time of first rebalance triggering |
-## PERF_SUBSCRIPTIONS
-
-| # | **Column** | **Data Type** | **Description** |
-| --- | :------------: | ------------ | ------------------------ |
-| 1 | topic_name | BINARY(204) | Subscribed topic |
-| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
-| 3 | vgroup_id | INT | Vgroup ID for the consumer |
-| 4 | consumer_id | BIGINT | Consumer ID |
-
## PERF_TRANS
| # | **Column** | **Data Type** | **Description** |
@@ -113,17 +95,3 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | stable_name | BINARY(192) | Supertable name |
| 4 | vgroup_id | INT | Dedicated vgroup name |
-
-## PERF_STREAMS
-
-| # | **Column** | **Data Type** | **Description** |
-| --- | :----------: | ------------ | --------------------------------------- |
-| 1 | stream_name | BINARY(64) | Stream name |
-| 2 | create_time | TIMESTAMP | Creation time |
-| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
-| 4 | status | BIANRY(20) | Current status |
-| 5 | source_db | BINARY(64) | Source database |
-| 6 | target_db | BIANRY(64) | Target database |
-| 7 | target_table | BINARY(192) | Target table |
-| 8 | watermark | BIGINT | Watermark (see stream processing documentation) |
-| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) |
diff --git a/docs/en/12-taos-sql/24-show.md b/docs/en/12-taos-sql/24-show.md
index 96503c95989b4ae2e99fa0c38181a74232e6dc23..5f3bef3546ea05745070268e1f6add25add4773b 100644
--- a/docs/en/12-taos-sql/24-show.md
+++ b/docs/en/12-taos-sql/24-show.md
@@ -3,17 +3,7 @@ sidebar_label: SHOW Statement
title: SHOW Statement for Metadata
---
-In addition to running SELECT statements on INFORMATION_SCHEMA, you can also use SHOW to obtain system metadata, information, and status.
-
-## SHOW ACCOUNTS
-
-```sql
-SHOW ACCOUNTS;
-```
-
-Shows information about tenants on the system.
-
-Note: TDengine Enterprise Edition only.
+`SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
## SHOW APPS
@@ -194,7 +184,7 @@ Shows information about streams in the system.
SHOW SUBSCRIPTIONS;
```
-Shows all subscriptions in the current database.
+Shows all subscriptions in the system.
## SHOW TABLES
diff --git a/docs/en/12-taos-sql/25-grant.md b/docs/en/12-taos-sql/25-grant.md
index 37438ee780cac17b463e0dbb1b5385d0f3965de7..b9a3fa2321c8d073845d0cf9157ce335c930e06f 100644
--- a/docs/en/12-taos-sql/25-grant.md
+++ b/docs/en/12-taos-sql/25-grant.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Permissions Management
-title: Permissions Management
+sidebar_label: Access Control
+title: User and Access Control
+description: Manage user and user's permission
---
This document describes how to manage permissions in TDengine.
diff --git a/docs/en/12-taos-sql/index.md b/docs/en/12-taos-sql/index.md
index e243cd23186a6b9286d3297e467567c26c316112..a5ffc9dc8dce158eccc0fa0519f09ba346710c31 100644
--- a/docs/en/12-taos-sql/index.md
+++ b/docs/en/12-taos-sql/index.md
@@ -1,6 +1,6 @@
---
title: TDengine SQL
-description: "The syntax supported by TDengine SQL "
+description: 'The syntax supported by TDengine SQL '
---
This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. TDengine SQL has been enhanced in version 3.0, and the query engine has been rearchitected. For information about how TDengine SQL has changed, see [Changes in TDengine 3.0](../taos-sql/changes).
@@ -15,7 +15,7 @@ Syntax Specifications used in this chapter:
- | means one of a few options, excluding | itself.
- … means the item prior to it can be repeated multiple times.
-To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
+To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
```
taos> DESCRIBE meters;
diff --git a/docs/en/13-operation/01-pkg-install.md b/docs/en/13-operation/01-pkg-install.md
index 24ef518f5c5b087567c7801b3cd6f8d34257eb1f..d7713b943f5fe8fbd5e685b8ba03ff8cc8ed4e53 100644
--- a/docs/en/13-operation/01-pkg-install.md
+++ b/docs/en/13-operation/01-pkg-install.md
@@ -15,9 +15,41 @@ About details of installing TDenine, please refer to [Installation Guide](../../
## Uninstall
-
+
+
+Apt-get package of TDengine can be uninstalled as below:
+
+```bash
+$ sudo apt-get remove tdengine
+Reading package lists... Done
+Building dependency tree
+Reading state information... Done
+The following packages will be REMOVED:
+ tdengine
+0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded.
+After this operation, 68.3 MB disk space will be freed.
+Do you want to continue? [Y/n] y
+(Reading database ... 135625 files and directories currently installed.)
+Removing tdengine (3.0.0.0) ...
+TDengine is removed successfully!
+
+```
+
+Apt-get package of taosTools can be uninstalled as below:
-TBD
+```
+$ sudo apt remove taostools
+Reading package lists... Done
+Building dependency tree
+Reading state information... Done
+The following packages will be REMOVED:
+ taostools
+0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded.
+After this operation, 68.3 MB disk space will be freed.
+Do you want to continue? [Y/n]
+(Reading database ... 147973 files and directories currently installed.)
+Removing taostools (2.1.2) ...
+```
@@ -32,6 +64,14 @@ TDengine is removed successfully!
```
+Deb package of taosTools can be uninstalled as below:
+
+```
+$ sudo dpkg -r taostools
+(Reading database ... 147973 files and directories currently installed.)
+Removing taostools (2.1.2) ...
+```
+
@@ -43,6 +83,13 @@ $ sudo rpm -e tdengine
TDengine is removed successfully!
```
+RPM package of taosTools can be uninstalled as below:
+
+```
+sudo rpm -e taostools
+taosTools is removed successfully!
+```
+
@@ -54,6 +101,15 @@ $ rmtaos
TDengine is removed successfully!
```
+tar.gz package of taosTools can be uninstalled as below:
+
+```
+$ rmtaostools
+Start to uninstall taos tools ...
+
+taos tools is uninstalled successfully!
+```
+
Run C:\TDengine\unins000.exe to uninstall TDengine on a Windows system.
diff --git a/docs/en/13-operation/03-tolerance.md b/docs/en/13-operation/03-tolerance.md
index ba9d5d75e3415ba0a7a4e1c579327b9f73e2a48f..21a5a902822d7b85f555114a112686d4e35c64aa 100644
--- a/docs/en/13-operation/03-tolerance.md
+++ b/docs/en/13-operation/03-tolerance.md
@@ -27,4 +27,4 @@ The number of dnodes in a TDengine cluster must NOT be lower than the number of
As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers.
-Alternatively, you can use taosX to synchronize the data from one TDengine cluster to another cluster in a remote location. For more information, see [taosX](../../reference/taosX).
+Alternatively, you can use taosX to synchronize the data from one TDengine cluster to another cluster in a remote location. However, taosX is only available in TDengine enterprise version, for more information please contact tdengine.com.
diff --git a/docs/en/14-reference/02-rest-api/02-rest-api.mdx b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
index 8d4186a36bb983e688ae2824f13c71f4461bebf2..ce28ee87d9317487d5c610d23287775be6b753ec 100644
--- a/docs/en/14-reference/02-rest-api/02-rest-api.mdx
+++ b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
@@ -10,7 +10,7 @@ One difference from the native connector is that the REST interface is stateless
## Installation
-The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol.
+The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol. The REST interface is provided by [taosAdapter](../taosadapter); to use the REST interface, you need to make sure `taosAdapter` is running properly.
## Verification
@@ -18,12 +18,12 @@ If the TDengine server is already installed, it can be verified as follows:
The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment.
-The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
+The following example lists all databases on the host h1.tdengine.com. To use it in your environment, replace `h1.tdengine.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
```bash
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \
-d "select name, ntables, status from information_schema.ins_databases;" \
- h1.taosdata.com:6041/rest/sql
+ h1.tdengine.com:6041/rest/sql
```
The following return value results indicate that the verification passed.
diff --git a/docs/en/14-reference/03-connector/cpp.mdx b/docs/en/14-reference/03-connector/03-cpp.mdx
similarity index 99%
rename from docs/en/14-reference/03-connector/cpp.mdx
rename to docs/en/14-reference/03-connector/03-cpp.mdx
index 5839ed4af89723dcee5e80c186af25a90ae59972..02d7df48db540a3eb44379ada7332b2838924212 100644
--- a/docs/en/14-reference/03-connector/cpp.mdx
+++ b/docs/en/14-reference/03-connector/03-cpp.mdx
@@ -1,5 +1,4 @@
---
-sidebar_position: 1
sidebar_label: C/C++
title: C/C++ Connector
---
diff --git a/docs/en/14-reference/03-connector/java.mdx b/docs/en/14-reference/03-connector/04-java.mdx
similarity index 99%
rename from docs/en/14-reference/03-connector/java.mdx
rename to docs/en/14-reference/03-connector/04-java.mdx
index 39514c37ebf45974ad90b1b7b1e548c8cd4ea672..129d90ea85d9455c1ae460b3799b5253dd3a49fc 100644
--- a/docs/en/14-reference/03-connector/java.mdx
+++ b/docs/en/14-reference/03-connector/04-java.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 2
sidebar_label: Java
title: TDengine Java Connector
description: The TDengine Java Connector is implemented on the standard JDBC API and provides native and REST connectors.
@@ -134,8 +133,6 @@ The configuration parameters in the URL are as follows:
- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is true. Enabling batch pulling and obtaining a batch of data can improve query performance when the query data volume is large.
- batchErrorIgnore:true: When executing statement executeBatch, if there is a SQL execution failure in the middle, the following SQL will continue to be executed. false: No more statements after the failed SQL are executed. The default value is: false.
-For more information about JDBC native connections, see [Video Tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html).
-
**Connect using the TDengine client-driven configuration file **
When you use a JDBC native connection to connect to a TDengine cluster, you can use the TDengine client driver configuration file to specify parameters such as `firstEp` and `secondEp` of the cluster in the configuration file as below:
diff --git a/docs/en/14-reference/03-connector/go.mdx b/docs/en/14-reference/03-connector/05-go.mdx
similarity index 99%
rename from docs/en/14-reference/03-connector/go.mdx
rename to docs/en/14-reference/03-connector/05-go.mdx
index 29263550403e71614296e52285c956040b04387f..518d3625d54492c2b6ec209302ac91ca32d03ad2 100644
--- a/docs/en/14-reference/03-connector/go.mdx
+++ b/docs/en/14-reference/03-connector/05-go.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 4
sidebar_label: Go
title: TDengine Go Connector
---
@@ -8,7 +7,7 @@ title: TDengine Go Connector
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-import Preparition from "./_preparition.mdx"
+import Preparition from "./_preparation.mdx"
import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx"
import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx"
import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx"
diff --git a/docs/en/14-reference/03-connector/rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx
similarity index 99%
rename from docs/en/14-reference/03-connector/rust.mdx
rename to docs/en/14-reference/03-connector/06-rust.mdx
index e9b16ba94d1db27d0571aad24d04492aeea32fb8..0d391c6ac308c5e9e998e2e7e3423cc5a809905e 100644
--- a/docs/en/14-reference/03-connector/rust.mdx
+++ b/docs/en/14-reference/03-connector/06-rust.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 5
sidebar_label: Rust
title: TDengine Rust Connector
---
@@ -8,7 +7,7 @@ title: TDengine Rust Connector
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-import Preparition from "./_preparition.mdx"
+import Preparition from "./_preparation.mdx"
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
diff --git a/docs/en/14-reference/03-connector/python.mdx b/docs/en/14-reference/03-connector/07-python.mdx
similarity index 96%
rename from docs/en/14-reference/03-connector/python.mdx
rename to docs/en/14-reference/03-connector/07-python.mdx
index 1a15da62d6cd94a95b62208cdf76ab6b5df1230a..d92a93fd4fd79bfa449249a16e87268b924c8475 100644
--- a/docs/en/14-reference/03-connector/python.mdx
+++ b/docs/en/14-reference/03-connector/07-python.mdx
@@ -1,5 +1,4 @@
---
-sidebar_position: 3
sidebar_label: Python
title: TDengine Python Connector
description: "taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. tasopy wraps both the native and REST interfaces of TDengine, corresponding to the two submodules of tasopy: taos and taosrest. In addition to wrapping the native and REST interfaces, taospy also provides a programming interface that conforms to the Python Data Access Specification (PEP 249), making it easy to integrate taospy with many third-party tools, such as SQLAlchemy and pandas."
@@ -8,7 +7,7 @@ description: "taospy is the official Python connector for TDengine. taospy provi
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-`taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
+`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection".
@@ -150,10 +149,19 @@ If the test is successful, it will output the server version information, e.g.
```json
{
- "status": "succ",
- "head": ["server_version()"],
- "column_meta": [["server_version()", 8, 8]],
- "data": [["2.4.0.16"]],
+ "code": 0,
+ "column_meta": [
+ [
+ "server_version()",
+ "VARCHAR",
+ 7
+ ]
+ ],
+ "data": [
+ [
+ "3.0.0.0"
+ ]
+ ],
"rows": 1
}
```
diff --git a/docs/en/14-reference/03-connector/node.mdx b/docs/en/14-reference/03-connector/08-node.mdx
similarity index 98%
rename from docs/en/14-reference/03-connector/node.mdx
rename to docs/en/14-reference/03-connector/08-node.mdx
index d1700444351d6f54f799a1c84674735800959c3c..bf7c6b95ea67dc8bf8fa1277591b549a2fd6322d 100644
--- a/docs/en/14-reference/03-connector/node.mdx
+++ b/docs/en/14-reference/03-connector/08-node.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 6
sidebar_label: Node.js
title: TDengine Node.js Connector
---
@@ -8,7 +7,7 @@ title: TDengine Node.js Connector
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-import Preparition from "./_preparition.mdx";
+import Preparition from "./_preparation.mdx";
import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx";
import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx";
import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx";
diff --git a/docs/en/14-reference/03-connector/csharp.mdx b/docs/en/14-reference/03-connector/09-csharp.mdx
similarity index 98%
rename from docs/en/14-reference/03-connector/csharp.mdx
rename to docs/en/14-reference/03-connector/09-csharp.mdx
index 388ae49d09e1ee8a7e0f012432d9bbb98da3fc45..bc16cd086bdbef4b594df6e866a019a02ae54fd8 100644
--- a/docs/en/14-reference/03-connector/csharp.mdx
+++ b/docs/en/14-reference/03-connector/09-csharp.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 7
sidebar_label: C#
title: C# Connector
---
@@ -8,7 +7,7 @@ title: C# Connector
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-import Preparition from "./_preparition.mdx"
+import Preparition from "./_preparation.mdx"
import CSInsert from "../../07-develop/03-insert-data/_cs_sql.mdx"
import CSInfluxLine from "../../07-develop/03-insert-data/_cs_line.mdx"
import CSOpenTSDBTelnet from "../../07-develop/03-insert-data/_cs_opts_telnet.mdx"
@@ -173,7 +172,6 @@ namespace TDengineExample
`Taos` is an ADO.NET connector for TDengine, supporting Linux and Windows platforms. Community contributor `Maikebing@@maikebing contributes the connector`. Please refer to:
* Interface download:
-* Usage notes:
## Frequently Asked Questions
diff --git a/docs/en/14-reference/03-connector/php.mdx b/docs/en/14-reference/03-connector/10-php.mdx
similarity index 99%
rename from docs/en/14-reference/03-connector/php.mdx
rename to docs/en/14-reference/03-connector/10-php.mdx
index 9ee89d468a2fd86381b3521796886813c0fe6b06..820f70375982eb54cdd87602b891e5f04756c0e5 100644
--- a/docs/en/14-reference/03-connector/php.mdx
+++ b/docs/en/14-reference/03-connector/10-php.mdx
@@ -1,5 +1,4 @@
---
-sidebar_position: 1
sidebar_label: PHP
title: PHP Connector
---
diff --git a/docs/en/14-reference/03-connector/_preparation.mdx b/docs/en/14-reference/03-connector/_preparation.mdx
index 07ebdbca3d891ff51a254bc1b83016f1404bb47e..c6e42ce02348595da0fdd75847d6442c285dc10a 100644
--- a/docs/en/14-reference/03-connector/_preparation.mdx
+++ b/docs/en/14-reference/03-connector/_preparation.mdx
@@ -2,7 +2,7 @@
:::info
-Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding [Windows client](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client) for TDengine.
+Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding Windows client. For details, please refer to [Install TDengine](../../get-started/package).
- libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately.
- taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately.
diff --git a/docs/en/14-reference/03-connector/03-connector.mdx b/docs/en/14-reference/03-connector/index.mdx
similarity index 100%
rename from docs/en/14-reference/03-connector/03-connector.mdx
rename to docs/en/14-reference/03-connector/index.mdx
diff --git a/docs/en/14-reference/04-taosadapter.md b/docs/en/14-reference/04-taosadapter.md
index 31310b0f3e4f6fae7e65328baf4f9ad5d8b7b22f..e7ea620d0bed3aee3ff0acf8063120acca33c335 100644
--- a/docs/en/14-reference/04-taosadapter.md
+++ b/docs/en/14-reference/04-taosadapter.md
@@ -329,4 +329,4 @@ In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) conta
| 3 | telegrafUseFieldNum | See the taosAdapter telegraf configuration method | |
| 4 | restfulRowLimit | restfulRowLimit | Embedded httpd outputs 10240 rows of data by default, the maximum allowed is 102400. taosAdapter also provides restfulRowLimit but it is not limited by default. You can configure it according to the actual scenario.
| 5 | httpDebugFlag | Not applicable | httpdDebugFlag does not work for taosAdapter |
-| 6 | httpDBNameMandatory | N/A | taosAdapter requires the database name to be specified in the URL |
\ No newline at end of file
+| 6 | httpDBNameMandatory | N/A | taosAdapter requires the database name to be specified in the URL |
diff --git a/docs/en/14-reference/06-taosdump.md b/docs/en/14-reference/06-taosdump.md
index 2105ba83fad9700674e28609016b07ef6de66833..e73441a96b087062b2e3912ed73010fc3e761bb9 100644
--- a/docs/en/14-reference/06-taosdump.md
+++ b/docs/en/14-reference/06-taosdump.md
@@ -116,5 +116,4 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
Mandatory or optional arguments to long options are also mandatory or optional
for any corresponding short options.
-Report bugs to .
```
diff --git a/docs/en/14-reference/07-tdinsight/index.md b/docs/en/14-reference/07-tdinsight/index.md
index e74c9de7b2aa71278a99d45f250e0dcaf86d4704..2e562035254311f2caa0b6d4512842080aab64d5 100644
--- a/docs/en/14-reference/07-tdinsight/index.md
+++ b/docs/en/14-reference/07-tdinsight/index.md
@@ -263,7 +263,7 @@ Once the import is complete, the full page view of TDinsight is shown below.
## TDinsight dashboard details
-The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources [dnodes, mnodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster) or databases.
+The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources [dnodes, mnodes, vnodes](../../taos-sql/node/) or databases.
Details of the metrics are as follows.
diff --git a/docs/en/14-reference/11-docker/index.md b/docs/en/14-reference/11-docker/index.md
index 3528fd194b0dd40b2bf67dd3e05a42d93a47ab05..7cd1e810dca010d16b0f2e257d47e012c6ef06cc 100644
--- a/docs/en/14-reference/11-docker/index.md
+++ b/docs/en/14-reference/11-docker/index.md
@@ -72,7 +72,7 @@ Next, ensure the hostname "tdengine" is resolvable in `/etc/hosts`.
echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts
```
-Finally, the TDengine service can be accessed from the taos shell or any connector with "tdengine" as the server address.
+Finally, the TDengine service can be accessed from the TDengine CLI or any connector with "tdengine" as the server address.
```shell
taos -h tdengine -P 6030
@@ -115,8 +115,8 @@ If you want to start your application in a container, you need to add the corres
```docker
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
-ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+ENV TDENGINE_VERSION=3.0.0.0
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -216,8 +216,8 @@ Here is the full Dockerfile:
```docker
FROM golang:1.17.6-buster as builder
-ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+ENV TDENGINE_VERSION=3.0.0.0
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -232,8 +232,8 @@ RUN go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
-ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+ENV TDENGINE_VERSION=3.0.0.0
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -320,7 +320,7 @@ password: taosdata
2. Start the cluster
```shell
- $ VERSION=2.4.0.0 docker-compose up -d
+ $ VERSION=3.0.0.0 docker-compose up -d
Creating network "test_default" with the default driver
Creating volume "test_taosdata-td1" with default driver
Creating volume "test_taoslog-td1" with default driver
@@ -457,7 +457,7 @@ If you want to deploy a container-based TDengine cluster on multiple hosts, you
The docker-compose file can refer to the previous section. Here is the command to start TDengine with docker swarm:
```shell
-$ VERSION=2.4.0 docker stack deploy -c docker-compose.yml taos
+$ VERSION=3.0.0.0 docker stack deploy -c docker-compose.yml taos
Creating network taos_inter
Creating network taos_api
Creating service taos_arbitrator
@@ -473,20 +473,20 @@ Checking status:
$ docker stack ps taos
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
79ni8temw59n taos_nginx.1 nginx:latest TM1701 Running Running about a minute ago
-3e94u72msiyg taos_adapter.1 tdengine/tdengine:2.4.0 TM1702 Running Running 56 seconds ago
-100amjkwzsc6 taos_td-2.1 tdengine/tdengine:2.4.0 TM1703 Running Running about a minute ago
-pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:2.4.0 TM1704 Running Running 2 minutes ago
-tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:2.4.0 TM1705 Running Running 2 minutes ago
-rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:2.4.0 TM1706 Running Running 56 seconds ago
-i2augxamfllf taos_adapter.3 tdengine/tdengine:2.4.0 TM1707 Running Running 56 seconds ago
-lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:2.4.0 TM1708 Running Running 56 seconds ago
+3e94u72msiyg taos_adapter.1 tdengine/tdengine:3.0.0.0 TM1702 Running Running 56 seconds ago
+100amjkwzsc6 taos_td-2.1 tdengine/tdengine:3.0.0.0 TM1703 Running Running about a minute ago
+pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:3.0.0.0 TM1704 Running Running 2 minutes ago
+tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:3.0.0.0 TM1705 Running Running 2 minutes ago
+rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:3.0.0.0 TM1706 Running Running 56 seconds ago
+i2augxamfllf taos_adapter.3 tdengine/tdengine:3.0.0.0 TM1707 Running Running 56 seconds ago
+lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:3.0.0.0 TM1708 Running Running 56 seconds ago
$ docker service ls
ID NAME MODE REPLICAS IMAGE PORTS
-561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:2.4.0
-3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:2.4.0
+561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:3.0.0.0
+3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:3.0.0.0
d8qr52envqzu taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp
-2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:2.4.0
-9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:2.4.0
+2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:3.0.0.0
+9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:3.0.0.0
```
From the above output, you can see two dnodes, two taosAdapters, and one Nginx reverse proxy service.
@@ -502,5 +502,5 @@ verify: Service converged
$ docker service ls -f name=taos_adapter
ID NAME MODE REPLICAS IMAGE PORTS
-561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:2.4.0
+561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:3.0.0.0
```
diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md
index 88a4aa2d81a1a3462a93f61ad919e0fe530b92e8..02921c3f6a4ce21175504c3c07bd51bb4a3dcf60 100644
--- a/docs/en/14-reference/12-config/index.md
+++ b/docs/en/14-reference/12-config/index.md
@@ -82,13 +82,12 @@ The parameters described in this document by the effect that they have on the sy
| Protocol | Default Port | Description | How to configure |
| :------- | :----------- | :----------------------------------------------- | :--------------------------------------------------------------------------------------------- |
| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. serverPort |
-| TCP | 6041 | REST connection between client and server | refer to [taosAdapter](../taosadapter/) |
+| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
| TCP | 6043 | Service Port of TaosKeeper | The parameter of TaosKeeper |
| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters.
| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters.
| TCP | 6060 | Port of Monitoring Service in Enterprise version | |
-
### maxShellConns
| Attribute | Description |
@@ -345,7 +344,7 @@ The charset that takes effect is UTF-8.
| Attribute | Description |
| -------- | --------------------------------- |
| Applicable | Server and Client |
-| Meaning | The interval for taos shell to send heartbeat to mnode |
+| Meaning | The interval for TDengine CLI to send heartbeat to mnode |
| Unit | second |
| Value Range | 1-120 |
| Default Value | 3 |
diff --git a/docs/en/14-reference/13-schemaless/13-schemaless.md b/docs/en/14-reference/13-schemaless/13-schemaless.md
index 816ebe0047c0dbf1d3031c51e6b481ab0cff2840..4f50c38cbbfda9d8d8567517f9109f18e2007988 100644
--- a/docs/en/14-reference/13-schemaless/13-schemaless.md
+++ b/docs/en/14-reference/13-schemaless/13-schemaless.md
@@ -1,6 +1,6 @@
---
title: Schemaless Writing
-description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface."
+description: 'The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface.'
---
In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. Schemaless writing automatically creates storage structures for your data as it is being written to TDengine, so that you do not need to create supertables in advance. When necessary, schemaless writing
@@ -25,7 +25,7 @@ where:
- measurement will be used as the data table name. It will be separated from tag_set by a comma.
- `tag_set` will be used as tags, with format like `=,=` Enter a space between `tag_set` and `field_set`.
- `field_set`will be used as data columns, with format like `=,=` Enter a space between `field_set` and `timestamp`.
-- `timestamp` is the primary key timestamp corresponding to this row of data
+- `timestamp` is the primary key timestamp corresponding to this row of data
All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes (").
@@ -36,14 +36,14 @@ In the schemaless writing data line protocol, each data item in the field_set ne
- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All refer to the ASCII character)
- Numeric types will be distinguished from data types by the suffix.
-| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
-| -------- | -------- | ------------ | -------------- |
-| 1 | None or f64 | double | 8 |
-| 2 | f32 | float | 4 |
-| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
-| 4 | i16/u16 | SmallInt/USmallInt | 2 |
-| 5 | i32/u32 | Int/UInt | 4 |
-| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
+| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
+| ----------------- | ----------- | ----------------------------- | ---------------- |
+| 1 | None or f64 | double | 8 |
+| 2 | f32 | float | 4 |
+| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
+| 4 | i16/u16 | SmallInt/USmallInt | 2 |
+| 5 | i32/u32 | Int/UInt | 4 |
+| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
- `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
@@ -61,7 +61,7 @@ Note that if the wrong case is used when describing the data type suffix, or if
Schemaless writes process row data according to the following principles.
-1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the next string:
+1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the next string:
```json
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
@@ -82,7 +82,7 @@ You can configure smlChildTableName to specify table names, for example, `smlChi
:::tip
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
-16KB. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
+16KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
:::
@@ -90,23 +90,23 @@ All processing logic of schemaless will still follow TDengine's underlying restr
Three specified modes are supported in the schemaless writing process, as follows:
-| **Serial** | **Value** | **Description** |
-| -------- | ------------------- | ------------------------------- |
-| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
-| 2 | SML_TELNET_PROTOCOL | OpenTSDB file protocol |
-| 3 | SML_JSON_PROTOCOL | OpenTSDB JSON protocol |
+| **Serial** | **Value** | **Description** |
+| ---------- | ------------------- | ---------------------- |
+| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
+| 2 | SML_TELNET_PROTOCOL | OpenTSDB file protocol |
+| 3 | SML_JSON_PROTOCOL | OpenTSDB JSON protocol |
In InfluxDB line protocol mode, you must specify the precision of the input timestamp. Valid precisions are described in the following table.
-| **No.** | **Precision** | **Description** |
-| -------- | --------------------------------- | -------------- |
-| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) |
-| 2 | TSDB_SML_TIMESTAMP_HOURS | Hours |
-| 3 | TSDB_SML_TIMESTAMP_MINUTES | Minutes |
-| 4 | TSDB_SML_TIMESTAMP_SECONDS | Seconds |
-| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | Milliseconds |
-| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | Microseconds |
-| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | Nanoseconds |
+| **No.** | **Precision** | **Description** |
+| ------- | --------------------------------- | --------------------- |
+| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) |
+| 2 | TSDB_SML_TIMESTAMP_HOURS | Hours |
+| 3 | TSDB_SML_TIMESTAMP_MINUTES | Minutes |
+| 4 | TSDB_SML_TIMESTAMP_SECONDS | Seconds |
+| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | Milliseconds |
+| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | Microseconds |
+| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | Nanoseconds |
In OpenTSDB file and JSON protocol modes, the precision of the timestamp is determined from its length in the standard OpenTSDB manner. User input is ignored.
diff --git a/docs/en/14-reference/14-taosKeeper.md b/docs/en/14-reference/14-taosKeeper.md
index 476b5a1fd20b4dce4379026a6300ae8e26db6656..665bc75380d4f59666d792d074fb37c65c810264 100644
--- a/docs/en/14-reference/14-taosKeeper.md
+++ b/docs/en/14-reference/14-taosKeeper.md
@@ -1,7 +1,7 @@
---
sidebar_label: taosKeeper
title: taosKeeper
-description: Instructions and tips for using taosKeeper
+description: exports TDengine monitoring metrics.
---
## Introduction
@@ -22,26 +22,35 @@ You can compile taosKeeper separately and install it. Please refer to the [taosK
### Configuration and running methods
-
-taosKeeper needs to be executed on the terminal of the operating system. To run taosKeeper, see [configuration file](#configuration-file-parameters-in-detail).
+taosKeeper needs to be executed on the terminal of the operating system. It supports three configuration methods: [Command-line arguments](#command-line-arguments-in-detail), [environment variable](#environment-variable-in-detail) and [configuration file](#configuration-file-parameters-in-detail). Command-line arguments take precedence over environment variables, which take precedence over the configuration file.
**Make sure that the TDengine cluster is running correctly before running taosKeeper. ** Ensure that the monitoring service in TDengine has been started. For more information, see [TDengine Monitoring Configuration](../config/#monitoring).
-
+### Environment variable
+
+You can use environment variables to run taosKeeper and control its behavior:
+
+```shell
+$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
+
+$ taoskeeper
+```
+
+You can run `taoskeeper -h` for more details.
+
### Configuration File
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used.
```shell
-taoskeeper -c
+$ taoskeeper -c
```
**Sample configuration files**
@@ -110,7 +119,7 @@ Query OK, 1 rows in database (0.036162s)
#### Export Monitoring Metrics
```shell
-curl http://127.0.0.1:6043/metrics
+$ curl http://127.0.0.1:6043/metrics
```
Sample result set (excerpt):
diff --git a/docs/en/20-third-party/09-emq-broker.md b/docs/en/20-third-party/09-emq-broker.md
index 0900dd3d7571dc0ab8d93174aa2d7b5eccf1fbf5..2ead1bbaf40f06fec2a5cbf85e46fdfdcc5216df 100644
--- a/docs/en/20-third-party/09-emq-broker.md
+++ b/docs/en/20-third-party/09-emq-broker.md
@@ -9,7 +9,7 @@ MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emq
The following preparations are required for EMQX to add TDengine data sources correctly.
- The TDengine cluster is deployed and working properly
-- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
+- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- If you use the emulated writers described later, you need to install the appropriate version of Node.js. V12 is recommended.
## Install and start EMQX
@@ -28,8 +28,6 @@ USE test;
CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volume FLOAT, pm10 FLOAT, pm25 FLOAT, so2 FLOAT, no2 FLOAT, co FLOAT, sensor_id NCHAR(255), area TINYINT, coll_time TIMESTAMP);
```
-Note: The table schema is based on the blog [(In Chinese) Data Transfer, Storage, Presentation, EMQX + TDengine Build MQTT IoT Data Visualization Platform](https://www.taosdata.com/blog/2020/08/04/1722.html) as an example. Subsequent operations are carried out with this blog scenario too. Please modify it according to your actual application scenario.
-
## Configuring EMQX Rules
Since the configuration interface of EMQX differs from version to version, here is v4.4.5 as an example. For other versions, please refer to the corresponding official documentation.
@@ -137,5 +135,5 @@ Use the TDengine CLI program to log in and query the appropriate databases and t
![TDengine Database EMQX result in taos](./emqx/check-result-in-taos.webp)
-Please refer to the [TDengine official documentation](https://docs.taosdata.com/) for more details on how to use TDengine.
+Please refer to the [TDengine official documentation](https://docs.tdengine.com/) for more details on how to use TDengine.
EMQX Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use EMQX.
diff --git a/docs/en/20-third-party/12-google-data-studio.md b/docs/en/20-third-party/12-google-data-studio.md
new file mode 100644
index 0000000000000000000000000000000000000000..fc94f98056bbeeeec88ca7ea12a4a6a7e6f15dc5
--- /dev/null
+++ b/docs/en/20-third-party/12-google-data-studio.md
@@ -0,0 +1,36 @@
+---
+sidebar_label: Google Data Studio
+title: Use Google Data Studio to access TDengine
+---
+
+Data Studio is a powerful tool for reporting and visualization, offering a wide variety of charts and connectors and making it easy to generate reports based on predefined templates. Its ease of use and robust ecosystem have made it one of the first choices for people working in data analysis.
+
+TDengine is a high-performance, scalable time-series database that supports SQL. Many businesses and developers in fields spanning from IoT and Industry Internet to IT and finance are using TDengine as their time-series database management solution.
+
+The TDengine team immediately saw the benefits of using TDengine to process time-series data with Data Studio to analyze it, and they got to work to create a connector for Data Studio.
+
+With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for “TDengine”.
+
+![02](gds/gds-02.png.webp)
+
+Select the TDengine connector and click Authorize.
+
+![03](gds/gds-03.png.webp)
+
+Then sign in to your Google Account and click Allow to enable the connection to TDengine.
+
+![04](gds/gds-04.png.webp)
+
+In the Enter URL field, type the hostname and port of the server running the TDengine REST service. In the following fields, type your username, password, database name, table name, and the start and end times of your query range. Then, click Connect.
+
+![05](gds/gds-05.png.webp)
+
+After the connection is established, you can use Data Studio to process your data and create reports.
+
+![06](gds/gds-06.png.webp)
+
+In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data – some examples are shown below.
+
+![07](gds/gds-07.png.webp)
+
+With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we’re sure you’ll be able to gain new insights and obtain even more value from your data.
diff --git a/docs/en/20-third-party/emqx/client-num.webp b/docs/en/20-third-party/emqx/client-num.webp
index a151b184843607d67b649babb3145bfb3e329cda..a8ac6fb4c036b1db697d537472732ca854e583c8 100644
Binary files a/docs/en/20-third-party/emqx/client-num.webp and b/docs/en/20-third-party/emqx/client-num.webp differ
diff --git a/docs/en/20-third-party/gds/gds-01.webp b/docs/en/20-third-party/gds/gds-01.webp
new file mode 100644
index 0000000000000000000000000000000000000000..2e5f9e4ff5db1e37718e2397c9a13a9f0e05602d
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-01.webp differ
diff --git a/docs/en/20-third-party/gds/gds-02.png.webp b/docs/en/20-third-party/gds/gds-02.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..3b3537f5a488019482f94452e70bd1bd79867ab5
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-02.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-03.png.webp b/docs/en/20-third-party/gds/gds-03.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..5719436d5b2f21aa861067b966511e4b34d17dce
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-03.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-04.png.webp b/docs/en/20-third-party/gds/gds-04.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..ddaae5c1a63b6b4db692e12491df55b88dcaadee
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-04.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-05.png.webp b/docs/en/20-third-party/gds/gds-05.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..9a917678fc7e60f0a739fa1e2b0f4fa010d12708
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-05.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-06.png.webp b/docs/en/20-third-party/gds/gds-06.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..c76b68d32b5907bd5ba4e4010456f2ca5303448f
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-06.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-07.png.webp b/docs/en/20-third-party/gds/gds-07.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..1386ae9c4db4f2465dd071afc5a047658b47031c
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-07.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-08.png.webp b/docs/en/20-third-party/gds/gds-08.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..59dcf8b31df8bde8d4073ee0c7b1c7bdd7bd439d
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-08.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-09.png.webp b/docs/en/20-third-party/gds/gds-09.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..b94439f211a814f66d41231c9386c57f3ffe8322
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-09.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-10.png.webp b/docs/en/20-third-party/gds/gds-10.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a63cad9e9a3d412b1132359506530498fb1a0e57
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-10.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-11.png.webp b/docs/en/20-third-party/gds/gds-11.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..fc38cd9a29c00afa48238741c33b439f737a7b8f
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-11.png.webp differ
diff --git a/docs/en/20-third-party/import_dashboard.webp b/docs/en/20-third-party/import_dashboard.webp
new file mode 100644
index 0000000000000000000000000000000000000000..164e3f4690a5a55f937a3c29e1e8ca026648e6b1
Binary files /dev/null and b/docs/en/20-third-party/import_dashboard.webp differ
diff --git a/docs/en/20-third-party/import_dashboard1.webp b/docs/en/20-third-party/import_dashboard1.webp
new file mode 100644
index 0000000000000000000000000000000000000000..d4fb374ce8bb75c8a0fbdbb9cab5b30eb29ab06d
Binary files /dev/null and b/docs/en/20-third-party/import_dashboard1.webp differ
diff --git a/docs/en/20-third-party/import_dashboard2.webp b/docs/en/20-third-party/import_dashboard2.webp
new file mode 100644
index 0000000000000000000000000000000000000000..9f74dc96be20ab64b5fb555aaccdaa1c1139b35c
Binary files /dev/null and b/docs/en/20-third-party/import_dashboard2.webp differ
diff --git a/docs/en/21-tdinternal/01-arch.md b/docs/en/21-tdinternal/01-arch.md
index 44651c0496481c410640e577aaad5781f846e302..2f876adffc2543bb9f117e5812ccc5241d7a6d99 100644
--- a/docs/en/21-tdinternal/01-arch.md
+++ b/docs/en/21-tdinternal/01-arch.md
@@ -12,6 +12,7 @@ The design of TDengine is based on the assumption that any hardware or software
Logical structure diagram of TDengine's distributed architecture is as follows:
![TDengine Database architecture diagram](structure.webp)
+
Figure 1: TDengine architecture diagram
A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDengine client driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit.
@@ -38,15 +39,16 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
**Cluster external connection**: TDengine cluster can accommodate a single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted.
-**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode:
+**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode:
-1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step;
+1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step;
2. Check the system configuration file taos.cfg to obtain node configuration parameters “firstEp” and “secondEp” (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist or do not exist in taos.cfg, or are invalid, skip to the third step;
3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again.
**The choice of MNODE**: TDengine logically has a management node, but there is no separate execution code. The server-side only has one set of execution code, taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, totally transparently and without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage.
-**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster.
+**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster.
+
- Step1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"
- Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
@@ -57,6 +59,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process.
![typical process of TDengine Database](message.webp)
+
Figure 2: Typical process of TDengine
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
@@ -121,16 +124,17 @@ The load balancing process does not require any manual intervention, and it is t
If a database has N replicas, a virtual node group has N virtual nodes. But only one is the Leader and all others are slaves. When the application writes a new record to system, only the Leader vnode can accept the writing request. If a follower vnode receives a writing request, the system will notifies TAOSC to redirect.
-### Leader vnode Writing Process
+### Leader vnode Writing Process
Leader Vnode uses a writing process as follows:
![TDengine Database Leader Writing Process](write_master.webp)
+
Figure 3: TDengine Leader writing process
1. Leader vnode receives the application data insertion request, verifies, and moves to next step;
2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file;
-3. If there are multiple replicas, vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
+3. If there are multiple replicas, vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
4. Write into memory and add the record to “skip list”;
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
6. If any of Step 2, 3 or 4 fails, the error will directly return to the application.
@@ -140,6 +144,7 @@ Leader Vnode uses a writing process as follows:
For a follower vnode, the write process as follows:
![TDengine Database Follower Writing Process](write_slave.webp)
+
Figure 4: TDengine Follower Writing Process
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
@@ -212,6 +217,7 @@ When data is written to disk, the system decideswhether to compress the data bas
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”.
dataDir format is as follows:
+
```
dataDir data_path [tier_level]
```
@@ -270,6 +276,7 @@ For the data collected by device D1001, the number of records per hour is counte
TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable (super table). STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tag. There can be multiple tags which can be added, deleted and modified at any time. Applications can aggregate or statistically operate on all or a subset of tables under a STABLE by specifying tag filters. This greatly simplifies the development of applications. The process is shown in the following figure:
![TDengine Database Diagram of multi-table aggregation query](multi_tables.webp)
+
Figure 5: Diagram of multi-table aggregation query
1. Application sends a query condition to system;
@@ -279,9 +286,8 @@ TDengine creates a separate table for each data collection point, but in practic
5. Each vnode first finds the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to TAOSC;
6. TAOSC finally aggregates the results returned by multiple data nodes and send them back to application.
-Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details.
+Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TDengine SQL for details.
### Precomputation
In order to effectively improve the performance of query processing, based-on the unchangeable feature of IoT data, statistical information of data stored in data block is recorded in the head of data block, including max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are directly used, and no need to read the data block contents at all. Since the amount of pre-calculated data is much smaller than the actual size of data block stored on disk, for query processing with disk IO as bottleneck, the use of pre-calculated results can greatly reduce the pressure of reading IO and accelerate the query process. The precomputation mechanism is similar to the BRIN (Block Range Index) of PostgreSQL.
-
diff --git a/docs/en/25-application/01-telegraf.md b/docs/en/25-application/01-telegraf.md
index d30a23fe1b942e1411e8b5f1320e1c54ae2b407f..59491152bcda3e26ec12aaa59ac1041ef23c4e7e 100644
--- a/docs/en/25-application/01-telegraf.md
+++ b/docs/en/25-application/01-telegraf.md
@@ -34,7 +34,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa
### TDengine
-Download the latest TDengine-server 2.4.0.x or above from the [Downloads](http://taosdata.com/cn/all-downloads/) page on the TAOSData website and install it.
+Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it.
## Data Connection Setup
@@ -79,5 +79,5 @@ Click on the plus icon on the left and select `Import` to get the data from `htt
## Wrap-up
-The above demonstrates how to quickly build a IT DevOps visualization system. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system in just a few minutes.
+The above demonstrates how to quickly build an IT DevOps visualization system. Thanks to the schemaless protocol parsing feature in TDengine and the ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system in just a few minutes.
Please refer to the official documentation and product implementation cases for other features.
diff --git a/docs/en/25-application/02-collectd.md b/docs/en/25-application/02-collectd.md
index 1733ed1b1af8c9375c3773d1ca86831396499a78..692cd8d929f04a03e4433bd0b71f84101bc362cb 100644
--- a/docs/en/25-application/02-collectd.md
+++ b/docs/en/25-application/02-collectd.md
@@ -37,7 +37,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa
### Install TDengine
-Download the latest TDengine-server 2.4.0.x or above from the [Downloads](http://taosdata.com/cn/all-downloads/) page on the TAOSData website and install it.
+Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it.
## Data Connection Setup
@@ -99,6 +99,6 @@ Download the dashboard json from `https://github.com/taosdata/grafanaplugin/blob
## Wrap-up
-TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system, or adapt an existing system, in just a few minutes.
+TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. Thanks to the schemaless protocol parsing feature in TDengine and the ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system, or adapt an existing system, in just a few minutes.
For TDengine's powerful data writing and querying performance and other features, please refer to the official documentation and successful product implementation cases.
diff --git a/docs/en/25-application/03-immigrate.md b/docs/en/25-application/03-immigrate.md
index 9614574c71b0a28853de413ea6928101da899bf7..1aabaa43e77660d72bca00d7d59cdee69b1a7c92 100644
--- a/docs/en/25-application/03-immigrate.md
+++ b/docs/en/25-application/03-immigrate.md
@@ -41,7 +41,7 @@ The agents deployed in the application nodes are responsible for providing opera
- **TDengine installation and deployment**
-First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to the blog ["Installation and Uninstallation of TDengine Multiple Installation Packages"](https://www.taosdata.com/blog/2019/08/09/566.html).
+First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to [Install TDengine](../../get-started/package)
Note that once the installation is complete, do not start the `taosd` service before properly configuring the parameters.
@@ -51,7 +51,7 @@ TDengine version 2.4 and later version includes `taosAdapter`. taosAdapter is a
Users can flexibly deploy taosAdapter instances, based on their requirements, to improve data writing throughput and provide guarantees for data writes in different application scenarios.
-Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/).
+Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](../../reference/taosadapter/).
If using collectd, modify the configuration file in its default location `/etc/collectd/collectd.conf` to point to the IP address and port of the node where to deploy taosAdapter. For example, assuming the taosAdapter IP address is 192.168.1.130 and port 6046, configure it as follows.
@@ -411,7 +411,7 @@ TDengine provides a wealth of help documents to explain many aspects of cluster
### Cluster Deployment
-The first is TDengine installation. Download the latest stable version of TDengine from the official website, and install it. Please refer to the blog ["Installation and Uninstallation of Various Installation Packages of TDengine"](https://www.taosdata.com/blog/2019/08/09/566.html) for the various installation package formats.
+The first is TDengine installation. Download the latest stable version of TDengine from the official website, and install it. Please refer to [Install TDengine](../../get-started/package) for more details.
Note that once the installation is complete, do not immediately start the `taosd` service, but start it after correctly configuring the parameters.
diff --git a/docs/en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp b/docs/en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp
index 147a65b17bff2aa0e44faa206618bdce5664e1ca..a8b52f3b2d6efe2b83c087d81b98c89fa941f263 100644
Binary files a/docs/en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp and b/docs/en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp differ
diff --git a/docs/en/25-application/IT-DevOps-Solutions-Telegraf.webp b/docs/en/25-application/IT-DevOps-Solutions-Telegraf.webp
index fd5461ec9b37be66cac4c17fb1f81fec76158330..fac96f4eb63232c405bfb5e09eda7ed00901a7b5 100644
Binary files a/docs/en/25-application/IT-DevOps-Solutions-Telegraf.webp and b/docs/en/25-application/IT-DevOps-Solutions-Telegraf.webp differ
diff --git a/docs/en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp b/docs/en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp
index 105afcdb8312b23675f62ff6339d5e737b5cd958..fac96f4eb63232c405bfb5e09eda7ed00901a7b5 100644
Binary files a/docs/en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp and b/docs/en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp differ
diff --git a/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java b/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java
index af97fe4373ca964260e5614f133f359e229b0e15..9d85bf2a94abda71bcdab89d46008b70e52ce437 100644
--- a/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java
+++ b/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java
@@ -16,14 +16,14 @@ public class RestInsertExample {
private static List getRawData() {
return Arrays.asList(
- "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2",
- "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2",
- "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2",
- "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3",
- "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2",
- "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2",
- "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3",
- "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3"
+ "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,'California.SanFrancisco',2",
+ "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,'California.SanFrancisco',2",
+ "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,'California.SanFrancisco',2",
+ "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,'California.SanFrancisco',3",
+ "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,'California.LosAngeles',2",
+ "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,'California.LosAngeles',2",
+ "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,'California.LosAngeles',3",
+ "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,'California.LosAngeles',3"
);
}
diff --git a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
index 50e8b357719fc6d1f4707e474afdf58fb4531970..179e6e6911185631901b79e34a343967e73c4936 100644
--- a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
+++ b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
@@ -57,7 +57,7 @@ public class SubscribeDemo {
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
properties.setProperty(TMQConstants.GROUP_ID, "test");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
- "com.taosdata.jdbc.MetersDeserializer");
+ "com.taos.example.MetersDeserializer");
// poll data
try (TaosConsumer consumer = new TaosConsumer<>(properties)) {
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java
new file mode 100644
index 0000000000000000000000000000000000000000..04b149a4b96441ecfd1b0bdde54c9ed71349cab2
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java
@@ -0,0 +1,63 @@
+package com.taos.example.highvolume;
+
+import java.sql.*;
+
+/**
+ * Prepare target database.
+ * Count total records in database periodically so that we can estimate the writing speed.
+ */
+public class DataBaseMonitor {
+ private Connection conn;
+ private Statement stmt;
+
+ public DataBaseMonitor init() throws SQLException {
+ if (conn == null) {
+ String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
+ conn = DriverManager.getConnection(jdbcURL);
+ stmt = conn.createStatement();
+ }
+ return this;
+ }
+
+ public void close() {
+ try {
+ stmt.close();
+ } catch (SQLException e) {
+ }
+ try {
+ conn.close();
+ } catch (SQLException e) {
+ }
+ }
+
+ public void prepareDatabase() throws SQLException {
+ stmt.execute("DROP DATABASE IF EXISTS test");
+ stmt.execute("CREATE DATABASE test");
+ stmt.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
+ }
+
+ public Long count() throws SQLException {
+ if (!stmt.isClosed()) {
+ ResultSet result = stmt.executeQuery("SELECT count(*) from test.meters");
+ result.next();
+ return result.getLong(1);
+ }
+ return null;
+ }
+
+ /**
+ * show test.stables;
+ *
+ * name | created_time | columns | tags | tables |
+ * ============================================================================================
+ * meters | 2022-07-20 08:39:30.902 | 4 | 2 | 620000 |
+ */
+ public Long getTableCount() throws SQLException {
+ if (!stmt.isClosed()) {
+ ResultSet result = stmt.executeQuery("show test.stables");
+ result.next();
+ return result.getLong(5);
+ }
+ return null;
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java
new file mode 100644
index 0000000000000000000000000000000000000000..41b59551ca69a4056c2f2b572d169bd08dc4fcfe
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java
@@ -0,0 +1,70 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.*;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+
+
+public class FastWriteExample {
+ final static Logger logger = LoggerFactory.getLogger(FastWriteExample.class);
+
+ final static int taskQueueCapacity = 1000000;
+ final static List> taskQueues = new ArrayList<>();
+ final static List readTasks = new ArrayList<>();
+ final static List writeTasks = new ArrayList<>();
+ final static DataBaseMonitor databaseMonitor = new DataBaseMonitor();
+
+ public static void stopAll() {
+ logger.info("shutting down");
+ readTasks.forEach(task -> task.stop());
+ writeTasks.forEach(task -> task.stop());
+ databaseMonitor.close();
+ }
+
+ public static void main(String[] args) throws InterruptedException, SQLException {
+ int readTaskCount = args.length > 0 ? Integer.parseInt(args[0]) : 1;
+ int writeTaskCount = args.length > 1 ? Integer.parseInt(args[1]) : 3;
+ int tableCount = args.length > 2 ? Integer.parseInt(args[2]) : 1000;
+ int maxBatchSize = args.length > 3 ? Integer.parseInt(args[3]) : 3000;
+
+ logger.info("readTaskCount={}, writeTaskCount={} tableCount={} maxBatchSize={}",
+ readTaskCount, writeTaskCount, tableCount, maxBatchSize);
+
+ databaseMonitor.init().prepareDatabase();
+
+        // Create task queues and writing tasks, and start the writing threads.
+ for (int i = 0; i < writeTaskCount; ++i) {
+ BlockingQueue queue = new ArrayBlockingQueue<>(taskQueueCapacity);
+ taskQueues.add(queue);
+ WriteTask task = new WriteTask(queue, maxBatchSize);
+ Thread t = new Thread(task);
+ t.setName("WriteThread-" + i);
+ t.start();
+ }
+
+ // create reading tasks and start reading threads
+ int tableCountPerTask = tableCount / readTaskCount;
+ for (int i = 0; i < readTaskCount; ++i) {
+ ReadTask task = new ReadTask(i, taskQueues, tableCountPerTask);
+ Thread t = new Thread(task);
+ t.setName("ReadThread-" + i);
+ t.start();
+ }
+
+ Runtime.getRuntime().addShutdownHook(new Thread(FastWriteExample::stopAll));
+
+ long lastCount = 0;
+ while (true) {
+ Thread.sleep(10000);
+ long numberOfTable = databaseMonitor.getTableCount();
+ long count = databaseMonitor.count();
+ logger.info("numberOfTable={} count={} speed={}", numberOfTable, count, (count - lastCount) / 10);
+ lastCount = count;
+ }
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java
new file mode 100644
index 0000000000000000000000000000000000000000..6fe83f002ebcb9d82e026e9a32886fd22bfefbe9
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java
@@ -0,0 +1,53 @@
+package com.taos.example.highvolume;
+
+import java.util.Iterator;
+
+/**
+ * Generate test data
+ */
+class MockDataSource implements Iterator {
+ private String tbNamePrefix;
+ private int tableCount;
+ private long maxRowsPerTable = 1000000000L;
+
+ // 100 milliseconds between two neighbouring rows.
+ long startMs = System.currentTimeMillis() - maxRowsPerTable * 100;
+ private int currentRow = 0;
+ private int currentTbId = -1;
+
+ // mock values
+ String[] location = {"LosAngeles", "SanDiego", "Hollywood", "Compton", "San Francisco"};
+ float[] current = {8.8f, 10.7f, 9.9f, 8.9f, 9.4f};
+ int[] voltage = {119, 116, 111, 113, 118};
+ float[] phase = {0.32f, 0.34f, 0.33f, 0.329f, 0.141f};
+
+ public MockDataSource(String tbNamePrefix, int tableCount) {
+ this.tbNamePrefix = tbNamePrefix;
+ this.tableCount = tableCount;
+ }
+
+ @Override
+ public boolean hasNext() {
+ currentTbId += 1;
+ if (currentTbId == tableCount) {
+ currentTbId = 0;
+ currentRow += 1;
+ }
+ return currentRow < maxRowsPerTable;
+ }
+
+ @Override
+ public String next() {
+ long ts = startMs + 100 * currentRow;
+ int groupId = currentTbId % 5 == 0 ? currentTbId / 5 : currentTbId / 5 + 1;
+ StringBuilder sb = new StringBuilder(tbNamePrefix + "_" + currentTbId + ","); // tbName
+ sb.append(ts).append(','); // ts
+ sb.append(current[currentRow % 5]).append(','); // current
+ sb.append(voltage[currentRow % 5]).append(','); // voltage
+ sb.append(phase[currentRow % 5]).append(','); // phase
+ sb.append(location[currentRow % 5]).append(','); // location
+ sb.append(groupId); // groupID
+
+ return sb.toString();
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..a6fcfed1d28281d46aff493ef9783972858ebe62
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java
@@ -0,0 +1,58 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+
+class ReadTask implements Runnable {
+ private final static Logger logger = LoggerFactory.getLogger(ReadTask.class);
+ private final int taskId;
+ private final List> taskQueues;
+ private final int queueCount;
+ private final int tableCount;
+ private boolean active = true;
+
+ public ReadTask(int readTaskId, List> queues, int tableCount) {
+ this.taskId = readTaskId;
+ this.taskQueues = queues;
+ this.queueCount = queues.size();
+ this.tableCount = tableCount;
+ }
+
+ /**
+ * Assign data received to different queues.
+ * Here we use the suffix number in table name.
+ * You are expected to define your own rule in practice.
+ *
+ * @param line record received
+ * @return which queue to use
+ */
+ public int getQueueId(String line) {
+ String tbName = line.substring(0, line.indexOf(',')); // For example: tb1_101
+ String suffixNumber = tbName.split("_")[1];
+ return Integer.parseInt(suffixNumber) % this.queueCount;
+ }
+
+ @Override
+ public void run() {
+ logger.info("started");
+ Iterator it = new MockDataSource("tb" + this.taskId, tableCount);
+ try {
+ while (it.hasNext() && active) {
+ String line = it.next();
+ int queueId = getQueueId(line);
+ taskQueues.get(queueId).put(line);
+ }
+ } catch (Exception e) {
+ logger.error("Read Task Error", e);
+ }
+ }
+
+ public void stop() {
+ logger.info("stop");
+ this.active = false;
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..c2989acdbe3d0f56d7451ac86051a55955ce14de
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java
@@ -0,0 +1,205 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.*;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A helper class that encapsulates the logic of writing using SQL.
+ *
+ * The main interfaces are two methods:
+ *
+ * {@link SQLWriter#processLine}, which receives raw lines from WriteTask and groups them by table names.
+ * {@link SQLWriter#flush}, which assembles the INSERT statement and executes it.
+ *
+ *
+ * There is a technical skill worth mentioning: we create tables as needed when a "table does not exist" error occurs, instead of creating tables automatically using the syntax "INSERT INTO tb USING stb".
+ * This ensures that checking table existence is a one-time-only operation.
+ *
+ *
+ *
+ */
+public class SQLWriter {
+ final static Logger logger = LoggerFactory.getLogger(SQLWriter.class);
+
+ private Connection conn;
+ private Statement stmt;
+
+ /**
+ * current number of buffered records
+ */
+ private int bufferedCount = 0;
+ /**
+ * Maximum number of buffered records.
+     * Flush action will be triggered if bufferedCount reaches this value.
+ */
+ private int maxBatchSize;
+
+
+ /**
+ * Maximum SQL length.
+ */
+ private int maxSQLLength;
+
+ /**
+ * Map from table name to column values. For example:
+ * "tb001" -> "(1648432611249,2.1,114,0.09) (1648432611250,2.2,135,0.2)"
+ */
+ private Map tbValues = new HashMap<>();
+
+ /**
+ * Map from table name to tag values in the same order as creating stable.
+ * Used for creating table.
+ */
+ private Map tbTags = new HashMap<>();
+
+ public SQLWriter(int maxBatchSize) {
+ this.maxBatchSize = maxBatchSize;
+ }
+
+
+ /**
+ * Get Database Connection
+ *
+ * @return Connection
+ * @throws SQLException
+ */
+ private static Connection getConnection() throws SQLException {
+ String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
+ return DriverManager.getConnection(jdbcURL);
+ }
+
+ /**
+ * Create Connection and Statement
+ *
+ * @throws SQLException
+ */
+ public void init() throws SQLException {
+ conn = getConnection();
+ stmt = conn.createStatement();
+ stmt.execute("use test");
+ ResultSet rs = stmt.executeQuery("show variables");
+ while (rs.next()) {
+ String configName = rs.getString(1);
+ if ("maxSQLLength".equals(configName)) {
+ maxSQLLength = Integer.parseInt(rs.getString(2));
+ logger.info("maxSQLLength={}", maxSQLLength);
+ }
+ }
+ }
+
+ /**
+ * Convert raw data to SQL fragments, group them by table name and cache them in a HashMap.
+     * Trigger writing when the number of buffered records reaches maxBatchSize.
+ *
+     * @param line raw data taken from the task queue, in format: tbName,ts,current,voltage,phase,location,groupId
+ */
+ public void processLine(String line) throws SQLException {
+ bufferedCount += 1;
+ int firstComma = line.indexOf(',');
+ String tbName = line.substring(0, firstComma);
+ int lastComma = line.lastIndexOf(',');
+ int secondLastComma = line.lastIndexOf(',', lastComma - 1);
+ String value = "(" + line.substring(firstComma + 1, secondLastComma) + ") ";
+ if (tbValues.containsKey(tbName)) {
+ tbValues.put(tbName, tbValues.get(tbName) + value);
+ } else {
+ tbValues.put(tbName, value);
+ }
+ if (!tbTags.containsKey(tbName)) {
+ String location = line.substring(secondLastComma + 1, lastComma);
+ String groupId = line.substring(lastComma + 1);
+ String tagValues = "('" + location + "'," + groupId + ')';
+ tbTags.put(tbName, tagValues);
+ }
+ if (bufferedCount == maxBatchSize) {
+ flush();
+ }
+ }
+
+
+ /**
+ * Assemble INSERT statement using buffered SQL fragments in Map {@link SQLWriter#tbValues} and execute it.
+     * In case of a "Table does not exist" exception, create all tables referenced in the SQL and retry it.
+ */
+ public void flush() throws SQLException {
+ StringBuilder sb = new StringBuilder("INSERT INTO ");
+ for (Map.Entry entry : tbValues.entrySet()) {
+ String tableName = entry.getKey();
+ String values = entry.getValue();
+ String q = tableName + " values " + values + " ";
+ if (sb.length() + q.length() > maxSQLLength) {
+ executeSQL(sb.toString());
+ logger.warn("increase maxSQLLength or decrease maxBatchSize to gain better performance");
+ sb = new StringBuilder("INSERT INTO ");
+ }
+ sb.append(q);
+ }
+ executeSQL(sb.toString());
+ tbValues.clear();
+ bufferedCount = 0;
+ }
+
+ private void executeSQL(String sql) throws SQLException {
+ try {
+ stmt.executeUpdate(sql);
+ } catch (SQLException e) {
+ // convert to error code defined in taoserror.h
+ int errorCode = e.getErrorCode() & 0xffff;
+ if (errorCode == 0x362 || errorCode == 0x218) {
+ // Table does not exist
+ createTables();
+ executeSQL(sql);
+ } else {
+ logger.error("Execute SQL: {}", sql);
+ throw e;
+ }
+ } catch (Throwable throwable) {
+ logger.error("Execute SQL: {}", sql);
+ throw throwable;
+ }
+ }
+
+ /**
+ * Create tables in batch using syntax:
+ *
+ * CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
+ *
+ */
+ private void createTables() throws SQLException {
+ StringBuilder sb = new StringBuilder("CREATE TABLE ");
+ for (String tbName : tbValues.keySet()) {
+ String tagValues = tbTags.get(tbName);
+ sb.append("IF NOT EXISTS ").append(tbName).append(" USING meters TAGS ").append(tagValues).append(" ");
+ }
+ String sql = sb.toString();
+ try {
+ stmt.executeUpdate(sql);
+ } catch (Throwable throwable) {
+ logger.error("Execute SQL: {}", sql);
+ throw throwable;
+ }
+ }
+
+ public boolean hasBufferedValues() {
+ return bufferedCount > 0;
+ }
+
+ public int getBufferedCount() {
+ return bufferedCount;
+ }
+
+ public void close() {
+ try {
+ stmt.close();
+ } catch (SQLException e) {
+ }
+ try {
+ conn.close();
+ } catch (SQLException e) {
+ }
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..8ade06625d708a112c85d5657aa00bcd0e605ff4
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java
@@ -0,0 +1,4 @@
+package com.taos.example.highvolume;
+
+public class StmtWriter {
+}
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..de9e5463d7dc59478f991e4783aacaae527b4c4b
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java
@@ -0,0 +1,58 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.BlockingQueue;
+
+class WriteTask implements Runnable {
+ private final static Logger logger = LoggerFactory.getLogger(WriteTask.class);
+ private final int maxBatchSize;
+
+ // the queue from which this writing task get raw data.
+ private final BlockingQueue queue;
+
+ // A flag indicate whether to continue.
+ private boolean active = true;
+
+ public WriteTask(BlockingQueue taskQueue, int maxBatchSize) {
+ this.queue = taskQueue;
+ this.maxBatchSize = maxBatchSize;
+ }
+
+ @Override
+ public void run() {
+ logger.info("started");
+ String line = null; // data getting from the queue just now.
+ SQLWriter writer = new SQLWriter(maxBatchSize);
+ try {
+ writer.init();
+ while (active) {
+ line = queue.poll();
+ if (line != null) {
+ // parse raw data and buffer the data.
+ writer.processLine(line);
+ } else if (writer.hasBufferedValues()) {
+ // write data immediately if no more data in the queue
+ writer.flush();
+ } else {
+                    // sleep a while to avoid high CPU usage if there is no more data in the queue and no buffered records.
+ Thread.sleep(100);
+ }
+ }
+ if (writer.hasBufferedValues()) {
+ writer.flush();
+ }
+ } catch (Exception e) {
+ String msg = String.format("line=%s, bufferedCount=%s", line, writer.getBufferedCount());
+ logger.error(msg, e);
+ } finally {
+ writer.close();
+ }
+ }
+
+ public void stop() {
+ logger.info("stop");
+ this.active = false;
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/test/java/com/taos/test/TestAll.java b/docs/examples/java/src/test/java/com/taos/test/TestAll.java
index 42db24485afec05298159f7b0c3a4e15835d98ed..8d201da0745e1d2d36220c9d78383fc37d4a813a 100644
--- a/docs/examples/java/src/test/java/com/taos/test/TestAll.java
+++ b/docs/examples/java/src/test/java/com/taos/test/TestAll.java
@@ -23,16 +23,16 @@ public class TestAll {
String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
try (Statement stmt = conn.createStatement()) {
- String sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" +
- " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" +
- " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" +
- " power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" +
- " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" +
- " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)";
+ String sql = "INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" +
+ " power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" +
+ " power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" +
+ " power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" +
+ " power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" +
+ " power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)";
stmt.execute(sql);
}
diff --git a/docs/examples/python/connect_native_reference.py b/docs/examples/python/connect_native_reference.py
index c17e9795b58724f6646b8d7c0f84047098a93d69..09b0685acef8c68b30153da5a1761d36b1cf9aae 100644
--- a/docs/examples/python/connect_native_reference.py
+++ b/docs/examples/python/connect_native_reference.py
@@ -11,10 +11,10 @@ conn: taos.TaosConnection = taos.connect(host="localhost",
server_version = conn.server_info
print("server_version", server_version)
client_version = conn.client_info
-print("client_version", client_version) # 2.4.0.16
+print("client_version", client_version) # 3.0.0.0
conn.close()
# possible output:
-# 2.4.0.16
-# 2.4.0.16
+# 3.0.0.0
+# 3.0.0.0
diff --git a/docs/examples/python/fast_write_example.py b/docs/examples/python/fast_write_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9d606388fdecd85f1468f24cc497ecc5941f035
--- /dev/null
+++ b/docs/examples/python/fast_write_example.py
@@ -0,0 +1,180 @@
+# install dependencies:
+# recommend python >= 3.8
+# pip3 install faster-fifo
+#
+
+import logging
+import math
+import sys
+import time
+import os
+from multiprocessing import Process
+from faster_fifo import Queue
+from mockdatasource import MockDataSource
+from queue import Empty
+from typing import List
+
+logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(asctime)s [%(name)s] - %(message)s")
+
+READ_TASK_COUNT = 1
+WRITE_TASK_COUNT = 1
+TABLE_COUNT = 1000
+QUEUE_SIZE = 1000000
+MAX_BATCH_SIZE = 3000
+
+read_processes = []
+write_processes = []
+
+
+def get_connection():
+ """
+ If variable TDENGINE_FIRST_EP is provided then it will be used. If not, firstEP in /etc/taos/taos.cfg will be used.
+ You can also override the default username and password by supply variable TDENGINE_USER and TDENGINE_PASSWORD
+ """
+ import taos
+ firstEP = os.environ.get("TDENGINE_FIRST_EP")
+ if firstEP:
+ host, port = firstEP.split(":")
+ else:
+ host, port = None, 0
+ user = os.environ.get("TDENGINE_USER", "root")
+ password = os.environ.get("TDENGINE_PASSWORD", "taosdata")
+ return taos.connect(host=host, port=int(port), user=user, password=password)
+
+
+# ANCHOR: read
+
+def run_read_task(task_id: int, task_queues: List[Queue]):
+ table_count_per_task = TABLE_COUNT // READ_TASK_COUNT
+ data_source = MockDataSource(f"tb{task_id}", table_count_per_task)
+ try:
+ for batch in data_source:
+ for table_id, rows in batch:
+ # hash data to different queue
+ i = table_id % len(task_queues)
+ # block putting forever when the queue is full
+ task_queues[i].put_many(rows, block=True, timeout=-1)
+ except KeyboardInterrupt:
+ pass
+
+
+# ANCHOR_END: read
+
+# ANCHOR: write
+def run_write_task(task_id: int, queue: Queue):
+ from sql_writer import SQLWriter
+ log = logging.getLogger(f"WriteTask-{task_id}")
+ writer = SQLWriter(get_connection)
+ lines = None
+ try:
+ while True:
+ try:
+ # get as many as possible
+ lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE)
+ writer.process_lines(lines)
+ except Empty:
+ time.sleep(0.01)
+ except KeyboardInterrupt:
+ pass
+ except BaseException as e:
+ log.debug(f"lines={lines}")
+ raise e
+
+
+# ANCHOR_END: write
+
+def set_global_config():
+ argc = len(sys.argv)
+ if argc > 1:
+ global READ_TASK_COUNT
+ READ_TASK_COUNT = int(sys.argv[1])
+ if argc > 2:
+ global WRITE_TASK_COUNT
+ WRITE_TASK_COUNT = int(sys.argv[2])
+ if argc > 3:
+ global TABLE_COUNT
+ TABLE_COUNT = int(sys.argv[3])
+ if argc > 4:
+ global QUEUE_SIZE
+ QUEUE_SIZE = int(sys.argv[4])
+ if argc > 5:
+ global MAX_BATCH_SIZE
+ MAX_BATCH_SIZE = int(sys.argv[5])
+
+
+# ANCHOR: monitor
+def run_monitor_process():
+ log = logging.getLogger("DataBaseMonitor")
+ conn = get_connection()
+ conn.execute("DROP DATABASE IF EXISTS test")
+ conn.execute("CREATE DATABASE test")
+ conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
+ "TAGS (location BINARY(64), groupId INT)")
+
+ def get_count():
+ res = conn.query("SELECT count(*) FROM test.meters")
+ rows = res.fetch_all()
+ return rows[0][0] if rows else 0
+
+ last_count = 0
+ while True:
+ time.sleep(10)
+ count = get_count()
+ log.info(f"count={count} speed={(count - last_count) / 10}")
+ last_count = count
+
+
+# ANCHOR_END: monitor
+# ANCHOR: main
+def main():
+ set_global_config()
+ logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, "
+ f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}")
+
+ monitor_process = Process(target=run_monitor_process)
+ monitor_process.start()
+ time.sleep(3) # waiting for database ready.
+
+ task_queues: List[Queue] = []
+ # create task queues
+ for i in range(WRITE_TASK_COUNT):
+ queue = Queue(max_size_bytes=QUEUE_SIZE)
+ task_queues.append(queue)
+
+ # create write processes
+ for i in range(WRITE_TASK_COUNT):
+ p = Process(target=run_write_task, args=(i, task_queues[i]))
+ p.start()
+ logging.debug(f"WriteTask-{i} started with pid {p.pid}")
+ write_processes.append(p)
+
+ # create read processes
+ for i in range(READ_TASK_COUNT):
+ queues = assign_queues(i, task_queues)
+ p = Process(target=run_read_task, args=(i, queues))
+ p.start()
+ logging.debug(f"ReadTask-{i} started with pid {p.pid}")
+ read_processes.append(p)
+
+ try:
+ monitor_process.join()
+ except KeyboardInterrupt:
+ monitor_process.terminate()
+ [p.terminate() for p in read_processes]
+ [p.terminate() for p in write_processes]
+ [q.close() for q in task_queues]
+
+
+def assign_queues(read_task_id, task_queues):
+ """
+ Compute target queues for a specific read task.
+ """
+ ratio = WRITE_TASK_COUNT / READ_TASK_COUNT
+ from_index = math.floor(read_task_id * ratio)
+ end_index = math.ceil((read_task_id + 1) * ratio)
+ return task_queues[from_index:end_index]
+
+
+if __name__ == '__main__':
+ main()
+# ANCHOR_END: main
diff --git a/docs/examples/python/mockdatasource.py b/docs/examples/python/mockdatasource.py
new file mode 100644
index 0000000000000000000000000000000000000000..852860aec0adc8f9b043c9dcd5deb0bf00239201
--- /dev/null
+++ b/docs/examples/python/mockdatasource.py
@@ -0,0 +1,49 @@
+import time
+
+
+class MockDataSource:
+ samples = [
+ "8.8,119,0.32,LosAngeles,0",
+ "10.7,116,0.34,SanDiego,1",
+ "9.9,111,0.33,Hollywood,2",
+ "8.9,113,0.329,Compton,3",
+ "9.4,118,0.141,San Francisco,4"
+ ]
+
+ def __init__(self, tb_name_prefix, table_count):
+ self.table_name_prefix = tb_name_prefix + "_"
+ self.table_count = table_count
+ self.max_rows = 10000000
+ self.current_ts = round(time.time() * 1000) - self.max_rows * 100
+ # [(tableId, tableName, values),]
+ self.data = self._init_data()
+
+ def _init_data(self):
+ lines = self.samples * (self.table_count // 5 + 1)
+ data = []
+ for i in range(self.table_count):
+ table_name = self.table_name_prefix + str(i)
+ data.append((i, table_name, lines[i])) # tableId, row
+ return data
+
+ def __iter__(self):
+ self.row = 0
+ return self
+
+ def __next__(self):
+ """
+ next 1000 rows for each table.
+        return: [(tableId, [row, ...]), ...]
+ """
+ # generate 1000 timestamps
+ ts = []
+ for _ in range(1000):
+ self.current_ts += 100
+ ts.append(str(self.current_ts))
+ # add timestamp to each row
+ # [(tableId, ["tableName,ts,current,voltage,phase,location,groupId"])]
+ result = []
+ for table_id, table_name, values in self.data:
+ rows = [table_name + ',' + t + ',' + values for t in ts]
+ result.append((table_id, rows))
+ return result
diff --git a/docs/examples/python/sql_writer.py b/docs/examples/python/sql_writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..758167376b009f21afc701be7d89c1bfbabdeb9f
--- /dev/null
+++ b/docs/examples/python/sql_writer.py
@@ -0,0 +1,90 @@
+import logging
+import taos
+
+
+class SQLWriter:
+ log = logging.getLogger("SQLWriter")
+
+ def __init__(self, get_connection_func):
+ self._tb_values = {}
+ self._tb_tags = {}
+ self._conn = get_connection_func()
+ self._max_sql_length = self.get_max_sql_length()
+ self._conn.execute("USE test")
+
+ def get_max_sql_length(self):
+ rows = self._conn.query("SHOW variables").fetch_all()
+ for r in rows:
+ name = r[0]
+ if name == "maxSQLLength":
+ return int(r[1])
+ return 1024 * 1024
+
+ def process_lines(self, lines: str):
+ """
+ :param lines: [[tbName,ts,current,voltage,phase,location,groupId]]
+ """
+ for line in lines:
+ ps = line.split(",")
+ table_name = ps[0]
+ value = '(' + ",".join(ps[1:-2]) + ') '
+ if table_name in self._tb_values:
+ self._tb_values[table_name] += value
+ else:
+ self._tb_values[table_name] = value
+
+ if table_name not in self._tb_tags:
+ location = ps[-2]
+ group_id = ps[-1]
+ tag_value = f"('{location}',{group_id})"
+ self._tb_tags[table_name] = tag_value
+ self.flush()
+
+ def flush(self):
+ """
+ Assemble INSERT statement and execute it.
+ When the sql length grows close to MAX_SQL_LENGTH, the sql will be executed immediately, and a new INSERT statement will be created.
+        In case of "Table does not exist" exception, tables in the sql will be created and the sql will be re-executed.
+ """
+ sql = "INSERT INTO "
+ sql_len = len(sql)
+ buf = []
+ for tb_name, values in self._tb_values.items():
+ q = tb_name + " VALUES " + values
+ if sql_len + len(q) >= self._max_sql_length:
+ sql += " ".join(buf)
+ self.execute_sql(sql)
+ sql = "INSERT INTO "
+ sql_len = len(sql)
+ buf = []
+ buf.append(q)
+ sql_len += len(q)
+ sql += " ".join(buf)
+ self.execute_sql(sql)
+ self._tb_values.clear()
+
+ def execute_sql(self, sql):
+ try:
+ self._conn.execute(sql)
+ except taos.Error as e:
+ error_code = e.errno & 0xffff
+            # Table does not exist
+ if error_code == 9731:
+ self.create_tables()
+ else:
+ self.log.error("Execute SQL: %s", sql)
+ raise e
+ except BaseException as baseException:
+ self.log.error("Execute SQL: %s", sql)
+ raise baseException
+
+ def create_tables(self):
+ sql = "CREATE TABLE "
+ for tb in self._tb_values.keys():
+ tag_values = self._tb_tags[tb]
+ sql += "IF NOT EXISTS " + tb + " USING meters TAGS " + tag_values + " "
+ try:
+ self._conn.execute(sql)
+ except BaseException as e:
+ self.log.error("Execute SQL: %s", sql)
+ raise e
diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md
index 79d5424ac2e67e05c346e546847c743595d7a82b..f9127121f35c8cdb9d28e121c20b9b7bb9101625 100644
--- a/docs/zh/01-index.md
+++ b/docs/zh/01-index.md
@@ -4,22 +4,22 @@ sidebar_label: 文档首页
slug: /
---
-TDengine是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time Series Database , TSDB ), 它专为物联网、工业互联网、金融等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一极简的时序数据处理平台。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。
+TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time Series Database , TSDB ), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。
-TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用TDengine, 无论如何,请您仔细阅读[基本概念](./concept)一章。
+TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论如何,请您仔细阅读[基本概念](./concept)一章。
-如果你是开发者,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要把示例代码拷贝粘贴,针对自己的应用稍作改动,就能跑起来。
+如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。
-我们已经生活在大数据的时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请参考[部署集群](./deployment)一章。
+我们已经生活在大数据时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 Database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请仔细参考[部署集群](./deployment)一章。
-TDengine 采用 SQL 作为其查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。
+TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。
-如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出,配置参数,怎么监测 TDengine 是否健康运行,怎么提升系统运行的性能,那么请仔细参考[运维指南](./operation)一章。
+如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出、配置参数,如何监测 TDengine 是否健康运行,如何提升系统运行的性能,请仔细参考[运维指南](./operation)一章。
-如果你对 TDengine 外围工具,REST API, 各种编程语言的连接器想做更多详细了解,请看[参考指南](./reference)一章。
+如果你对 TDengine 的外围工具、REST API、各种编程语言的连接器(Connector)想做更多详细了解,请看[参考指南](./reference)一章。
-如果你对 TDengine 内部的架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。
+如果你对 TDengine 的内部架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。
-最后,作为一个开源软件,欢迎大家的参与。如果发现文档的任何错误,描述不清晰的地方,都请在每个页面的最下方,点击“编辑本文档“直接进行修改。
+最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。
Together, we make a difference!
diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md
index a6ef2b94b6c0b030e967c498a36fd8ae4655f724..9a0a6fb547ceef373bf7f57e333fda93d40eb5b6 100644
--- a/docs/zh/02-intro.md
+++ b/docs/zh/02-intro.md
@@ -1,74 +1,98 @@
---
title: 产品简介
+description: 简要介绍 TDengine 的主要功能
toc_max_heading_level: 2
---
-TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/tdengine/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time Series Database , TSDB )。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。
+TDengine 是一款开源、高性能、云原生的[时序数据库](https://tdengine.com/tsdb/),且针对物联网、车联网、工业互联网、金融、IT 运维等场景进行了优化。TDengine 的代码,包括集群功能,都在 GNU AGPL v3.0 下开源。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等其它功能以降低系统复杂度及研发和运维成本。
-本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对TDengine有个整体的了解。
+本章节介绍 TDengine 的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对 TDengine 有个整体的了解。
## 主要功能
-TDengine的主要功能如下:
-
-1. 高速数据写入,除 [SQL 写入](../develop/insert-data/sql-writing)外,还支持 [Schemaless 写入](../reference/schemaless/),支持 [InfluxDB LINE 协议](../develop/insert-data/influxdb-line),[OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](../develop/insert-data/opentsdb-json)等协议写入;
-2. 第三方数据采集工具 [Telegraf](../third-party/telegraf),[Prometheus](../third-party/prometheus),[StatsD](../third-party/statsd),[collectd](../third-party/collectd),[icinga2](../third-party/icinga2), [TCollector](../third-party/tcollector), [EMQ](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker) 等都可以进行配置后,不用任何代码,即可将数据写入;
-3. 支持[各种查询](../develop/query-data),包括聚合查询、嵌套查询、降采样查询、插值等
-4. 支持[用户自定义函数](../develop/udf)
-5. 支持[缓存](../develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis
-6. 支持[流式计算](../develop/stream)(Stream Processing)
-7. 支持[数据订阅](../develop/tmq),而且可以指定过滤条件
-8. 支持[集群](../deployment/),可以通过多节点进行水平扩展,并通过多副本实现高可靠
-9. 提供[命令行程序](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
-10. 提供多种数据的[导入](../operation/import)、[导出](../operation/export)
-11. 支持对[TDengine 集群本身的监控](../operation/monitor)
-12. 提供 [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) 等多种编程语言的[连接器](../reference/connector/)
-13. 支持 [REST 接口](../reference/rest-api/)
-14. 支持与[ Grafana 无缝集成](../third-party/grafana)
-15. 支持与 Google Data Studio 无缝集成
-16. 支持 [Kubernetes 部署](../deployment/k8s)
-
-更多细小的功能,请阅读整个文档。
+TDengine 的主要功能如下:
+
+1. 写入数据,支持
+ - [SQL 写入](../develop/insert-data/sql-writing)
+ - [无模式(Schemaless)写入](../reference/schemaless/),支持多种标准写入协议
+ - [InfluxDB Line 协议](../develop/insert-data/influxdb-line)
+ - [OpenTSDB Telnet 协议](../develop/insert-data/opentsdb-telnet)
+ - [OpenTSDB JSON 协议](../develop/insert-data/opentsdb-json)
+ - 与多种第三方工具的无缝集成,它们都可以仅通过配置而无需任何代码即可将数据写入 TDengine
+ - [Telegraf](../third-party/telegraf)
+ - [Prometheus](../third-party/prometheus)
+ - [StatsD](../third-party/statsd)
+ - [collectd](../third-party/collectd)
+ - [Icinga2](../third-party/icinga2)
+ - [TCollector](../third-party/tcollector)
+ - [EMQX](../third-party/emq-broker)
+ - [HiveMQ](../third-party/hive-mq-broker)
+2. 查询数据,支持
+ - [标准 SQL](../taos-sql),含嵌套查询
+ - [时序数据特色函数](../taos-sql/function/#time-series-extensions)
+ - [时序数据特色查询](../taos-sql/distinguished),例如降采样、插值、累加和、时间加权平均、状态窗口、会话窗口等
+ - [用户自定义函数(UDF)](../taos-sql/udf)
+3. [缓存](../develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis 就能对时序数据进行高效处理
+4. [流式计算(Stream Processing)](../develop/stream),TDengine 不仅支持连续查询,还支持事件驱动的流式计算,这样在处理时序数据时就无需 Flink 或 Spark 这样流式计算组件
+5. [数据订阅](../develop/tmq),应用程序可以订阅一张表或一组表的数据,提供与 Kafka 相同的 API,而且可以指定过滤条件
+6. 可视化
+ - 支持与 [Grafana](../third-party/grafana/) 的无缝集成
+ - 支持与 Google Data Studio 的无缝集成
+7. 集群
+ - [集群部署](../deployment/),可以通过增加节点进行水平扩展以提升处理能力
+ - 可以通过 [Kubernetes 部署 TDengine](../deployment/k8s/)
+ - 通过多副本提供高可用能力
+8. 管理
+ - [监控](../operation/monitor)运行中的 TDengine 实例
+ - 多种[数据导入](../operation/import)方式
+ - 多种[数据导出](../operation/export)方式
+9. 工具
+ - 提供[交互式命令行程序(CLI)](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
+ - 提供压力测试工具[taosBenchmark](../reference/taosbenchmark),用于测试 TDengine 的性能
+10. 编程
+ - 提供各种语言的[连接器(Connector)](../connector): 如 [C/C++](../connector/cpp)、[Java](../connector/java)、[Go](../connector/go)、[Node.js](../connector/node)、[Rust](../connector/rust)、[Python](../connector/python)、[C#](../connector/csharp) 等
+ - 支持 [REST 接口](../connector/rest-api/)
+
+更多细节功能,请阅读整个文档。
## 竞争优势
-由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html),比如结构化、无需事务、很少删除或更新、写多读少等等,设计了全新的针对时序数据的存储引擎和计算引擎,因此与其他时序数据库相比,TDengine 有以下特点:
+由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html),比如结构化、无需事务、很少删除或更新、写多读少等等,因此与其他时序数据库相比,TDengine 有以下特点:
-- **[高性能](https://www.taosdata.com/tdengine/fast)**:通过创新的存储引擎设计,无论是数据写入还是查询,TDengine 的性能比通用数据库快 10 倍以上,也远超其他时序数据库,存储空间不及通用数据库的1/10。
+- **[高性能](https://www.taosdata.com/tdengine/fast)**:TDengine 是唯一一个解决了时序数据存储的高基数难题的时序数据库,支持上亿数据采集点,并在数据插入、查询和数据压缩上远胜其它时序数据库。
-- **[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**:通过原生分布式的设计,充分利用云平台的优势,TDengine 提供了水平扩展能力,具备弹性、韧性和可观测性,支持k8s部署,可运行在公有云、私有云和混合云上。
+- **[极简时序数据平台](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**:TDengine 内建缓存、流式计算和数据订阅等功能,为时序数据的处理提供了极简的解决方案,从而大幅降低了业务系统的设计复杂度和运维成本。
-- **[极简时序数据平台](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**:TDengine 内建消息队列、缓存、流式计算等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低系统的复杂度,降低应用开发和运营成本。
+- **[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**:通过原生的分布式设计、数据分片和分区、存算分离、RAFT 协议、Kubernetes 部署和完整的可观测性,TDengine 是一款云原生时序数据库并且能够部署在公有云、私有云和混合云上。
-- **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**:支持 SQL,同时为时序数据特有的分析提供SQL扩展。通过超级表、存储计算分离、分区分片、预计算、自定义函数等技术,TDengine 具备强大的分析能力。
+- **[简单易用](https://www.taosdata.com/tdengine/ease_of_use)**:对系统管理员来说,TDengine 大幅降低了管理和维护的代价。对开发者来说, TDengine 提供了简单的接口、极简的解决方案和与第三方工具的无缝集成。对数据分析专家来说,TDengine 提供了便捷的数据访问能力。
-- **[简单易用](https://www.taosdata.com/tdengine/ease_of_use)**:无任何依赖,安装、集群几秒搞定;提供REST以及各种语言连接器,与众多第三方工具无缝集成;提供命令行程序,便于管理和即席查询;提供各种运维工具。
+- **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**:通过超级表、存储计算分离、分区分片、预计算和其它技术,TDengine 能够高效地浏览、格式化和访问数据。
-- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部开源,截止到2022年8月1日,全球超过 135.9k 个运行实例,GitHub Star 18.7k,Fork 4.4k,社区活跃。
+- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部在开源协议下公开。全球超过 140k 个运行实例,GitHub Star 19k,且拥有一个活跃的开发者社区。
采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。表现在几个方面:
-1. 由于其超强性能,它能将系统需要的计算资源和存储资源大幅降低
+1. 由于其超强性能,它能将系统所需的计算资源和存储资源大幅降低
2. 因为支持 SQL,能与众多第三方软件无缝集成,学习迁移成本大幅下降
-3. 因为是一极简的时序数据平台,系统复杂度、研发和运营成本大幅降低
-4. 因为维护简单,运营维护成本能大幅降低
+3. 因为是一款极简的时序数据平台,系统复杂度、研发和运营成本大幅降低
## 技术生态
-在整个时序大数据平台中,TDengine 在其中扮演的角色如下:
+在整个时序大数据平台中,TDengine 扮演的角色如下:
![TDengine Database 技术生态图](eco_system.webp)
+图 1. TDengine 技术生态图
-图 1. TDengine技术生态图
-上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka, 他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序 (CLI) 以及可视化管理管理。
+上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka,他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序(CLI)以及可视化管理工具。
-## 总体适用场景
+## 典型适用场景
-作为一个高性能、分布式、支持 SQL 的时序数据库 (Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。本文对适用场景做更多详细的分析。
+作为一个高性能、分布式、支持 SQL 的时序数据库(Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因其充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。下面本文将对适用场景做更多详细的分析。
### 数据源特点和需求
@@ -90,18 +114,18 @@ TDengine的主要功能如下:
### 系统功能需求
-| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
-| -------------------------- | ------ | -------- | -------- | --------------------------------------------------------------------------------------------------------------------- |
-| 要求完整的内置数据处理算法 | | √ | | TDengine 的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。 |
-| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据系统处理,或者应该考虑 TDengine 和关系型数据系统配合实现系统功能。 |
+| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
+| -------------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------------------------- |
+| 要求完整的内置数据处理算法 | | √ | | TDengine 实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有需求,因此特殊类型的处理需求还需要在应用层面解决。 |
+| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据库处理,或者应该考虑 TDengine 和关系型数据库配合实现系统功能。 |
### 系统性能需求
-| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
-| ---------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------ |
-| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 |
-| 要求高速处理数据 | | | √ | TDengine 的专门为 IoT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 |
-| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 |
+| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
+| ---------------------- | ------ | -------- | -------- | -------------------------------------------------------------------------------------------------- |
+| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 |
+| 要求高速处理数据 | | | √ | TDengine 专门为 IoT 优化的存储和数据处理设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 |
+| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 |
### 系统维护需求
diff --git a/docs/zh/04-concept/index.md b/docs/zh/04-concept/index.md
index 8e97d4a2f43537c1229c8e8ea092ddfc1257dde7..89d3df9c973d9a319397285599e6b2e6be3785de 100644
--- a/docs/zh/04-concept/index.md
+++ b/docs/zh/04-concept/index.md
@@ -1,5 +1,7 @@
---
+sidebar_label: 基本概念
title: 数据模型和基本概念
+description: TDengine 的数据模型和基本概念
---
为了便于解释基本概念,便于撰写示例程序,整个 TDengine 文档以智能电表作为典型时序数据场景。假设每个智能电表采集电流、电压、相位三个量,有多个智能电表,每个电表有位置 location 和分组 group ID 的静态属性. 其采集的数据类似如下的表格:
@@ -104,15 +106,15 @@ title: 数据模型和基本概念
## 采集量 (Metric)
-采集量是指传感器、设备或其他类型采集点采集的物理量,比如电流、电压、温度、压力、GPS 位置等,是随时间变化的,数据类型可以是整型、浮点型、布尔型,也可是字符串。随着时间的推移,存储的采集量的数据量越来越大。
+采集量是指传感器、设备或其他类型采集点采集的物理量,比如电流、电压、温度、压力、GPS 位置等,是随时间变化的,数据类型可以是整型、浮点型、布尔型,也可是字符串。随着时间的推移,存储的采集量的数据量越来越大。智能电表示例中的电流、电压、相位就是采集量。
## 标签 (Label/Tag)
-标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。
+标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。智能电表示例中的 location 与 groupId 就是标签。
## 数据采集点 (Data Collection Point)
-数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。
+数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。智能电表示例中的 d1001、d1002、d1003、d1004 等就是数据采集点。
## 表 (Table)
@@ -131,13 +133,14 @@ TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表
对于复杂的设备,比如汽车,它有多个数据采集点,那么就需要为一台汽车建立多张表。
+
## 超级表 (STable)
由于一个数据采集点一张表,导致表的数量巨增,难以管理,而且应用经常需要做采集点之间的聚合操作,聚合的操作也变得复杂起来。为解决这个问题,TDengine 引入超级表(Super Table,简称为 STable)的概念。
超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的集合),除需要定义采集量的表结构之外,还需要定义其标签的 schema,标签的数据类型可以是整数、浮点数、字符串,标签可以有多个,可以事后增加、删除或修改。如果整个系统有 N 个不同类型的数据采集点,就需要建立 N 个超级表。
-在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。
+在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。智能电表示例中,我们可以创建一个超级表 meters。
## 子表 (Subtable)
@@ -156,7 +159,9 @@ TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表
查询既可以在表上进行,也可以在超级表上进行。针对超级表的查询,TDengine 将把所有子表中的数据视为一个整体数据集进行处理,会先把满足标签过滤条件的表从超级表中找出来,然后再扫描这些表的时序数据,进行聚合操作,这样需要扫描的数据集会大幅减少,从而显著提高查询的性能。本质上,TDengine 通过对超级表查询的支持,实现了多个同类数据采集点的高效聚合。
-TDengine系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。
+TDengine 系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。在智能电表的示例中,我们可以通过超级表 meters 创建子表 d1001、d1002、d1003、d1004 等。
+
+为了更好地理解超级表与子表的关系,可以参考下面关于智能电表数据模型的示意图。 ![智能电表数据模型示意图](./supertable.webp)
## 库 (database)
diff --git a/docs/zh/04-concept/supertable.webp b/docs/zh/04-concept/supertable.webp
new file mode 100644
index 0000000000000000000000000000000000000000..764b8f3de7ee92a103b2fcd0e75c03773af5ee37
Binary files /dev/null and b/docs/zh/04-concept/supertable.webp differ
diff --git a/docs/zh/05-get-started/01-docker.md b/docs/zh/05-get-started/01-docker.md
index f0f09d4c7eeb9e5669008c4c95be5eade58b2090..e2be4195176a3f1ac7712a036d04b60b2fb77718 100644
--- a/docs/zh/05-get-started/01-docker.md
+++ b/docs/zh/05-get-started/01-docker.md
@@ -1,6 +1,7 @@
---
sidebar_label: Docker
title: 通过 Docker 快速体验 TDengine
+description: 使用 Docker 快速体验 TDengine 的高效写入和查询
---
本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。如果你不熟悉 Docker,请使用[安装包的方式快速体验](../../get-started/package/)。如果您希望为 TDengine 贡献代码或对内部技术实现感兴趣,请参考 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md
index c1a67f018253fa31c732d246814eb612608cc355..3e0fb056a5913d3a82a473bf879a79e398176075 100644
--- a/docs/zh/05-get-started/03-package.md
+++ b/docs/zh/05-get-started/03-package.md
@@ -1,6 +1,7 @@
---
sidebar_label: 安装包
title: 使用安装包立即开始
+description: 使用安装包快速体验 TDengine
---
import Tabs from "@theme/Tabs";
@@ -9,7 +10,7 @@ import PkgListV3 from "/components/PkgListV3";
您可以[用 Docker 立即体验](../../get-started/docker/) TDengine。如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
-TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。目前 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../reference/rest-api/)。
+TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。目前 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../connector/rest-api/)。
为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。
@@ -67,13 +68,6 @@ install.sh 安装脚本在执行过程中,会通过命令行交互界面询问
-
-
-1. 从列表中下载获得 exe 安装程序;
-
-2. 运行可执行程序来安装 TDengine。
-
-
可以使用 apt-get 工具从官方仓库安装。
@@ -102,11 +96,20 @@ sudo apt-get install tdengine
:::tip
apt-get 方式只适用于 Debian 或 Ubuntu 系统
::::
+
+
+
+注意:目前 TDengine 在 Windows 平台上只支持 Windows server 2016/2019 和 Windows 10/11 系统版本。
+
+1. 从列表中下载获得 exe 安装程序;
+
+2. 运行可执行程序来安装 TDengine。
+
:::info
-下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases)
+下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases/tdengine)
:::
:::note
diff --git a/docs/zh/05-get-started/_pkg_install.mdx b/docs/zh/05-get-started/_pkg_install.mdx
deleted file mode 100644
index 83c987af8bcf24a9593105b680d32a0421344d5f..0000000000000000000000000000000000000000
--- a/docs/zh/05-get-started/_pkg_install.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
-import PkgList from "/components/PkgList";
-
-TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟。
-
-为方便使用,从 2.4.0.10 开始,标准的服务端安装包包含了 taos、taosd、taosAdapter、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。
-
-在安装包格式上,我们提供 tar.gz, rpm 和 deb 格式,为企业客户提供 tar.gz 格式安装包,以方便在特定操作系统上使用。需要注意的是,rpm 和 deb 包不含 taosdump、taosBenchmark 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。
-
-发布版本包括稳定版和 Beta 版,Beta 版含有更多新功能。正式上线或测试建议安装稳定版。您可以根据需要选择下载:
-
-
-
-具体的安装方法,请参见[安装包的安装和卸载](/operation/pkg-install)。
-
-下载其他组件、最新 Beta 版及之前版本的安装包,请点击[这里](https://www.taosdata.com/all-downloads)
-
-查看 Release Notes, 请点击[这里](https://github.com/taosdata/TDengine/releases)
diff --git a/docs/zh/05-get-started/index.md b/docs/zh/05-get-started/index.md
index 794081b4e4c438dee2d8cbe125de4094056f190f..20f8235d87426f7a98ded2f7be431289ea00a045 100644
--- a/docs/zh/05-get-started/index.md
+++ b/docs/zh/05-get-started/index.md
@@ -3,7 +3,7 @@ title: 立即开始
description: '快速设置 TDengine 环境并体验其高效写入和查询'
---
-TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](/reference/taosadapter) 提供 [RESTful 接口](/reference/rest-api)。
+TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../reference/taosadapter) 提供 [RESTful 接口](../connector/rest-api)。
本章主要介绍如何利用 Docker 或者安装包快速设置 TDengine 环境并体验其高效写入和查询。
diff --git a/docs/zh/07-develop/01-connect/_connect_java.mdx b/docs/zh/07-develop/01-connect/_connect_java.mdx
index f5b8ea1cc2bf309bbb182be6ae06100102328a16..86c70ef7dc9a84d61fa36502f83e0be6a0836214 100644
--- a/docs/zh/07-develop/01-connect/_connect_java.mdx
+++ b/docs/zh/07-develop/01-connect/_connect_java.mdx
@@ -12,4 +12,4 @@
{{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}}
```
-更多连接参数配置,参考[Java 连接器](/reference/connector/java)
+更多连接参数配置,参考[Java 连接器](../../connector/java)
diff --git a/docs/zh/07-develop/01-connect/index.md b/docs/zh/07-develop/01-connect/index.md
index 89faf812fffa281cc1c9df2371d3470252231ce2..075d99cfee78b01b66ebc527892e90b9291dd422 100644
--- a/docs/zh/07-develop/01-connect/index.md
+++ b/docs/zh/07-develop/01-connect/index.md
@@ -1,6 +1,6 @@
---
title: 建立连接
-description: "本节介绍如何使用连接器建立与 TDengine 的连接,给出连接器安装、连接的简单说明。"
+description: 使用连接器建立与 TDengine 的连接,以及连接器的安装和连接
---
import Tabs from "@theme/Tabs";
@@ -14,10 +14,10 @@ import ConnCSNative from "./_connect_cs.mdx";
import ConnC from "./_connect_c.mdx";
import ConnR from "./_connect_r.mdx";
import ConnPHP from "./_connect_php.mdx";
-import InstallOnWindows from "../../14-reference/03-connector/_linux_install.mdx";
-import InstallOnLinux from "../../14-reference/03-connector/_windows_install.mdx";
-import VerifyLinux from "../../14-reference/03-connector/_verify_linux.mdx";
-import VerifyWindows from "../../14-reference/03-connector/_verify_windows.mdx";
+import InstallOnWindows from "../../08-connector/_linux_install.mdx";
+import InstallOnLinux from "../../08-connector/_windows_install.mdx";
+import VerifyLinux from "../../08-connector/_verify_linux.mdx";
+import VerifyWindows from "../../08-connector/_verify_windows.mdx";
TDengine 提供了丰富的应用程序开发接口,为了便于用户快速开发自己的应用,TDengine 支持了多种编程语言的连接器,其中官方连接器包括支持 C/C++、Java、Python、Go、Node.js、C#、Rust、Lua(社区贡献)和 PHP (社区贡献)的连接器。这些连接器支持使用原生接口(taosc)和 REST 接口(部分语言暂不支持)连接 TDengine 集群。社区开发者也贡献了多个非官方连接器,例如 ADO.NET 连接器、Lua 连接器和 PHP 连接器。
@@ -33,7 +33,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
关键不同点在于:
1. 使用 REST 连接,用户无需安装客户端驱动程序 taosc,具有跨平台易用的优势,但性能要下降 30%左右。
-2. 使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](/reference/connector/cpp#参数绑定-api)、[订阅](/reference/connector/cpp#订阅和消费-api)等等。
+2. 使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](../../connector/cpp/#参数绑定-api)、[订阅](../../connector/cpp/#订阅和消费-api)等等。
## 安装客户端驱动 taosc
@@ -223,7 +223,7 @@ phpize && ./configure && make -j && make install
**手动指定 TDengine 目录:**
```shell
-phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install
+phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
```
> `--with-tdengine-dir=` 后跟上 TDengine 目录。
diff --git a/docs/zh/07-develop/02-model/index.mdx b/docs/zh/07-develop/02-model/index.mdx
index 1609eb5362cf40e7d134b0987968f7cc9bd31c92..d66059c2cda2a0e4629b16ca44cee036dc67546f 100644
--- a/docs/zh/07-develop/02-model/index.mdx
+++ b/docs/zh/07-develop/02-model/index.mdx
@@ -1,5 +1,7 @@
---
+sidebar_label: 数据建模
title: TDengine 数据建模
+description: TDengine 中如何建立数据模型
---
TDengine 采用类关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库、超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。
@@ -39,7 +41,7 @@ USE power;
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
```
-与创建普通表一样,创建超级表时,需要提供表名(示例中为 meters),表结构 Schema,即数据列的定义。第一列必须为时间戳(示例中为 ts),其他列为采集的物理量(示例中为 current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的 schema (示例中为 location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组 ID、管理员 ID 等等。标签的 schema 可以事后增加、删除、修改。具体定义以及细节请见 [TAOS SQL 的超级表管理](/taos-sql/stable) 章节。
+与创建普通表一样,创建超级表时,需要提供表名(示例中为 meters),表结构 Schema,即数据列的定义。第一列必须为时间戳(示例中为 ts),其他列为采集的物理量(示例中为 current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的 schema (示例中为 location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组 ID、管理员 ID 等等。标签的 schema 可以事后增加、删除、修改。具体定义以及细节请见 [TDengine SQL 的超级表管理](/taos-sql/stable) 章节。
每一种类型的数据采集点需要建立一个超级表,因此一个物联网系统,往往会有多个超级表。对于电网,我们就需要对智能电表、变压器、母线、开关等都建立一个超级表。在物联网中,一个设备就可能有多个数据采集点(比如一台风力发电的风机,有的采集点采集电流、电压等电参数,有的采集点采集温度、湿度、风向等环境参数),这个时候,对这一类型的设备,需要建立多张超级表。
@@ -53,7 +55,7 @@ TDengine 对每个数据采集点需要独立建表。与标准的关系型数
CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
```
-其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 "California.SanFrancisco",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TAOS SQL 的表管理](/taos-sql/table) 章节。
+其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 "California.SanFrancisco",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TDengine SQL 的表管理](/taos-sql/table) 章节。
TDengine 建议将数据采集点的全局唯一 ID 作为表名(比如设备序列号)。但对于有的场景,并没有唯一的 ID,可以将多个 ID 组合成一个唯一的 ID。不建议将具有唯一性的 ID 作为标签值。
diff --git a/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx b/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx
index 214cbdaa96d02e0cd1251eeda97c6a897887cc7e..8818eaae3dc1806a00e73d9846fbd1dfe15e0c8a 100644
--- a/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx
+++ b/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx
@@ -23,9 +23,10 @@ import PhpStmt from "./_php_stmt.mdx";
## SQL 写入简介
-应用通过连接器执行 INSERT 语句来插入数据,用户还可以通过 TAOS Shell,手动输入 INSERT 语句插入数据。
+应用通过连接器执行 INSERT 语句来插入数据,用户还可以通过 TDengine CLI,手动输入 INSERT 语句插入数据。
### 一次写入一条
+
下面这条 INSERT 就将一条记录写入到表 d1001 中:
```sql
@@ -48,7 +49,7 @@ TDengine 也支持一次向多个表写入数据,比如下面这条命令就
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
```
-详细的 SQL INSERT 语法规则参考 [TAOS SQL 的数据写入](/taos-sql/insert)。
+详细的 SQL INSERT 语法规则参考 [TDengine SQL 的数据写入](/taos-sql/insert)。
:::info
@@ -134,4 +135,3 @@ TDengine 也提供了支持参数绑定的 Prepare API,与 MySQL 类似,这
-
diff --git a/docs/zh/07-develop/03-insert-data/05-high-volume.md b/docs/zh/07-develop/03-insert-data/05-high-volume.md
new file mode 100644
index 0000000000000000000000000000000000000000..d7581467ae0315442d89de395d35bbd677f75d3a
--- /dev/null
+++ b/docs/zh/07-develop/03-insert-data/05-high-volume.md
@@ -0,0 +1,436 @@
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+# 高效写入
+
+本节介绍如何高效地向 TDengine 写入数据。
+
+## 高效写入原理 {#principle}
+
+### 客户端程序的角度 {#application-view}
+
+从客户端程序的角度来说,高效写入数据要考虑以下几个因素:
+
+1. 单次写入的数据量。一般来讲,每批次写入的数据量越大越高效(但超过一定阈值其优势会消失)。使用 SQL 写入 TDengine 时,尽量在一条 SQL 中拼接更多数据。目前,TDengine 支持的一条 SQL 的最大长度为 1,048,576(1M)个字符。
+2. 并发连接数。一般来讲,同时写入数据的并发连接数越多写入越高效(但超过一定阈值反而会下降,取决于服务端处理能力)。
+3. 数据在不同表(或子表)之间的分布,即要写入数据的相邻性。一般来说,每批次只向同一张表(或子表)写入数据比向多张表(或子表)写入数据要更高效;
+4. 写入方式。一般来讲:
+ - 参数绑定写入比 SQL 写入更高效。因参数绑定方式避免了 SQL 解析。(但增加了 C 接口的调用次数,对于连接器也有性能损耗)。
+ - SQL 写入不自动建表比自动建表更高效。因自动建表要频繁检查表是否存在
+ - SQL 写入比无模式写入更高效。因无模式写入会自动建表且支持动态更改表结构
+
+客户端程序要充分且恰当地利用以上几个因素。在单次写入中尽量只向同一张表(或子表)写入数据,每批次写入的数据量经过测试和调优设定为一个最适合当前系统处理能力的数值,并发写入的连接数同样经过测试和调优后设定为一个最适合当前系统处理能力的数值,以实现在当前系统中的最佳写入速度。
+
+### 数据源的角度 {#datasource-view}
+
+客户端程序通常需要从数据源读数据再写入 TDengine。从数据源角度来说,以下几种情况需要在读线程和写线程之间增加队列:
+
+1. 有多个数据源,单个数据源生成数据的速度远小于单线程写入的速度,但数据量整体比较大。此时队列的作用是把多个数据源的数据汇聚到一起,增加单次写入的数据量。
+2. 单个数据源生成数据的速度远大于单线程写入的速度。此时队列的作用是增加写入的并发度。
+3. 单张表的数据分散在多个数据源。此时队列的作用是将同一张表的数据提前汇聚到一起,提高写入时数据的相邻性。
+
+如果写应用的数据源是 Kafka, 写应用本身即 Kafka 的消费者,则可利用 Kafka 的特性实现高效写入。比如:
+
+1. 将同一张表的数据写到同一个 Topic 的同一个 Partition,增加数据的相邻性
+2. 通过订阅多个 Topic 实现数据汇聚
+3. 通过增加 Consumer 线程数增加写入的并发度
+4. 通过增加每次 fetch 的最大数据量来增加单次写入的最大数据量
+
+### 服务器配置的角度 {#setting-view}
+
+从服务端配置的角度,要根据系统中磁盘的数量,磁盘的 I/O 能力,以及处理器能力在创建数据库时设置适当的 vgroups 数量以充分发挥系统性能。如果 vgroups 过少,则系统性能无法发挥;如果 vgroups 过多,会造成无谓的资源竞争。常规推荐 vgroups 数量为 CPU 核数的 2 倍,但仍然要结合具体的系统资源配置进行调优。
+
+更多调优参数,请参考 [数据库管理](../../../taos-sql/database) 和 [服务端配置](../../../reference/config)。
+
+## 高效写入示例 {#sample-code}
+
+### 场景设计 {#scenario}
+
+下面的示例程序展示了如何高效写入数据,场景设计如下:
+
+- TDengine 客户端程序从其它数据源不断读入数据,在示例程序中采用生成模拟数据的方式来模拟读取数据源
+- 单个连接向 TDengine 写入的速度无法与读数据的速度相匹配,因此客户端程序启动多个线程,每个线程都建立了与 TDengine 的连接,每个线程都有一个独占的固定大小的消息队列
+- 客户端程序将接收到的数据根据所属的表名(或子表名)HASH 到不同的线程,即写入该线程所对应的消息队列,以此确保属于某个表(或子表)的数据一定会被一个固定的线程处理
+- 各个子线程在将所关联的消息队列中的数据读空后或者读取数据量达到一个预定的阈值后将该批数据写入 TDengine,并继续处理后面接收到的数据
+
+![TDengine 高效写入示例场景的线程模型](highvolume.webp)
+
+### 示例代码 {#code}
+
+这一部分是针对以上场景的示例代码。对于其它场景高效写入原理相同,不过代码需要适当修改。
+
+本示例代码假设源数据属于同一张超级表(meters)的不同子表。程序在开始写入数据之前已经在 test 库创建了这个超级表。对于子表,将根据收到的数据,由应用程序自动创建。如果实际场景是多个超级表,只需修改写任务自动建表的代码。
+
+
+
+
+**程序清单**
+
+| 类名 | 功能说明 |
+| ---------------- | --------------------------------------------------------------------------- |
+| FastWriteExample | 主程序 |
+| ReadTask | 从模拟源中读取数据,将表名经过 hash 后得到 Queue 的 index,写入对应的 Queue |
+| WriteTask | 从 Queue 中获取数据,组成一个 Batch,写入 TDengine |
+| MockDataSource | 模拟生成一定数量 meters 子表的数据 |
+| SQLWriter | WriteTask 依赖这个类完成 SQL 拼接、自动建表、 SQL 写入、SQL 长度检查 |
+| StmtWriter | 实现参数绑定方式批量写入(暂未完成) |
+| DataBaseMonitor | 统计写入速度,并每隔 10 秒把当前写入速度打印到控制台 |
+
+
+以下是各类的完整代码和更详细的功能说明。
+
+
+FastWriteExample
+主程序负责:
+
+1. 创建消息队列
+2. 启动写线程
+3. 启动读线程
+4. 每隔 10 秒统计一次写入速度
+
+主程序默认暴露了 4 个参数,每次启动程序都可调节,用于测试和调优:
+
+1. 读线程个数。默认为 1。
+2. 写线程个数。默认为 3。
+3. 模拟生成的总表数。默认为 1000。将会平分给各个读线程。如果总表数较大,建表需要花费较长时间,开始统计的写入速度可能较慢。
+4. 每批最多写入记录数量。默认为 3000。
+
+队列容量(taskQueueCapacity)也是与性能有关的参数,可通过修改程序调节。一般来讲,队列容量越大,入队被阻塞的概率越小,队列的吞吐量越大,但是内存占用也会越大。 示例程序默认值已经设置得足够大。
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}}
+```
+
+
+
+
+ReadTask
+
+读任务负责从数据源读数据。每个读任务都关联了一个模拟数据源。每个模拟数据源可生成一定数量表的数据。不同的模拟数据源生成不同表的数据。
+
+读任务采用阻塞的方式写消息队列。也就是说,一旦队列满了,写操作就会阻塞。
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java}}
+```
+
+
+
+
+WriteTask
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java}}
+```
+
+
+
+
+
+MockDataSource
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java}}
+```
+
+
+
+
+
+SQLWriter
+
+SQLWriter 类封装了拼 SQL 和写数据的逻辑。注意,所有的表都没有提前创建,而是在 catch 到表不存在异常的时候,再以超级表为模板批量建表,然后重新执行 INSERT 语句。对于其它异常,这里简单地记录当时执行的 SQL 语句到日志中,你也可以记录更多线索到日志,以便排查错误和故障恢复。
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java}}
+```
+
+
+
+
+
+DataBaseMonitor
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java}}
+```
+
+
+
+**执行步骤**
+
+
+执行 Java 示例程序
+
+执行程序前需配置环境变量 `TDENGINE_JDBC_URL`。如果 TDengine Server 部署在本机,且用户名、密码和端口都是默认值,那么可配置:
+
+```
+TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+```
+
+**本地集成开发环境执行示例程序**
+
+1. clone TDengine 仓库
+ ```
+ git clone git@github.com:taosdata/TDengine.git --depth 1
+ ```
+2. 用集成开发环境打开 `docs/examples/java` 目录。
+3. 在开发环境中配置环境变量 `TDENGINE_JDBC_URL`。如果已配置了全局的环境变量 `TDENGINE_JDBC_URL` 可跳过这一步。
+4. 运行类 `com.taos.example.highvolume.FastWriteExample`。
+
+**远程服务器上执行示例程序**
+
+若要在服务器上执行示例程序,可按照下面的步骤操作:
+
+1. 打包示例代码。在目录 TDengine/docs/examples/java 下执行:
+ ```
+ mvn package
+ ```
+2. 远程服务器上创建 examples 目录:
+ ```
+ mkdir -p examples/java
+ ```
+3. 复制依赖到服务器指定目录:
+ - 复制依赖包,只用复制一次
+ ```
+ scp -r .\target\lib @:~/examples/java
+ ```
+ - 复制本程序的 jar 包,每次更新代码都需要复制
+ ```
+ scp -r .\target\javaexample-1.0.jar @:~/examples/java
+ ```
+4. 配置环境变量。
+ 编辑 `~/.bash_profile` 或 `~/.bashrc` 添加如下内容例如:
+
+ ```
+ export TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+ ```
+
+ 以上使用的是本地部署 TDengine Server 时默认的 JDBC URL。你需要根据自己的实际情况更改。
+
+5. 用 java 命令启动示例程序,命令模板:
+
+ ```
+ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample
+ ```
+
+6. 结束测试程序。测试程序不会自动结束,在获取到当前配置下稳定的写入速度后,按 CTRL + C 结束程序。
+ 下面是一次实际运行的日志输出,机器配置 16核 + 64G + 固态硬盘。
+
+ ```
+ root@vm85$ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample 2 12
+ 18:56:35.896 [main] INFO c.t.e.highvolume.FastWriteExample - readTaskCount=2, writeTaskCount=12 tableCount=1000 maxBatchSize=3000
+ 18:56:36.011 [WriteThread-0] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.015 [WriteThread-0] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.021 [WriteThread-1] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.022 [WriteThread-1] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.031 [WriteThread-2] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.032 [WriteThread-2] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.041 [WriteThread-3] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.042 [WriteThread-3] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.093 [WriteThread-4] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.094 [WriteThread-4] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.099 [WriteThread-5] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.100 [WriteThread-5] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.100 [WriteThread-6] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.101 [WriteThread-6] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.103 [WriteThread-7] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.104 [WriteThread-7] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.105 [WriteThread-8] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.107 [WriteThread-8] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.108 [WriteThread-9] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.109 [WriteThread-9] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.156 [WriteThread-10] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.157 [WriteThread-11] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.158 [WriteThread-10] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.158 [ReadThread-0] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [ReadThread-1] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [WriteThread-11] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:46.369 [main] INFO c.t.e.highvolume.FastWriteExample - count=18554448 speed=1855444
+ 18:56:56.946 [main] INFO c.t.e.highvolume.FastWriteExample - count=39059660 speed=2050521
+ 18:57:07.322 [main] INFO c.t.e.highvolume.FastWriteExample - count=59403604 speed=2034394
+ 18:57:18.032 [main] INFO c.t.e.highvolume.FastWriteExample - count=80262938 speed=2085933
+ 18:57:28.432 [main] INFO c.t.e.highvolume.FastWriteExample - count=101139906 speed=2087696
+ 18:57:38.921 [main] INFO c.t.e.highvolume.FastWriteExample - count=121807202 speed=2066729
+ 18:57:49.375 [main] INFO c.t.e.highvolume.FastWriteExample - count=142952417 speed=2114521
+ 18:58:00.689 [main] INFO c.t.e.highvolume.FastWriteExample - count=163650306 speed=2069788
+ 18:58:11.646 [main] INFO c.t.e.highvolume.FastWriteExample - count=185019808 speed=2136950
+ ```
+
+
+
+
+
+
+**程序清单**
+
+Python 示例程序中采用了多进程的架构,并使用了跨进程的消息队列。
+
+| 函数或类 | 功能说明 |
+| ------------------------ | -------------------------------------------------------------------- |
+| main 函数 | 程序入口, 创建各个子进程和消息队列 |
+| run_monitor_process 函数 | 创建数据库,超级表,统计写入速度并定时打印到控制台 |
+| run_read_task 函数 | 读进程主要逻辑,负责从其它数据系统读数据,并分发数据到为之分配的队列 |
+| MockDataSource 类 | 模拟数据源, 实现迭代器接口,每次批量返回每张表的接下来 1000 条数据 |
+| run_write_task 函数 | 写进程主要逻辑。每次从队列中取出尽量多的数据,并批量写入 |
+| SQLWriter类 | SQL 写入和自动建表 |
+| StmtWriter 类 | 实现参数绑定方式批量写入(暂未完成) |
+
+
+
+main 函数
+
+main 函数负责创建消息队列和启动子进程,子进程有 3 类:
+
+1. 1 个监控进程,负责数据库初始化和统计写入速度
+2. n 个读进程,负责从其它数据系统读数据
+3. m 个写进程,负责写数据库
+
+main 函数可以接收 5 个启动参数,依次是:
+
+1. 读任务(进程)数, 默认为 1
+2. 写任务(进程)数, 默认为 1
+3. 模拟生成的总表数,默认为 1000
+4. 队列大小(单位字节),默认为 1000000
+5. 每批最多写入记录数量, 默认为 3000
+
+```python
+{{#include docs/examples/python/fast_write_example.py:main}}
+```
+
+
+
+
+run_monitor_process
+
+监控进程负责初始化数据库,并监控当前的写入速度。
+
+```python
+{{#include docs/examples/python/fast_write_example.py:monitor}}
+```
+
+
+
+
+
+run_read_task 函数
+
+读进程,负责从其它数据系统读数据,并分发数据到为之分配的队列。
+
+```python
+{{#include docs/examples/python/fast_write_example.py:read}}
+```
+
+
+
+
+
+MockDataSource
+
+以下是模拟数据源的实现,我们假设数据源生成的每一条数据都带有目标表名信息。实际中你可能需要一定的规则确定目标表名。
+
+```python
+{{#include docs/examples/python/mockdatasource.py}}
+```
+
+
+
+
+run_write_task 函数
+
+写进程每次从队列中取出尽量多的数据,并批量写入。
+
+```python
+{{#include docs/examples/python/fast_write_example.py:write}}
+```
+
+
+
+
+
+SQLWriter 类封装了拼 SQL 和写数据的逻辑。所有的表都没有提前创建,而是在发生表不存在错误的时候,再以超级表为模板批量建表,然后重新执行 INSERT 语句。对于其它错误会记录当时执行的 SQL, 以便排查错误和故障恢复。这个类也对 SQL 是否超过最大长度限制做了检查,根据 TDengine 3.0 的限制由输入参数 maxSQLLength 传入了支持的最大 SQL 长度,即 1048576 。
+
+SQLWriter
+
+```python
+{{#include docs/examples/python/sql_writer.py}}
+```
+
+
+
+**执行步骤**
+
+
+
+执行 Python 示例程序
+
+1. 前提条件
+
+ - 已安装 TDengine 客户端驱动
+ - 已安装 Python3, 推荐版本 >= 3.8
+ - 已安装 taospy
+
+2. 安装 faster-fifo 代替 python 内置的 multiprocessing.Queue
+
+ ```
+ pip3 install faster-fifo
+ ```
+
+3. 点击上面的“查看源码”链接复制 `fast_write_example.py` 、 `sql_writer.py` 和 `mockdatasource.py` 三个文件。
+
+4. 执行示例程序
+
+ ```
+ python3 fast_write_example.py
+ ```
+
+ 下面是一次实际运行的输出, 机器配置 16核 + 64G + 固态硬盘。
+
+ ```
+ root@vm85$ python3 fast_write_example.py 8 8
+ 2022-07-14 19:13:45,869 [root] - READ_TASK_COUNT=8, WRITE_TASK_COUNT=8, TABLE_COUNT=1000, QUEUE_SIZE=1000000, MAX_BATCH_SIZE=3000
+ 2022-07-14 19:13:48,882 [root] - WriteTask-0 started with pid 718347
+ 2022-07-14 19:13:48,883 [root] - WriteTask-1 started with pid 718348
+ 2022-07-14 19:13:48,884 [root] - WriteTask-2 started with pid 718349
+ 2022-07-14 19:13:48,884 [root] - WriteTask-3 started with pid 718350
+ 2022-07-14 19:13:48,885 [root] - WriteTask-4 started with pid 718351
+ 2022-07-14 19:13:48,885 [root] - WriteTask-5 started with pid 718352
+ 2022-07-14 19:13:48,886 [root] - WriteTask-6 started with pid 718353
+ 2022-07-14 19:13:48,886 [root] - WriteTask-7 started with pid 718354
+ 2022-07-14 19:13:48,887 [root] - ReadTask-0 started with pid 718355
+ 2022-07-14 19:13:48,888 [root] - ReadTask-1 started with pid 718356
+ 2022-07-14 19:13:48,889 [root] - ReadTask-2 started with pid 718357
+ 2022-07-14 19:13:48,889 [root] - ReadTask-3 started with pid 718358
+ 2022-07-14 19:13:48,890 [root] - ReadTask-4 started with pid 718359
+ 2022-07-14 19:13:48,891 [root] - ReadTask-5 started with pid 718361
+ 2022-07-14 19:13:48,892 [root] - ReadTask-6 started with pid 718364
+ 2022-07-14 19:13:48,893 [root] - ReadTask-7 started with pid 718365
+ 2022-07-14 19:13:56,042 [DataBaseMonitor] - count=6676310 speed=667631.0
+ 2022-07-14 19:14:06,196 [DataBaseMonitor] - count=20004310 speed=1332800.0
+ 2022-07-14 19:14:16,366 [DataBaseMonitor] - count=32290310 speed=1228600.0
+ 2022-07-14 19:14:26,527 [DataBaseMonitor] - count=44438310 speed=1214800.0
+ 2022-07-14 19:14:36,673 [DataBaseMonitor] - count=56608310 speed=1217000.0
+ 2022-07-14 19:14:46,834 [DataBaseMonitor] - count=68757310 speed=1214900.0
+ 2022-07-14 19:14:57,280 [DataBaseMonitor] - count=80992310 speed=1223500.0
+ 2022-07-14 19:15:07,689 [DataBaseMonitor] - count=93805310 speed=1281300.0
+ 2022-07-14 19:15:18,020 [DataBaseMonitor] - count=106111310 speed=1230600.0
+ 2022-07-14 19:15:28,356 [DataBaseMonitor] - count=118394310 speed=1228300.0
+ 2022-07-14 19:15:38,690 [DataBaseMonitor] - count=130742310 speed=1234800.0
+ 2022-07-14 19:15:49,000 [DataBaseMonitor] - count=143051310 speed=1230900.0
+ 2022-07-14 19:15:59,323 [DataBaseMonitor] - count=155276310 speed=1222500.0
+ 2022-07-14 19:16:09,649 [DataBaseMonitor] - count=167603310 speed=1232700.0
+ 2022-07-14 19:16:19,995 [DataBaseMonitor] - count=179976310 speed=1237300.0
+ ```
+
+
+
+:::note
+使用 Python 连接器多进程连接 TDengine 的时候,有一个限制:不能在父进程中建立连接,所有连接只能在子进程中创建。
+如果在父进程中创建连接,子进程再创建连接就会一直阻塞。这是个已知问题。
+
+:::
+
+
+
+
+
diff --git a/docs/zh/07-develop/03-insert-data/highvolume.webp b/docs/zh/07-develop/03-insert-data/highvolume.webp
new file mode 100644
index 0000000000000000000000000000000000000000..46dfc74ae3b0043c591ff930c62251da49cae7ad
Binary files /dev/null and b/docs/zh/07-develop/03-insert-data/highvolume.webp differ
diff --git a/docs/zh/07-develop/03-insert-data/index.md b/docs/zh/07-develop/03-insert-data/index.md
index 55a28e4a8ba13501e2f481c9aba67b7300da98d0..f1e5ada4dfd350e982fa0ae57412af07ac43e03a 100644
--- a/docs/zh/07-develop/03-insert-data/index.md
+++ b/docs/zh/07-develop/03-insert-data/index.md
@@ -1,5 +1,7 @@
---
+sidebar_label: 写入数据
title: 写入数据
+description: TDengine 的各种写入方式
---
TDengine 支持多种写入协议,包括 SQL,InfluxDB Line 协议, OpenTSDB Telnet 协议,OpenTSDB JSON 格式协议。数据可以单条插入,也可以批量插入,可以插入一个数据采集点的数据,也可以同时插入多个数据采集点的数据。同时,TDengine 支持多线程插入,支持时间乱序数据插入,也支持历史数据插入。InfluxDB Line 协议、OpenTSDB Telnet 协议和 OpenTSDB JSON 格式协议是 TDengine 支持的三种无模式写入协议。使用无模式方式写入无需提前创建超级表和子表,并且引擎能自适用数据对表结构做调整。
diff --git a/docs/zh/07-develop/04-query-data/index.mdx b/docs/zh/07-develop/04-query-data/index.mdx
index 2631d147a5f3e968e7153de8576e96f2c07c57cd..d6156c8a59a70af80f2632cdf3801ef7281b69d5 100644
--- a/docs/zh/07-develop/04-query-data/index.mdx
+++ b/docs/zh/07-develop/04-query-data/index.mdx
@@ -1,4 +1,5 @@
---
+sidebar_label: 查询数据
title: 查询数据
description: "主要查询功能,通过连接器执行同步查询和异步查询"
---
@@ -43,7 +44,7 @@ Query OK, 2 row(s) in set (0.001100s)
为满足物联网场景的需求,TDengine 支持几个特殊的函数,比如 twa(时间加权平均),spread (最大值与最小值的差),last_row(最后一条记录)等,更多与物联网场景相关的函数将添加进来。
-具体的查询语法请看 [TAOS SQL 的数据查询](../../taos-sql/select) 章节。
+具体的查询语法请看 [TDengine SQL 的数据查询](../../taos-sql/select) 章节。
## 多表聚合查询
@@ -51,7 +52,7 @@ Query OK, 2 row(s) in set (0.001100s)
### 示例一
-在 TAOS Shell,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。
+在 TDengine CLI,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。
```
taos> SELECT AVG(voltage), location FROM meters GROUP BY location;
@@ -64,7 +65,7 @@ Query OK, 2 rows in database (0.005995s)
### 示例二
-在 TAOS shell, 查找 groupId 为 2 的所有智能电表的记录条数,电流的最大值。
+在 TDengine CLI, 查找 groupId 为 2 的所有智能电表的记录条数,电流的最大值。
```
taos> SELECT count(*), max(current) FROM meters where groupId = 2;
@@ -74,7 +75,7 @@ taos> SELECT count(*), max(current) FROM meters where groupId = 2;
Query OK, 1 row(s) in set (0.002136s)
```
-在 [TAOS SQL 的数据查询](../../taos-sql/select) 一章,查询类操作都会注明是否支持超级表。
+在 [TDengine SQL 的数据查询](../../taos-sql/select) 一章,查询类操作都会注明是否支持超级表。
## 降采样查询、插值
@@ -122,7 +123,7 @@ Query OK, 6 rows in database (0.005515s)
如果一个时间间隔里,没有采集的数据,TDengine 还提供插值计算的功能。
-语法规则细节请见 [TAOS SQL 的按时间窗口切分聚合](../../taos-sql/distinguished) 章节。
+语法规则细节请见 [TDengine SQL 的按时间窗口切分聚合](../../taos-sql/distinguished) 章节。
## 示例代码
diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx
index da8bf5e20ed9d230419150dd10ee6739d85a37e9..2f5c13d9b0bc0e3940fb99b45c693e2ae80c8f47 100644
--- a/docs/zh/07-develop/07-tmq.mdx
+++ b/docs/zh/07-develop/07-tmq.mdx
@@ -64,7 +64,7 @@ DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
```
-这些 API 的文档请见 [C/C++ Connector](/reference/connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
+这些 API 的文档请见 [C/C++ Connector](../../connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
diff --git a/docs/zh/07-develop/08-cache.md b/docs/zh/07-develop/08-cache.md
index bd9da6062d3cc1a21be418079f0fee40520f4460..29e28e3dde0816d9e5a08f74abd2382854d336da 100644
--- a/docs/zh/07-develop/08-cache.md
+++ b/docs/zh/07-develop/08-cache.md
@@ -20,11 +20,11 @@ create database db0 vgroups 100 buffer 16MB
## 读缓存
-在创建数据库时可以选择是否缓存该数据库中每个子表的最新数据。由参数 cachelast 设置,分为三种情况:
-- 0: 不缓存
-- 1: 缓存子表最近一行数据,这将显著改善 last_row 函数的性能
-- 2: 缓存子表每一列最近的非 NULL 值,这将显著改善无特殊影响(比如 WHERE, ORDER BY, GROUP BY, INTERVAL)时的 last 函数的性能
-- 3: 同时缓存行和列,即等同于上述 cachelast 值为 1 或 2 时的行为同时生效
+在创建数据库时可以选择是否缓存该数据库中每个子表的最新数据。由参数 cachemodel 设置,分为四种情况:
+- none: 不缓存
+- last_row: 缓存子表最近一行数据,这将显著改善 last_row 函数的性能
+- last_value: 缓存子表每一列最近的非 NULL 值,这将显著改善无特殊影响(比如 WHERE, ORDER BY, GROUP BY, INTERVAL)时的 last 函数的性能
+- both: 同时缓存最近的行和列,即等同于上述 cachemodel 值为 last_row 和 last_value 的行为同时生效
## 元数据缓存
diff --git a/docs/zh/07-develop/index.md b/docs/zh/07-develop/index.md
index 4d0f3c3cea3da3d70051dd07f835c34b4f47c3cd..efaffaea71ce68ee0a8ddbf5634c4150adc94bfb 100644
--- a/docs/zh/07-develop/index.md
+++ b/docs/zh/07-develop/index.md
@@ -1,5 +1,7 @@
---
title: 开发指南
+sidebar_label: 开发指南
+description: 让开发者能够快速上手的指南
---
开发一个应用,如果你准备采用TDengine作为时序数据处理的工具,那么有如下几个事情要做:
@@ -12,7 +14,7 @@ title: 开发指南
7. 在很多场景下(如车辆管理),应用需要获取每个数据采集点的最新状态,那么建议你采用TDengine的cache功能,而不用单独部署Redis等缓存软件。
8. 如果你发现TDengine的函数无法满足你的要求,那么你可以使用用户自定义函数来解决问题。
-本部分内容就是按照上述的顺序组织的。为便于理解,TDengine为每个功能为每个支持的编程语言都提供了示例代码。如果你希望深入了解SQL的使用,需要查看[SQL手册](/taos-sql/)。如果想更深入地了解各连接器的使用,请阅读[连接器参考指南](/reference/connector/)。如果还希望想将TDengine与第三方系统集成起来,比如Grafana, 请参考[第三方工具](/third-party/)。
+本部分内容就是按照上述的顺序组织的。为便于理解,TDengine为每个功能为每个支持的编程语言都提供了示例代码。如果你希望深入了解SQL的使用,需要查看[SQL手册](/taos-sql/)。如果想更深入地了解各连接器的使用,请阅读[连接器参考指南](../connector/)。如果还希望想将TDengine与第三方系统集成起来,比如Grafana, 请参考[第三方工具](../third-party/)。
如果在开发过程中遇到任何问题,请点击每个页面下方的["反馈问题"](https://github.com/taosdata/TDengine/issues/new/choose), 在GitHub上直接递交issue。
diff --git a/docs/zh/14-reference/02-rest-api/02-rest-api.mdx b/docs/zh/08-connector/02-rest-api.mdx
similarity index 96%
rename from docs/zh/14-reference/02-rest-api/02-rest-api.mdx
rename to docs/zh/08-connector/02-rest-api.mdx
index 4b9171c07d165bfa10aea14871da2697cae4b54d..e254244657b457e10bc2daab020b230c9a8bb2cc 100644
--- a/docs/zh/14-reference/02-rest-api/02-rest-api.mdx
+++ b/docs/zh/08-connector/02-rest-api.mdx
@@ -1,5 +1,7 @@
---
title: REST API
+sidebar_label: REST API
+description: 详细介绍 TDengine 提供的 RESTful API.
---
为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。REST 连接器的使用参见 [视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。
@@ -10,7 +12,7 @@ title: REST API
## 安装
-RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安装任何 TDengine 的库,只要客户端的开发语言支持 HTTP 协议即可。
+RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安装任何 TDengine 的库,只要客户端的开发语言支持 HTTP 协议即可。TDengine 的 RESTful API 由 [taosAdapter](../../reference/taosadapter) 提供,在使用 RESTful API 之前需要确保 `taosAdapter` 正常运行。
## 验证
diff --git a/docs/zh/14-reference/03-connector/cpp.mdx b/docs/zh/08-connector/03-cpp.mdx
similarity index 99%
rename from docs/zh/14-reference/03-connector/cpp.mdx
rename to docs/zh/08-connector/03-cpp.mdx
index bd5776d035b0228637f7ed2255c502ed73d6a654..c0bd33f12964537699849e35644a8c04e0f716f0 100644
--- a/docs/zh/14-reference/03-connector/cpp.mdx
+++ b/docs/zh/08-connector/03-cpp.mdx
@@ -1,5 +1,4 @@
---
-sidebar_position: 1
sidebar_label: C/C++
title: C/C++ Connector
---
@@ -22,7 +21,7 @@ TDengine 客户端驱动的动态库位于:
## 支持的平台
-请参考[支持的平台列表](/reference/connector#支持的平台)
+请参考[支持的平台列表](../#支持的平台)
## 支持的版本
@@ -30,7 +29,7 @@ TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一
## 安装步骤
-TDengine 客户端驱动的安装请参考 [安装指南](/reference/connector#安装步骤)
+TDengine 客户端驱动的安装请参考 [安装指南](../#安装步骤)
## 建立连接
diff --git a/docs/zh/14-reference/03-connector/java.mdx b/docs/zh/08-connector/04-java.mdx
similarity index 99%
rename from docs/zh/14-reference/03-connector/java.mdx
rename to docs/zh/08-connector/04-java.mdx
index 183994313e205bbaf13f30d534fa151a23216708..6b1715f8c6a2f949fca552885ea3920f43e8a849 100644
--- a/docs/zh/14-reference/03-connector/java.mdx
+++ b/docs/zh/08-connector/04-java.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 2
sidebar_label: Java
title: TDengine Java Connector
description: TDengine Java 连接器基于标准 JDBC API 实现, 并提供原生连接与 REST连接两种连接器。
@@ -35,7 +34,7 @@ REST 连接支持所有能运行 Java 的平台。
## 版本支持
-请参考[版本支持列表](/reference/connector#版本支持)
+请参考[版本支持列表](../#版本支持)
## TDengine DataType 和 Java DataType
@@ -64,7 +63,7 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
使用 Java Connector 连接数据库前,需要具备以下条件:
- 已安装 Java 1.8 或以上版本运行时环境和 Maven 3.6 或以上版本
-- 已安装 TDengine 客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装),具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)
+- 已安装 TDengine 客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装),具体步骤请参考[安装客户端驱动](../#安装客户端驱动)
### 安装连接器
@@ -630,7 +629,7 @@ public void setNString(int columnIndex, ArrayList list, int size) throws
### 无模式写入
-TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../schemaless)。
+TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../reference/schemaless/)。
**注意**:
diff --git a/docs/zh/14-reference/03-connector/go.mdx b/docs/zh/08-connector/05-go.mdx
similarity index 95%
rename from docs/zh/14-reference/03-connector/go.mdx
rename to docs/zh/08-connector/05-go.mdx
index a87c948d4a3c0e0764e6c4823608bf7d8b171f24..9d30f75190cddbb17c40e97655002a158cd6aae6 100644
--- a/docs/zh/14-reference/03-connector/go.mdx
+++ b/docs/zh/08-connector/05-go.mdx
@@ -9,11 +9,11 @@ import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Preparition from "./_preparition.mdx"
-import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx"
-import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx"
-import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx"
-import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx"
-import GoQuery from "../../07-develop/04-query-data/_go.mdx"
+import GoInsert from "../07-develop/03-insert-data/_go_sql.mdx"
+import GoInfluxLine from "../07-develop/03-insert-data/_go_line.mdx"
+import GoOpenTSDBTelnet from "../07-develop/03-insert-data/_go_opts_telnet.mdx"
+import GoOpenTSDBJson from "../07-develop/03-insert-data/_go_opts_json.mdx"
+import GoQuery from "../07-develop/04-query-data/_go.mdx"
`driver-go` 是 TDengine 的官方 Go 语言连接器,实现了 Go 语言[ database/sql ](https://golang.org/pkg/database/sql/) 包的接口。Go 开发人员可以通过它开发存取 TDengine 集群数据的应用软件。
@@ -30,7 +30,7 @@ REST 连接支持所有能运行 Go 的平台。
## 版本支持
-请参考[版本支持列表](/reference/connector#版本支持)
+请参考[版本支持列表](../#版本支持)
## 支持的功能特性
@@ -56,7 +56,7 @@ REST 连接支持所有能运行 Go 的平台。
### 安装前准备
* 安装 Go 开发环境(Go 1.14 及以上,GCC 4.8.5 及以上)
-* 如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)
+* 如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)
配置好环境变量,检查命令:
diff --git a/docs/zh/14-reference/03-connector/rust.mdx b/docs/zh/08-connector/06-rust.mdx
similarity index 97%
rename from docs/zh/14-reference/03-connector/rust.mdx
rename to docs/zh/08-connector/06-rust.mdx
index ae644e191166e244ae42373aeef2cbbacbe9e0e1..26f53c82d630fda168dd98b4c8ec993afc5e3a1d 100644
--- a/docs/zh/14-reference/03-connector/rust.mdx
+++ b/docs/zh/08-connector/06-rust.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 5
sidebar_label: Rust
title: TDengine Rust Connector
---
@@ -9,9 +8,9 @@ import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Preparition from "./_preparition.mdx"
-import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
-import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
-import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
+import RustInsert from "../07-develop/03-insert-data/_rust_sql.mdx"
+import RustBind from "../07-develop/03-insert-data/_rust_stmt.mdx"
+import RustQuery from "../07-develop/04-query-data/_rust.mdx"
[![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos)
@@ -28,7 +27,7 @@ Websocket 连接支持所有能运行 Rust 的平台。
## 版本支持
-请参考[版本支持列表](/reference/connector#版本支持)
+请参考[版本支持列表](../#版本支持)
Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 3.0 版本以上的 TDengine,以避免已知问题。
@@ -37,7 +36,7 @@ Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容
### 安装前准备
* 安装 Rust 开发工具链
-* 如果使用原生连接,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)
+* 如果使用原生连接,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)
### 添加 taos 依赖
diff --git a/docs/zh/14-reference/03-connector/python.mdx b/docs/zh/08-connector/07-python.mdx
similarity index 95%
rename from docs/zh/14-reference/03-connector/python.mdx
rename to docs/zh/08-connector/07-python.mdx
index d7b17dc74a6d62da3adfd1a10d8d62a9570226a1..0242486d3b8820ac38301d38ccbaf8bb9fc7e1c3 100644
--- a/docs/zh/14-reference/03-connector/python.mdx
+++ b/docs/zh/08-connector/07-python.mdx
@@ -1,5 +1,4 @@
---
-sidebar_position: 3
sidebar_label: Python
title: TDengine Python Connector
description: "taospy 是 TDengine 的官方 Python 连接器。taospy 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。tasopy 对 TDengine 的原生接口和 REST 接口都进行了封装, 分别对应 tasopy 的两个子模块:tasos 和 taosrest。除了对原生接口和 REST 接口的封装,taospy 还提供了符合 Python 数据访问规范(PEP 249)的编程接口。这使得 taospy 和很多第三方工具集成变得简单,比如 SQLAlchemy 和 pandas"
@@ -8,7 +7,7 @@ description: "taospy 是 TDengine 的官方 Python 连接器。taospy 提供了
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-`taospy` 是 TDengine 的官方 Python 连接器。`taospy` 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。`taospy` 对 TDengine 的[原生接口](/reference/connector/cpp)和 [REST 接口](/reference/rest-api)都进行了封装, 分别对应 `taospy` 包的 `taos` 模块 和 `taosrest` 模块。
+`taospy` 是 TDengine 的官方 Python 连接器。`taospy` 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。`taospy` 对 TDengine 的[原生接口](../cpp)和 [REST 接口](../rest-api)都进行了封装, 分别对应 `taospy` 包的 `taos` 模块 和 `taosrest` 模块。
除了对原生接口和 REST 接口的封装,`taospy` 还提供了符合 [Python 数据访问规范(PEP 249)](https://peps.python.org/pep-0249/) 的编程接口。这使得 `taospy` 和很多第三方工具集成变得简单,比如 [SQLAlchemy](https://www.sqlalchemy.org/) 和 [pandas](https://pandas.pydata.org/)。
使用客户端驱动提供的原生接口直接与服务端建立的连接的方式下文中称为“原生连接”;使用 taosAdapter 提供的 REST 接口与服务端建立的连接的方式下文中称为“REST 连接”。
@@ -17,7 +16,7 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con
## 支持的平台
-- 原生连接[支持的平台](/reference/connector/#支持的平台)和 TDengine 客户端支持的平台一致。
+- 原生连接[支持的平台](../#支持的平台)和 TDengine 客户端支持的平台一致。
- REST 连接支持所有能运行 Python 的平台。
## 版本选择
@@ -150,10 +149,19 @@ curl -u root:taosdata http://:/rest/sql -d "select server_version()"
```json
{
- "status": "succ",
- "head": ["server_version()"],
- "column_meta": [["server_version()", 8, 8]],
- "data": [["2.4.0.16"]],
+ "code": 0,
+ "column_meta": [
+ [
+ "server_version()",
+ "VARCHAR",
+ 7
+ ]
+ ],
+ "data": [
+ [
+ "3.0.0.0"
+ ]
+ ],
"rows": 1
}
```
@@ -266,7 +274,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
##### RestClient 类的使用
-`RestClient` 类是对于 [REST API](/reference/rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。
+`RestClient` 类是对于 [REST API](../rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。
```python title="RestClient 的使用"
{{#include docs/examples/python/rest_client_example.py}}
diff --git a/docs/zh/14-reference/03-connector/node.mdx b/docs/zh/08-connector/08-node.mdx
similarity index 92%
rename from docs/zh/14-reference/03-connector/node.mdx
rename to docs/zh/08-connector/08-node.mdx
index b089da99d26d0d671641fd0b50119853a04000a9..167ae069d6175873679e8c7cc4ecbb16dafe2ad8 100644
--- a/docs/zh/14-reference/03-connector/node.mdx
+++ b/docs/zh/08-connector/08-node.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 6
sidebar_label: Node.js
title: TDengine Node.js Connector
---
@@ -9,11 +8,11 @@ import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import Preparition from "./_preparition.mdx";
-import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx";
-import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx";
-import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx";
-import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx";
-import NodeQuery from "../../07-develop/04-query-data/_js.mdx";
+import NodeInsert from "../07-develop/03-insert-data/_js_sql.mdx";
+import NodeInfluxLine from "../07-develop/03-insert-data/_js_line.mdx";
+import NodeOpenTSDBTelnet from "../07-develop/03-insert-data/_js_opts_telnet.mdx";
+import NodeOpenTSDBJson from "../07-develop/03-insert-data/_js_opts_json.mdx";
+import NodeQuery from "../07-develop/04-query-data/_js.mdx";
`@tdengine/client` 和 `@tdengine/rest` 是 TDengine 的官方 Node.js 语言连接器。 Node.js 开发人员可以通过它开发可以存取 TDengine 集群数据的应用软件。注意:从 TDengine 3.0 开始 Node.js 原生连接器的包名由 `td2.0-connector` 改名为 `@tdengine/client` 而 rest 连接器的包名由 `td2.0-rest-connector` 改为 `@tdengine/rest`。并且不与 TDengine 2.x 兼容。
@@ -28,7 +27,7 @@ REST 连接器支持所有能运行 Node.js 的平台。
## 版本支持
-请参考[版本支持列表](/reference/connector#版本支持)
+请参考[版本支持列表](../#版本支持)
## 支持的功能特性
@@ -52,7 +51,7 @@ REST 连接器支持所有能运行 Node.js 的平台。
### 安装前准备
- 安装 Node.js 开发环境
-- 如果使用 REST 连接器,跳过此步。但如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)。我们使用 [node-gyp](https://github.com/nodejs/node-gyp) 和 TDengine 实例进行交互,还需要根据具体操作系统来安装下文提到的一些依赖工具。
+- 如果使用 REST 连接器,跳过此步。但如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)。我们使用 [node-gyp](https://github.com/nodejs/node-gyp) 和 TDengine 实例进行交互,还需要根据具体操作系统来安装下文提到的一些依赖工具。
diff --git a/docs/zh/14-reference/03-connector/csharp.mdx b/docs/zh/08-connector/09-csharp.mdx
similarity index 86%
rename from docs/zh/14-reference/03-connector/csharp.mdx
rename to docs/zh/08-connector/09-csharp.mdx
index 723c12932b410e9f85a0f35cd0c0b8273f4f7723..4e49d84835d66622293e607a58699ae93fc7013d 100644
--- a/docs/zh/14-reference/03-connector/csharp.mdx
+++ b/docs/zh/08-connector/09-csharp.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 7
sidebar_label: C#
title: C# Connector
---
@@ -9,16 +8,16 @@ import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Preparition from "./_preparition.mdx"
-import CSInsert from "../../07-develop/03-insert-data/_cs_sql.mdx"
-import CSInfluxLine from "../../07-develop/03-insert-data/_cs_line.mdx"
-import CSOpenTSDBTelnet from "../../07-develop/03-insert-data/_cs_opts_telnet.mdx"
-import CSOpenTSDBJson from "../../07-develop/03-insert-data/_cs_opts_json.mdx"
-import CSQuery from "../../07-develop/04-query-data/_cs.mdx"
-import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx"
+import CSInsert from "../07-develop/03-insert-data/_cs_sql.mdx"
+import CSInfluxLine from "../07-develop/03-insert-data/_cs_line.mdx"
+import CSOpenTSDBTelnet from "../07-develop/03-insert-data/_cs_opts_telnet.mdx"
+import CSOpenTSDBJson from "../07-develop/03-insert-data/_cs_opts_json.mdx"
+import CSQuery from "../07-develop/04-query-data/_cs.mdx"
+import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
`TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。
-`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](/reference/rest-api/) 文档自行编写。
+`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](../rest-api/) 文档自行编写。
本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。
@@ -32,7 +31,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx"
## 版本支持
-请参考[版本支持列表](/reference/connector#版本支持)
+请参考[版本支持列表](../#版本支持)
## 支持的功能特性
@@ -49,7 +48,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx"
* 安装 [.NET SDK](https://dotnet.microsoft.com/download)
* [Nuget 客户端](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (可选安装)
-* 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)
+* 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)
### 使用 dotnet CLI 安装
@@ -170,9 +169,9 @@ namespace TDengineExample
### 第三方驱动
-`Maikebing.Data.Taos` 是一个 TDengine 的 ADO.NET 连接器,支持 Linux,Windows 平台。该连接器由社区贡献者`麦壳饼@@maikebing` 提供,具体请参考:
+[`IoTSharp.Data.Taos`](https://github.com/IoTSharp/EntityFrameworkCore.Taos) 是一个 TDengine 的 ADO.NET 连接器,其中包含了用于EntityFrameworkCore 的提供程序 IoTSharp.EntityFrameworkCore.Taos 和健康检查组件 IoTSharp.HealthChecks.Taos ,支持 Linux,Windows 平台。该连接器由社区贡献者`麦壳饼@@maikebing` 提供,具体请参考:
-* 接口下载:
+* 接口下载:
* 用法说明:
## 常见问题
diff --git a/docs/zh/14-reference/03-connector/php.mdx b/docs/zh/08-connector/10-php.mdx
similarity index 95%
rename from docs/zh/14-reference/03-connector/php.mdx
rename to docs/zh/08-connector/10-php.mdx
index 2b7ff2a6febd162fe34ebb737d2f33fbd9fc58a2..5e32c709de89d69b8602b506a9c774cb0a0244f0 100644
--- a/docs/zh/14-reference/03-connector/php.mdx
+++ b/docs/zh/08-connector/10-php.mdx
@@ -1,5 +1,4 @@
---
-sidebar_position: 1
sidebar_label: PHP
title: PHP Connector
---
@@ -38,7 +37,7 @@ TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一
### 安装 TDengine 客户端驱动
-TDengine 客户端驱动的安装请参考 [安装指南](/reference/connector#安装步骤)
+TDengine 客户端驱动的安装请参考 [安装指南](../#安装步骤)
### 编译安装 php-tdengine
@@ -61,7 +60,7 @@ phpize && ./configure && make -j && make install
**手动指定 tdengine 目录:**
```shell
-phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/2.4.0.0 && make -j && make install
+phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
```
> `--with-tdengine-dir=` 后跟上 tdengine 目录。
diff --git a/docs/zh/14-reference/03-connector/_01-error-code.md b/docs/zh/08-connector/_01-error-code.md
similarity index 99%
rename from docs/zh/14-reference/03-connector/_01-error-code.md
rename to docs/zh/08-connector/_01-error-code.md
index 53e006e108543805232c8195474f2afd793e7332..3111d4bbf8a071500052309f2e3643f494c1be9a 100644
--- a/docs/zh/14-reference/03-connector/_01-error-code.md
+++ b/docs/zh/08-connector/_01-error-code.md
@@ -1,6 +1,7 @@
---
sidebar_label: 错误码
title: TDengine C/C++ 连接器错误码
+description: C/C++ 连接器的错误码列表和详细说明
---
本文中详细列举了在使用 TDengine C/C++ 连接器时客户端可能得到的错误码以及所要采取的相应动作。其它语言的连接器在使用原生连接方式时也会所得到的返回码返回给连接器的调用者。
diff --git a/docs/zh/14-reference/03-connector/_category_.yml b/docs/zh/08-connector/_category_.yml
similarity index 100%
rename from docs/zh/14-reference/03-connector/_category_.yml
rename to docs/zh/08-connector/_category_.yml
diff --git a/docs/zh/14-reference/03-connector/_linux_install.mdx b/docs/zh/08-connector/_linux_install.mdx
similarity index 96%
rename from docs/zh/14-reference/03-connector/_linux_install.mdx
rename to docs/zh/08-connector/_linux_install.mdx
index c3ddff53cd907b74c02cf3e7b5d5e8da33211fe1..0b1f415f54d28e65a519d35ab94a84b3aa8338ba 100644
--- a/docs/zh/14-reference/03-connector/_linux_install.mdx
+++ b/docs/zh/08-connector/_linux_install.mdx
@@ -4,7 +4,7 @@ import PkgListV3 from "/components/PkgListV3";
- [所有下载](../../releases)
+ [所有下载](../../releases/tdengine)
2. 解压缩软件包
diff --git a/docs/zh/14-reference/03-connector/_preparition.mdx b/docs/zh/08-connector/_preparition.mdx
similarity index 100%
rename from docs/zh/14-reference/03-connector/_preparition.mdx
rename to docs/zh/08-connector/_preparition.mdx
diff --git a/docs/zh/14-reference/03-connector/_verify_linux.mdx b/docs/zh/08-connector/_verify_linux.mdx
similarity index 100%
rename from docs/zh/14-reference/03-connector/_verify_linux.mdx
rename to docs/zh/08-connector/_verify_linux.mdx
diff --git a/docs/zh/14-reference/03-connector/_verify_windows.mdx b/docs/zh/08-connector/_verify_windows.mdx
similarity index 100%
rename from docs/zh/14-reference/03-connector/_verify_windows.mdx
rename to docs/zh/08-connector/_verify_windows.mdx
diff --git a/docs/zh/14-reference/03-connector/_windows_install.mdx b/docs/zh/08-connector/_windows_install.mdx
similarity index 95%
rename from docs/zh/14-reference/03-connector/_windows_install.mdx
rename to docs/zh/08-connector/_windows_install.mdx
index 9fdefa04c04412100f2b0b763a95698e5187608e..3cd688e615fe8bcf1aa472506754366f3a6db011 100644
--- a/docs/zh/14-reference/03-connector/_windows_install.mdx
+++ b/docs/zh/08-connector/_windows_install.mdx
@@ -4,7 +4,8 @@ import PkgListV3 from "/components/PkgListV3";
- [所有下载](../../releases)
+ [所有下载](../../releases/tdengine)
+
2. 执行安装程序,按提示选择默认值,完成安装
3. 安装路径
diff --git a/docs/zh/14-reference/03-connector/connector.webp b/docs/zh/08-connector/connector.webp
similarity index 100%
rename from docs/zh/14-reference/03-connector/connector.webp
rename to docs/zh/08-connector/connector.webp
diff --git a/docs/zh/14-reference/03-connector/03-connector.mdx b/docs/zh/08-connector/index.md
similarity index 98%
rename from docs/zh/14-reference/03-connector/03-connector.mdx
rename to docs/zh/08-connector/index.md
index bdad0b7e25a3a94fa34b14bf47403ba2afd7db8d..17de8e926cd9a3633dc8746b0fb49c38ff8ca61f 100644
--- a/docs/zh/14-reference/03-connector/03-connector.mdx
+++ b/docs/zh/08-connector/index.md
@@ -1,5 +1,7 @@
---
+sidebar_label: 连接器
title: 连接器
+description: 详细介绍各种语言的连接器及 REST API
---
TDengine 提供了丰富的应用程序开发接口,为了便于用户快速开发自己的应用,TDengine 支持了多种编程语言的连接器,其中官方连接器包括支持 C/C++、Java、Python、Go、Node.js、C# 和 Rust 的连接器。这些连接器支持使用原生接口(taosc)和 REST 接口(部分语言暂不支持)连接 TDengine 集群。社区开发者也贡献了多个非官方连接器,例如 ADO.NET 连接器、Lua 连接器和 PHP 连接器。
diff --git a/docs/zh/14-reference/03-connector/tdengine-jdbc-connector.webp b/docs/zh/08-connector/tdengine-jdbc-connector.webp
similarity index 100%
rename from docs/zh/14-reference/03-connector/tdengine-jdbc-connector.webp
rename to docs/zh/08-connector/tdengine-jdbc-connector.webp
diff --git a/docs/zh/10-deployment/01-deploy.md b/docs/zh/10-deployment/01-deploy.md
index 22a9c2ff8e68880ce5b0be2e01924eca12707a37..03b4ce30f980cd77e9845076ce9bb35c4474f948 100644
--- a/docs/zh/10-deployment/01-deploy.md
+++ b/docs/zh/10-deployment/01-deploy.md
@@ -1,6 +1,7 @@
---
sidebar_label: 手动部署
title: 集群部署和管理
+description: 使用命令行工具手动部署 TDengine 集群
---
## 准备工作
@@ -70,7 +71,7 @@ serverPort 6030
## 启动集群
-按照《立即开始》里的步骤,启动第一个数据节点,例如 h1.taosdata.com,然后执行 taos,启动 taos shell,从 shell 里执行命令“SHOW DNODES”,如下所示:
+按照《立即开始》里的步骤,启动第一个数据节点,例如 h1.taosdata.com,然后执行 taos,启动 TDengine CLI,在其中执行命令 “SHOW DNODES”,如下所示:
```
taos> show dnodes;
@@ -114,7 +115,7 @@ SHOW DNODES;
任何已经加入集群在线的数据节点,都可以作为后续待加入节点的 firstEp。
firstEp 这个参数仅仅在该数据节点首次加入集群时有作用,加入集群后,该数据节点会保存最新的 mnode 的 End Point 列表,不再依赖这个参数。
-接下来,配置文件中的 firstEp 参数就主要在客户端连接的时候使用了,例如 taos shell 如果不加参数,会默认连接由 firstEp 指定的节点。
+接下来,配置文件中的 firstEp 参数就主要在客户端连接的时候使用了,例如 TDengine CLI 如果不加参数,会默认连接由 firstEp 指定的节点。
两个没有配置 firstEp 参数的数据节点 dnode 启动后,会独立运行起来。这个时候,无法将其中一个数据节点加入到另外一个数据节点,形成集群。无法将两个独立的集群合并成为新的集群。
:::
diff --git a/docs/zh/10-deployment/03-k8s.md b/docs/zh/10-deployment/03-k8s.md
index 396b8343243ba824dd87b83fd5f94c14c2059730..0cae59657c2a0199d3452bc37d36f2c537944d21 100644
--- a/docs/zh/10-deployment/03-k8s.md
+++ b/docs/zh/10-deployment/03-k8s.md
@@ -1,6 +1,7 @@
---
sidebar_label: Kubernetes
title: 在 Kubernetes 上部署 TDengine 集群
+description: 利用 Kubernetes 部署 TDengine 集群的详细指南
---
作为面向云原生架构设计的时序数据库,TDengine 支持 Kubernetes 部署。这里介绍如何使用 YAML 文件一步一步从头创建一个 TDengine 集群,并重点介绍 Kubernetes 环境下 TDengine 的常用操作。
@@ -9,6 +10,7 @@ title: 在 Kubernetes 上部署 TDengine 集群
要使用 Kubernetes 部署管理 TDengine 集群,需要做好如下准备工作。
+* 本文适用 Kubernetes v1.5 以上版本
* 本文和下一章使用 minikube、kubectl 和 helm 等工具进行安装部署,请提前安装好相应软件
* Kubernetes 已经安装部署并能正常访问使用或更新必要的容器仓库或其他服务
@@ -365,7 +367,7 @@ kubectl scale statefulsets tdengine --replicas=1
```
-在 taos shell 中的所有数据库操作将无法成功。
+在 TDengine CLI 中的所有数据库操作将无法成功。
```
taos> show dnodes;
diff --git a/docs/zh/10-deployment/05-helm.md b/docs/zh/10-deployment/05-helm.md
index 9a723ff62f23da4906ee268becef1d812c29d797..9a3b21f09296e6f5a8dbd089225b6580b9567586 100644
--- a/docs/zh/10-deployment/05-helm.md
+++ b/docs/zh/10-deployment/05-helm.md
@@ -1,6 +1,7 @@
---
sidebar_label: Helm
title: 使用 Helm 部署 TDengine 集群
+description: 使用 Helm 部署 TDengine 集群的详细指南
---
Helm 是 Kubernetes 的包管理器,上一节使用 Kubernets 部署 TDengine 集群的操作已经足够简单,但 Helm 依然可以提供更强大的能力。
@@ -171,70 +172,19 @@ taoscfg:
TAOS_REPLICA: "1"
- # number of days per DB file
- # TAOS_DAYS: "10"
-
- # number of days to keep DB file, default is 10 years.
- #TAOS_KEEP: "3650"
-
- # cache block size (Mbyte)
- #TAOS_CACHE: "16"
-
- # number of cache blocks per vnode
- #TAOS_BLOCKS: "6"
-
- # minimum rows of records in file block
- #TAOS_MIN_ROWS: "100"
-
- # maximum rows of records in file block
- #TAOS_MAX_ROWS: "4096"
-
- #
- # TAOS_NUM_OF_THREADS_PER_CORE: number of threads per CPU core
- #TAOS_NUM_OF_THREADS_PER_CORE: "1.0"
+ # TAOS_NUM_OF_RPC_THREADS: number of threads for RPC
+ #TAOS_NUM_OF_RPC_THREADS: "2"
#
# TAOS_NUM_OF_COMMIT_THREADS: number of threads to commit cache data
#TAOS_NUM_OF_COMMIT_THREADS: "4"
- #
- # TAOS_RATIO_OF_QUERY_CORES:
- # the proportion of total CPU cores available for query processing
- # 2.0: the query threads will be set to double of the CPU cores.
- # 1.0: all CPU cores are available for query processing [default].
- # 0.5: only half of the CPU cores are available for query.
- # 0.0: only one core available.
- #TAOS_RATIO_OF_QUERY_CORES: "1.0"
-
- #
- # TAOS_KEEP_COLUMN_NAME:
- # the last_row/first/last aggregator will not change the original column name in the result fields
- #TAOS_KEEP_COLUMN_NAME: "0"
-
- # enable/disable backuping vnode directory when removing vnode
- #TAOS_VNODE_BAK: "1"
-
# enable/disable installation / usage report
#TAOS_TELEMETRY_REPORTING: "1"
- # enable/disable load balancing
- #TAOS_BALANCE: "1"
-
- # max timer control blocks
- #TAOS_MAX_TMR_CTRL: "512"
-
# time interval of system monitor, seconds
#TAOS_MONITOR_INTERVAL: "30"
- # number of seconds allowed for a dnode to be offline, for cluster only
- #TAOS_OFFLINE_THRESHOLD: "8640000"
-
- # RPC re-try timer, millisecond
- #TAOS_RPC_TIMER: "1000"
-
- # RPC maximum time for ack, seconds.
- #TAOS_RPC_MAX_TIME: "600"
-
# time interval of dnode status reporting to mnode, seconds, for cluster only
#TAOS_STATUS_INTERVAL: "1"
@@ -245,37 +195,7 @@ taoscfg:
#TAOS_MIN_SLIDING_TIME: "10"
# minimum time window, milli-second
- #TAOS_MIN_INTERVAL_TIME: "10"
-
- # maximum delay before launching a stream computation, milli-second
- #TAOS_MAX_STREAM_COMP_DELAY: "20000"
-
- # maximum delay before launching a stream computation for the first time, milli-second
- #TAOS_MAX_FIRST_STREAM_COMP_DELAY: "10000"
-
- # retry delay when a stream computation fails, milli-second
- #TAOS_RETRY_STREAM_COMP_DELAY: "10"
-
- # the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
- #TAOS_STREAM_COMP_DELAY_RATIO: "0.1"
-
- # max number of vgroups per db, 0 means configured automatically
- #TAOS_MAX_VGROUPS_PER_DB: "0"
-
- # max number of tables per vnode
- #TAOS_MAX_TABLES_PER_VNODE: "1000000"
-
- # the number of acknowledgments required for successful data writing
- #TAOS_QUORUM: "1"
-
- # enable/disable compression
- #TAOS_COMP: "2"
-
- # write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync
- #TAOS_WAL_LEVEL: "1"
-
- # if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
- #TAOS_FSYNC: "3000"
+ #TAOS_MIN_INTERVAL_TIME: "1"
# the compressed rpc message, option:
# -1 (no compression)
@@ -283,17 +203,8 @@ taoscfg:
# > 0 (rpc message body which larger than this value will be compressed)
#TAOS_COMPRESS_MSG_SIZE: "-1"
- # max length of an SQL
- #TAOS_MAX_SQL_LENGTH: "1048576"
-
- # the maximum number of records allowed for super table time sorting
- #TAOS_MAX_NUM_OF_ORDERED_RES: "100000"
-
# max number of connections allowed in dnode
- #TAOS_MAX_SHELL_CONNS: "5000"
-
- # max number of connections allowed in client
- #TAOS_MAX_CONNECTIONS: "5000"
+ #TAOS_MAX_SHELL_CONNS: "50000"
# stop writing logs when the disk size of the log folder is less than this value
#TAOS_MINIMAL_LOG_DIR_G_B: "0.1"
@@ -313,21 +224,8 @@ taoscfg:
# enable/disable system monitor
#TAOS_MONITOR: "1"
- # enable/disable recording the SQL statements via restful interface
- #TAOS_HTTP_ENABLE_RECORD_SQL: "0"
-
- # number of threads used to process http requests
- #TAOS_HTTP_MAX_THREADS: "2"
-
- # maximum number of rows returned by the restful interface
- #TAOS_RESTFUL_ROW_LIMIT: "10240"
-
- # The following parameter is used to limit the maximum number of lines in log files.
- # max number of lines per log filters
- # numOfLogLines 10000000
-
# enable/disable async log
- #TAOS_ASYNC_LOG: "0"
+ #TAOS_ASYNC_LOG: "1"
#
# time of keeping log files, days
@@ -344,25 +242,8 @@ taoscfg:
# debug flag for all log type, take effect when non-zero value\
#TAOS_DEBUG_FLAG: "143"
- # enable/disable recording the SQL in taos client
- #TAOS_ENABLE_RECORD_SQL: "0"
-
# generate core file when service crash
#TAOS_ENABLE_CORE_FILE: "1"
-
- # maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
- #TAOS_MAX_BINARY_DISPLAY_WIDTH: "30"
-
- # enable/disable stream (continuous query)
- #TAOS_STREAM: "1"
-
- # in retrieve blocking model, only in 50% query threads will be used in query processing in dnode
- #TAOS_RETRIEVE_BLOCKING_MODEL: "0"
-
- # the maximum allowed query buffer size in MB during query processing for each data node
- # -1 no limit (default)
- # 0 no query allowed, queries are disabled
- #TAOS_QUERY_BUFFER_SIZE: "-1"
```
## 扩容
diff --git a/docs/zh/10-deployment/index.md b/docs/zh/10-deployment/index.md
index 96ac7b176d1125df6cf4763a485c4edba520a48c..4ff1add779c68a7098002dd95dcf28c9dc1acf72 100644
--- a/docs/zh/10-deployment/index.md
+++ b/docs/zh/10-deployment/index.md
@@ -1,5 +1,7 @@
---
+sidebar_label: 部署集群
title: 部署集群
+description: 部署 TDengine 集群的多种方式
---
TDengine 支持集群,提供水平扩展的能力。如果需要获得更高的处理能力,只需要多增加节点即可。TDengine 采用虚拟节点技术,将一个节点虚拟化为多个虚拟节点,以实现负载均衡。同时,TDengine可以将多个节点上的虚拟节点组成虚拟节点组,通过多副本机制,以保证供系统的高可用。TDengine的集群功能完全开源。
diff --git a/docs/zh/12-taos-sql/01-data-type.md b/docs/zh/12-taos-sql/01-data-type.md
index 628086f5a9f31d15fccdae107b8bd997a6ba1c0b..ee7b3a4715a11346b9a06da20dbc93ef309c0a3d 100644
--- a/docs/zh/12-taos-sql/01-data-type.md
+++ b/docs/zh/12-taos-sql/01-data-type.md
@@ -1,6 +1,6 @@
---
-sidebar_label: 支持的数据类型
-title: 支持的数据类型
+sidebar_label: 数据类型
+title: 数据类型
description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类型等"
---
@@ -11,7 +11,7 @@ description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类
- 时间格式为 `YYYY-MM-DD HH:mm:ss.MS`,默认时间分辨率为毫秒。比如:`2017-08-12 18:25:58.128`
- 内部函数 now 是客户端的当前时间
- 插入记录时,如果时间戳为 now,插入数据时使用提交这条记录的客户端的当前时间
-- Epoch Time:时间戳也可以是一个长整数,表示从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的毫秒数(相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的微秒数;纳秒精度逻辑类似。)
+- Epoch Time:时间戳也可以是一个长整数,表示从 UTC 时间 1970-01-01 00:00:00 开始的毫秒数。相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从 UTC 时间 1970-01-01 00:00:00 开始的微秒数;纳秒精度逻辑类似。
- 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降采样操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n (自然月) 和 y (自然年)。
TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 PRECISION 参数也可以支持微秒和纳秒。
diff --git a/docs/zh/12-taos-sql/02-database.md b/docs/zh/12-taos-sql/02-database.md
index 1675356c49c3435d6f9dad3ccc6b868da929f08f..c76311f008433f36259b08acaf56cafa729550b7 100644
--- a/docs/zh/12-taos-sql/02-database.md
+++ b/docs/zh/12-taos-sql/02-database.md
@@ -1,6 +1,6 @@
---
-sidebar_label: 数据库管理
-title: 数据库管理
+sidebar_label: 数据库
+title: 数据库
description: "创建、删除数据库,查看、修改数据库参数"
---
@@ -71,9 +71,9 @@ database_option: {
- SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。
- 0:表示可以创建多张超级表。
- 1:表示只可以创建一张超级表。
-- WAL_RETENTION_PERIOD:wal 文件的额外保留策略,用于数据订阅。wal 的保存时长,单位为 s。默认为 0,即落盘后立即删除。-1 表示不删除。
-- WAL_RETENTION_SIZE:wal 文件的额外保留策略,用于数据订阅。wal 的保存的最大上限,单位为 KB。默认为 0,即落盘后立即删除。-1 表示不删除。
-- WAL_ROLL_PERIOD:wal 文件切换时长,单位为 s。当 wal 文件创建并写入后,经过该时间,会自动创建一个新的 wal 文件。默认为 0,即仅在落盘时创建新文件。
+- WAL_RETENTION_PERIOD:wal 文件的额外保留策略,用于数据订阅。wal 的保存时长,单位为 s。单副本默认为 0,即落盘后立即删除。-1 表示不删除。多副本默认为 4 天。
+- WAL_RETENTION_SIZE:wal 文件的额外保留策略,用于数据订阅。wal 的保存的最大上限,单位为 KB。单副本默认为 0,即落盘后立即删除。多副本默认为-1,表示不删除。
+- WAL_ROLL_PERIOD:wal 文件切换时长,单位为 s。当 wal 文件创建并写入后,经过该时间,会自动创建一个新的 wal 文件。单副本默认为 0,即仅在落盘时创建新文件。多副本默认为 1 天。
- WAL_SEGMENT_SIZE:wal 单个文件大小,单位为 KB。当前写入文件大小超过上限后会自动创建一个新的 wal 文件。默认为 0,即仅在落盘时创建新文件。
### 创建数据库示例
diff --git a/docs/zh/12-taos-sql/03-table.md b/docs/zh/12-taos-sql/03-table.md
index 0e104bb7b6f09e886ab3c6cb55b1ecd68dfaf1ce..2337f2a272cad45ae762349d2bf355594615cbb5 100644
--- a/docs/zh/12-taos-sql/03-table.md
+++ b/docs/zh/12-taos-sql/03-table.md
@@ -1,5 +1,7 @@
---
-title: 表管理
+title: 表
+sidebar_label: 表
+description: 对表的各种管理操作
---
## 创建表
@@ -8,27 +10,27 @@ title: 表管理
```sql
CREATE TABLE [IF NOT EXISTS] [db_name.]tb_name (create_definition [, create_definitionn] ...) [table_options]
-
+
CREATE TABLE create_subtable_clause
-
+
CREATE TABLE [IF NOT EXISTS] [db_name.]tb_name (create_definition [, create_definitionn] ...)
[TAGS (create_definition [, create_definitionn] ...)]
[table_options]
-
+
create_subtable_clause: {
create_subtable_clause [create_subtable_clause] ...
| [IF NOT EXISTS] [db_name.]tb_name USING [db_name.]stb_name [(tag_name [, tag_name] ...)] TAGS (tag_value [, tag_value] ...)
}
-
+
create_definition:
col_name column_definition
-
+
column_definition:
type_name [comment 'string_value']
-
+
table_options:
table_option ...
-
+
table_option: {
COMMENT 'string_value'
| WATERMARK duration[,duration]
@@ -52,12 +54,13 @@ table_option: {
需要注意的是转义字符中的内容必须是可打印字符。
**参数说明**
+
1. COMMENT:表注释。可用于超级表、子表和普通表。
-2. WATERMARK:指定窗口的关闭时间,默认值为 5 秒,最小单位毫秒,范围为0到15分钟,多个以逗号分隔。只可用于超级表,且只有当数据库使用了RETENTIONS参数时,才可以使用此表参数。
-3. MAX_DELAY:用于控制推送计算结果的最大延迟,默认值为 interval 的值(但不能超过最大值),最小单位毫秒,范围为1毫秒到15分钟,多个以逗号分隔。注:不建议 MAX_DELAY 设置太小,否则会过于频繁的推送结果,影响存储和查询性能,如无特殊需求,取默认值即可。只可用于超级表,且只有当数据库使用了RETENTIONS参数时,才可以使用此表参数。
-4. ROLLUP:Rollup 指定的聚合函数,提供基于多层级的降采样聚合结果。只可用于超级表。只有当数据库使用了RETENTIONS参数时,才可以使用此表参数。作用于超级表除TS列外的其它所有列,但是只能定义一个聚合函数。 聚合函数支持 avg, sum, min, max, last, first。
-5. SMA:Small Materialized Aggregates,提供基于数据块的自定义预计算功能。预计算类型包括MAX、MIN和SUM。可用于超级表/普通表。
-6. TTL:Time to Live,是用户用来指定表的生命周期的参数。如果在持续的TTL时间内,都没有数据写入该表,则TDengine系统会自动删除该表。这个TTL的时间只是一个大概时间,我们系统不保证到了时间一定会将其删除,而只保证存在这样一个机制。TTL单位是天,默认为0,表示不限制。用户需要注意,TTL优先级高于KEEP,即TTL时间满足删除机制时,即使当前数据的存在时间小于KEEP,此表也会被删除。只可用于子表和普通表。
+2. WATERMARK:指定窗口的关闭时间,默认值为 5 秒,最小单位毫秒,范围为 0 到 15 分钟,多个以逗号分隔。只可用于超级表,且只有当数据库使用了 RETENTIONS 参数时,才可以使用此表参数。
+3. MAX_DELAY:用于控制推送计算结果的最大延迟,默认值为 interval 的值(但不能超过最大值),最小单位毫秒,范围为 1 毫秒到 15 分钟,多个以逗号分隔。注:不建议 MAX_DELAY 设置太小,否则会过于频繁的推送结果,影响存储和查询性能,如无特殊需求,取默认值即可。只可用于超级表,且只有当数据库使用了 RETENTIONS 参数时,才可以使用此表参数。
+4. ROLLUP:Rollup 指定的聚合函数,提供基于多层级的降采样聚合结果。只可用于超级表。只有当数据库使用了 RETENTIONS 参数时,才可以使用此表参数。作用于超级表除 TS 列外的其它所有列,但是只能定义一个聚合函数。 聚合函数支持 avg, sum, min, max, last, first。
+5. SMA:Small Materialized Aggregates,提供基于数据块的自定义预计算功能。预计算类型包括 MAX、MIN 和 SUM。可用于超级表/普通表。
+6. TTL:Time to Live,是用户用来指定表的生命周期的参数。如果创建表时指定了这个参数,当该表的存在时间超过 TTL 指定的时间后,TDengine 自动删除该表。这个 TTL 的时间只是一个大概时间,系统不保证到了时间一定会将其删除,而只保证存在这样一个机制且最终一定会删除。TTL 单位是天,默认为 0,表示不限制,到期时间为表创建时间加上 TTL 时间。
## 创建子表
@@ -87,7 +90,7 @@ CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF
```sql
ALTER TABLE [db_name.]tb_name alter_table_clause
-
+
alter_table_clause: {
alter_table_options
| ADD COLUMN col_name column_type
@@ -95,10 +98,10 @@ alter_table_clause: {
| MODIFY COLUMN col_name column_type
| RENAME COLUMN old_col_name new_col_name
}
-
+
alter_table_options:
alter_table_option ...
-
+
alter_table_option: {
TTL value
| COMMENT 'string_value'
@@ -108,6 +111,7 @@ alter_table_option: {
**使用说明**
对普通表可以进行如下修改操作
+
1. ADD COLUMN:添加列。
2. DROP COLUMN:删除列。
3. MODIFY COLUMN:修改列定义,如果数据列的类型是可变长类型,那么可以使用此指令修改其宽度,只能改大,不能改小。
@@ -141,15 +145,15 @@ ALTER TABLE tb_name RENAME COLUMN old_col_name new_col_name
```sql
ALTER TABLE [db_name.]tb_name alter_table_clause
-
+
alter_table_clause: {
alter_table_options
| SET TAG tag_name = new_tag_value
}
-
+
alter_table_options:
alter_table_option ...
-
+
alter_table_option: {
TTL value
| COMMENT 'string_value'
@@ -157,6 +161,7 @@ alter_table_option: {
```
**使用说明**
+
1. 对子表的列和标签的修改,除了更改标签值以外,都要通过超级表才能进行。
### 修改子表标签值
@@ -167,7 +172,7 @@ ALTER TABLE tb_name SET TAG tag_name=new_tag_value;
## 删除表
-可以在一条SQL语句中删除一个或多个普通表或子表。
+可以在一条 SQL 语句中删除一个或多个普通表或子表。
```sql
DROP TABLE [IF EXISTS] [db_name.]tb_name [, [IF EXISTS] [db_name.]tb_name] ...
@@ -177,7 +182,7 @@ DROP TABLE [IF EXISTS] [db_name.]tb_name [, [IF EXISTS] [db_name.]tb_name] ...
### 显示所有表
-如下SQL语句可以列出当前数据库中的所有表名。
+如下 SQL 语句可以列出当前数据库中的所有表名。
```sql
SHOW TABLES [LIKE tb_name_wildchar];
diff --git a/docs/zh/12-taos-sql/04-stable.md b/docs/zh/12-taos-sql/04-stable.md
index 59d9657694340ae263fb23b8c2b17ede8984426d..95ef405fa780e831628e21766e1b3c3b18265059 100644
--- a/docs/zh/12-taos-sql/04-stable.md
+++ b/docs/zh/12-taos-sql/04-stable.md
@@ -1,6 +1,7 @@
---
-sidebar_label: 超级表管理
-title: 超级表 STable 管理
+sidebar_label: 超级表
+title: 超级表
+description: 对超级表的各种管理操作
---
## 创建超级表
diff --git a/docs/zh/12-taos-sql/05-insert.md b/docs/zh/12-taos-sql/05-insert.md
index c91e70c481055b804d88c8911fb454a3dd15b799..59af9c55ed076fb23814a24a5d2429e51d5fc051 100644
--- a/docs/zh/12-taos-sql/05-insert.md
+++ b/docs/zh/12-taos-sql/05-insert.md
@@ -1,6 +1,7 @@
---
sidebar_label: 数据写入
title: 数据写入
+description: 写入数据的详细语法
---
## 写入语法
diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md
index 5312d7d2f3597ca63d9d3c43bc2264ca75877fb7..d8ff3f04ed261ade9f8253d57a33e9c56adfefd6 100644
--- a/docs/zh/12-taos-sql/06-select.md
+++ b/docs/zh/12-taos-sql/06-select.md
@@ -1,6 +1,7 @@
---
sidebar_label: 数据查询
title: 数据查询
+description: 查询数据的详细语法
---
## 查询语法
@@ -354,19 +355,15 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
:::info
-- 目前仅支持一层嵌套,也即不能在子查询中再嵌入子查询。
-- 内层查询的返回结果将作为“虚拟表”供外层查询使用,此虚拟表可以使用 AS 语法做重命名,以便于外层查询中方便引用。
-- 目前不能在“连续查询”功能中使用子查询。
+- 内层查询的返回结果将作为“虚拟表”供外层查询使用,此虚拟表建议起别名,以便于外层查询中方便引用。
- 在内层和外层查询中,都支持普通的表间/超级表间 JOIN。内层查询的计算结果也可以再参与数据子表的 JOIN 操作。
-- 目前内层查询、外层查询均不支持 UNION 操作。
- 内层查询支持的功能特性与非嵌套的查询语句能力是一致的。
- 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。
- 与非嵌套的查询语句相比,外层查询所能支持的功能特性存在如下限制:
- 计算函数部分:
- - 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。
- - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:STDDEV, PERCENTILE。
- - 外层查询中不支持 IN 算子,但在内层中可以使用。
- - 外层查询不支持 GROUP BY。
+ - 如果内层查询的结果数据未提供时间戳,那么计算过程隐式依赖时间戳的函数在外层会无法正常工作。例如:INTERP, DERIVATIVE, IRATE, LAST_ROW, FIRST, LAST, TWA, STATEDURATION, TAIL, UNIQUE。
+ - 如果内层查询的结果数据不是有效的时间序列,那么计算过程依赖数据为时间序列的函数在外层会无法正常工作。例如:LEASTSQUARES, ELAPSED, INTERP, DERIVATIVE, IRATE, TWA, DIFF, STATECOUNT, STATEDURATION, CSUM, MAVG, TAIL, UNIQUE。
+ - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:PERCENTILE。
:::
diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md
index bbf6b52eb985bef2decebcacd27d777bd1999b1f..9c5b7f771ecaf52da55a693ed5c789197ab57b05 100644
--- a/docs/zh/12-taos-sql/10-function.md
+++ b/docs/zh/12-taos-sql/10-function.md
@@ -1,6 +1,7 @@
---
sidebar_label: 函数
title: 函数
+description: TDengine 支持的函数列表
toc_max_heading_level: 4
---
@@ -613,6 +614,7 @@ SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHER
**说明**:
- P值范围是[0,100],当为0时等同于MIN,为100时等同于MAX。
- algo_type 取值为 "default" 或 "t-digest"。 输入为 "default" 时函数使用基于直方图算法进行计算。输入为 "t-digest" 时使用t-digest算法计算分位数的近似结果。如果不指定 algo_type 则使用 "default" 算法。
+- "t-digest"算法的近似结果对于输入数据顺序敏感,对超级表查询时不同的输入排序结果可能会有微小的误差。
### AVG
@@ -846,7 +848,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
### INTERP
```sql
-SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
+SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] RANGE(timestamp1,timestamp2) EVERY(interval) FILL({ VALUE | PREV | NULL | LINEAR | NEXT});
```
**功能说明**:返回指定时间截面指定列的记录值或插值。
@@ -855,17 +857,16 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [
**适用数据类型**:数值类型。
-**适用于**:表、超级表。
+**适用于**:表和超级表。
**使用说明**
- INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。
- INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。
-- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1<=timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。如果没有指定 RANGE,那么满足过滤条件的输入数据中第一条记录的 timestamp 即为 timestamp1,最后一条记录的 timestamp 即为 timestamp2,同样也满足 timestamp1 <= timestamp2。
+- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1<=timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。
- INTERP 根据 EVERY 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(EVERY 值)进行插值。如果没有指定 EVERY,则默认窗口大小为无穷大,即从 timestamp1 开始只有一个窗口。
-- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值,如果没有 FILL 字段则默认不插值,即输出为原始记录值或不输出(原始记录不存在)。
-- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 group by tbname 一起使用,当作用嵌套查询外层时内层子查询不能含 GROUP BY 信息。
-- INTERP 的插值结果不受 ORDER BY timestamp 的影响,ORDER BY timestamp 只影响输出结果的排序。
+- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。
+- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 partition by tbname 一起使用。
### LAST
@@ -917,7 +918,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
**返回数据类型**:同应用的字段。
-**适用数据类型**:数值类型。
+**适用数据类型**:数值类型,时间戳类型。
**适用于**:表和超级表。
@@ -932,7 +933,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
**返回数据类型**:同应用的字段。
-**适用数据类型**:数值类型。
+**适用数据类型**:数值类型,时间戳类型。
**适用于**:表和超级表。
@@ -1167,7 +1168,7 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W
**参数范围**:
-- oper : "LT" (小于)、"GT"(大于)、"LE"(小于等于)、"GE"(大于等于)、"NE"(不等于)、"EQ"(等于),不区分大小写。
+- oper : `'LT'` (小于)、`'GT'`(大于)、`'LE'`(小于等于)、`'GE'`(大于等于)、`'NE'`(不等于)、`'EQ'`(等于),不区分大小写,但需要用`''`包括。
- val : 数值型
- unit : 时间长度的单位,可取值时间单位: 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。如果省略,默认为当前数据库精度。
diff --git a/docs/zh/12-taos-sql/12-distinguished.md b/docs/zh/12-taos-sql/12-distinguished.md
index 2dad49ece942d0530c12afa145c2e11682c23fe3..268712e757304fe22848318befd16d1a93de5dac 100644
--- a/docs/zh/12-taos-sql/12-distinguished.md
+++ b/docs/zh/12-taos-sql/12-distinguished.md
@@ -1,15 +1,16 @@
---
-sidebar_label: 时序数据特色查询
-title: 时序数据特色查询
+sidebar_label: 特色查询
+title: 特色查询
+description: TDengine 提供的时序数据特有的查询功能
---
TDengine 是专为时序数据而研发的大数据平台,存储和计算都针对时序数据的特定进行了量身定制,在支持标准 SQL 的基础之上,还提供了一系列贴合时序业务场景的特色查询语法,极大的方便时序场景的应用开发。
-TDengine 提供的特色查询包括标签切分查询和窗口切分查询。
+TDengine 提供的特色查询包括数据切分查询和窗口切分查询。
-## 标签切分查询
+## 数据切分查询
-超级表查询中,当需要针对标签进行数据切分然后在切分出的数据空间内再进行一系列的计算时使用标签切分子句,标签切分的语句如下:
+当需要按一定的维度对数据进行切分然后在切分出的数据空间内再进行一系列的计算时使用数据切分子句,数据切分语句的语法如下:
```sql
PARTITION BY part_list
@@ -17,22 +18,23 @@ PARTITION BY part_list
part_list 可以是任意的标量表达式,包括列、常量、标量函数和它们的组合。
-当 PARTITION BY 和标签一起使用时,TDengine 按如下方式处理标签切分子句:
+TDengine 按如下方式处理数据切分子句:
-- 标签切分子句位于 WHERE 子句之后,且不能和 JOIN 子句一起使用。
-- 标签切分子句将超级表数据按指定的标签组合进行切分,每个切分的分片进行指定的计算。计算由之后的子句定义(窗口子句、GROUP BY 子句或 SELECT 子句)。
-- 标签切分子句可以和窗口切分子句(或 GROUP BY 子句)一起使用,此时后面的子句作用在每个切分的分片上。例如,将数据按标签 location 进行分组,并对每个组按 10 分钟进行降采样,取其最大值。
+- 数据切分子句位于 WHERE 子句之后。
+- 数据切分子句将表数据按指定的维度进行切分,每个切分的分片进行指定的计算。计算由之后的子句定义(窗口子句、GROUP BY 子句或 SELECT 子句)。
+- 数据切分子句可以和窗口切分子句(或 GROUP BY 子句)一起使用,此时后面的子句作用在每个切分的分片上。例如,将数据按标签 location 进行分组,并对每个组按 10 分钟进行降采样,取其最大值。
```sql
select max(current) from meters partition by location interval(10m)
```
+数据切分子句最常见的用法就是在超级表查询中,按标签将子表数据进行切分,然后分别进行计算。特别是 PARTITION BY TBNAME 用法,它将每个子表的数据独立出来,形成一条条独立的时间序列,极大的方便了各种时序场景的统计分析。
## 窗口切分查询
TDengine 支持按时间段窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。窗口子句用于针对查询的数据集合按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)三种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。窗口切分查询语法如下:
```sql
-SELECT function_list FROM tb_name
+SELECT select_list FROM tb_name
[WHERE where_condition]
[SESSION(ts_col, tol_val)]
[STATE_WINDOW(col)]
@@ -42,19 +44,15 @@ SELECT function_list FROM tb_name
在上述语法中的具体限制如下
-### 窗口切分查询中使用函数的限制
-
-- 在聚合查询中,function_list 位置允许使用聚合和选择函数,并要求每个函数仅输出单个结果(例如:COUNT、AVG、SUM、STDDEV、LEASTSQUARES、PERCENTILE、MIN、MAX、FIRST、LAST),而不能使用具有多行输出结果的函数(例如:DIFF 以及四则运算)。
-- 此外 LAST_ROW 查询也不能与窗口聚合同时出现。
-- 标量函数(如:CEIL/FLOOR 等)也不能使用在窗口聚合查询中。
-
### 窗口子句的规则
-- 窗口子句位于标签切分子句之后,GROUP BY 子句之前,且不可以和 GROUP BY 子句一起使用。
+- 窗口子句位于数据切分子句之后,GROUP BY 子句之前,且不可以和 GROUP BY 子句一起使用。
- 窗口子句将数据按窗口进行切分,对每个窗口进行 SELECT 列表中的表达式的计算,SELECT 列表中的表达式只能包含:
- 常量。
- - 聚集函数。
+ - `_wstart` 伪列、`_wend` 伪列和 `_wduration` 伪列。
+ - 聚集函数(包括选择函数和可以由参数确定输出行数的时序特有函数)。
- 包含上面表达式的表达式。
+ - 且至少包含一个聚集函数。
- 窗口子句不可以和 GROUP BY 子句一起使用。
- WHERE 语句可以指定查询的起止时间和其他过滤条件。
@@ -73,7 +71,7 @@ FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填
1. 使用 FILL 语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过 1 千万条具有插值的结果。
2. 在时间维度聚合中,返回的结果中时间序列严格单调递增。
-3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 GROUP BY 语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了 GROUP BY 语句分组,则返回结果中每个 GROUP 内不按照时间序列严格单调递增。
+3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 PARTITION BY 语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了 PARTITION BY 语句分组,则返回结果中每个 PARTITION 内不按照时间序列严格单调递增。
:::
@@ -105,7 +103,7 @@ SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
### 状态窗口
-使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是[2019-04-28 14:22:07,2019-04-28 14:22:10]和[2019-04-28 14:22:11,2019-04-28 14:22:12]两个。(状态窗口暂不支持对超级表使用)
+使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是[2019-04-28 14:22:07,2019-04-28 14:22:10]和[2019-04-28 14:22:11,2019-04-28 14:22:12]两个。
![TDengine Database 时间窗口示意图](./timewindow-3.webp)
@@ -121,7 +119,7 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
![TDengine Database 时间窗口示意图](./timewindow-2.webp)
-在 tol_value 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。(会话窗口暂不支持对超级表使用)
+在 tol_value 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。
```
diff --git a/docs/zh/12-taos-sql/13-tmq.md b/docs/zh/12-taos-sql/13-tmq.md
index b05d2bf680e2db5db08b2e86d98e2e3018078ddf..571300ad8cbfb031e38f330c0773ec6ee6f11e32 100644
--- a/docs/zh/12-taos-sql/13-tmq.md
+++ b/docs/zh/12-taos-sql/13-tmq.md
@@ -1,6 +1,7 @@
---
sidebar_label: 数据订阅
title: 数据订阅
+description: TDengine 消息队列提供的数据订阅功能
---
TDengine 3.0.0.0 开始对消息队列做了大幅的优化和增强以简化用户的解决方案。
diff --git a/docs/zh/12-taos-sql/14-stream.md b/docs/zh/12-taos-sql/14-stream.md
index a967299e4093a4a8654d7aaf1b8c3914726aeadf..cd726e0a0ea644f575e16c656eeb4bb2cabf425d 100644
--- a/docs/zh/12-taos-sql/14-stream.md
+++ b/docs/zh/12-taos-sql/14-stream.md
@@ -1,6 +1,7 @@
---
sidebar_label: 流式计算
title: 流式计算
+description: 流式计算的相关 SQL 的详细语法
---
@@ -18,7 +19,7 @@ stream_options: {
其中 subquery 是 select 普通查询语法的子集:
```sql
-subquery: SELECT [DISTINCT] select_list
+subquery: SELECT select_list
from_clause
[WHERE condition]
[PARTITION BY tag_list]
@@ -37,13 +38,13 @@ window_clause: {
其中,SESSION 是会话窗口,tol_val 是时间间隔的最大范围。在 tol_val 时间间隔范围内的数据都属于同一个窗口,如果连续的两条数据的时间超过 tol_val,则自动开启下一个窗口。
-窗口的定义与时序数据特色查询中的定义完全相同。
+窗口的定义与时序数据特色查询中的定义完全相同,详见 [TDengine 特色查询](../distinguished)。
例如,如下语句创建流式计算,同时自动创建名为 avg_vol 的超级表,此流计算以一分钟为时间窗口、30 秒为前向增量统计这些电表的平均电压,并将来自 meters 表的数据的计算结果写入 avg_vol 表,不同 partition 的数据会分别创建子表并写入不同子表。
```sql
CREATE STREAM avg_vol_s INTO avg_vol AS
-SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
+SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
```
## 流式计算的 partition
@@ -57,7 +58,7 @@ SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVA
## 删除流式计算
```sql
-DROP STREAM [IF NOT EXISTS] stream_name;
+DROP STREAM [IF EXISTS] stream_name;
```
仅删除流式计算任务,由流式计算写入的数据不会被删除。
diff --git a/docs/zh/12-taos-sql/16-operators.md b/docs/zh/12-taos-sql/16-operators.md
index 22b78455fb35e9ebe5978b30505819e1a2b678c8..48e9991799abf99ca868fc30e34f0435054afa0b 100644
--- a/docs/zh/12-taos-sql/16-operators.md
+++ b/docs/zh/12-taos-sql/16-operators.md
@@ -1,6 +1,7 @@
---
sidebar_label: 运算符
title: 运算符
+description: TDengine 支持的所有运算符
---
## 算术运算符
diff --git a/docs/zh/12-taos-sql/17-json.md b/docs/zh/12-taos-sql/17-json.md
index 4a4a8cca732ac433ba5ada1ec3805ebfa663edb3..18c25cfe230f81bf0b0e421634c1a768ae8e4628 100644
--- a/docs/zh/12-taos-sql/17-json.md
+++ b/docs/zh/12-taos-sql/17-json.md
@@ -1,6 +1,7 @@
---
-sidebar_label: JSON 类型使用说明
-title: JSON 类型使用说明
+sidebar_label: JSON 类型
+title: JSON 类型
+description: 对 JSON 类型如何使用的详细说明
---
diff --git a/docs/zh/12-taos-sql/18-escape.md b/docs/zh/12-taos-sql/18-escape.md
index 756e5c81591e7414827fdc65e228cfafc96214ad..5e0d292d396fdb54bd3df553544353a900415283 100644
--- a/docs/zh/12-taos-sql/18-escape.md
+++ b/docs/zh/12-taos-sql/18-escape.md
@@ -1,5 +1,7 @@
---
-title: 转义字符说明
+title: 转义字符
+sidebar_label: 转义字符
+description: TDengine 中使用转义字符的详细规则
---
## 转义字符表
@@ -15,9 +17,6 @@ title: 转义字符说明
| `\%` | % 规则见下 |
| `\_` | \_ 规则见下 |
-:::note
-转义符的功能从 2.4.0.4 版本开始
-
:::
## 转义字符使用规则
diff --git a/docs/zh/12-taos-sql/19-limit.md b/docs/zh/12-taos-sql/19-limit.md
index ff552fc9771f5b428554acc62e9aeac03a305ecc..a9743adddabe96440ffca8c8585787081d29398f 100644
--- a/docs/zh/12-taos-sql/19-limit.md
+++ b/docs/zh/12-taos-sql/19-limit.md
@@ -1,6 +1,7 @@
---
-sidebar_label: 命名与边界限制
-title: 命名与边界限制
+sidebar_label: 命名与边界
+title: 命名与边界
+description: 合法字符集和命名中的限制规则
---
## 名称命名规则
@@ -30,7 +31,7 @@ title: 命名与边界限制
- 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。
- 标签名最大长度为 64
- 最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16KB
-- SQL 语句最大长度 1048576 个字符,也可通过客户端配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576
+- SQL 语句最大长度 1048576 个字符
- SELECT 语句的查询结果,最多允许返回 4096 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
- 数据库的副本数只能设置为 1 或 3
diff --git a/docs/zh/12-taos-sql/20-keywords.md b/docs/zh/12-taos-sql/20-keywords.md
index cac29d7863ff77a6ec15bb9bddedd006317b719c..92c814067e8823b89ff4bd81198d2b1950d20d51 100644
--- a/docs/zh/12-taos-sql/20-keywords.md
+++ b/docs/zh/12-taos-sql/20-keywords.md
@@ -1,6 +1,7 @@
---
sidebar_label: 保留关键字
-title: TDengine 保留关键字
+title: 保留关键字
+description: TDengine 保留关键字的详细列表
---
## 保留关键字
diff --git a/docs/zh/12-taos-sql/21-node.md b/docs/zh/12-taos-sql/21-node.md
index 4816daf42042c0607aebf37c8b57961e5b1927fe..d47dc8198f41e7ee6e90624b0928c6bd215bb26d 100644
--- a/docs/zh/12-taos-sql/21-node.md
+++ b/docs/zh/12-taos-sql/21-node.md
@@ -1,6 +1,7 @@
---
sidebar_label: 集群管理
title: 集群管理
+description: 管理集群的 SQL 命令的详细解析
---
组成 TDengine 集群的物理实体是 dnode (data node 的缩写),它是一个运行在操作系统之上的进程。在 dnode 中可以建立负责时序数据存储的 vnode (virtual node),在多节点集群环境下当某个数据库的 replica 为 3 时,该数据库中的每个 vgroup 由 3 个 vnode 组成;当数据库的 replica 为 1 时,该数据库中的每个 vgroup 由 1 个 vnode 组成。如果要想配置某个数据库为多副本,则集群中的 dnode 数量至少为 3。在 dnode 还可以创建 mnode (management node),单个集群中最多可以创建三个 mnode。在 TDengine 3.0.0.0 中为了支持存算分离,引入了一种新的逻辑节点 qnode (query node),qnode 和 vnode 既可以共存在一个 dnode 中,也可以完全分离在不同的 dnode 上。
diff --git a/docs/zh/12-taos-sql/22-meta.md b/docs/zh/12-taos-sql/22-meta.md
index 8139b2fc55d420edfb766aab6ed06477fbd3621f..c1ffc4a757500276f348d08cd577f63072dfece2 100644
--- a/docs/zh/12-taos-sql/22-meta.md
+++ b/docs/zh/12-taos-sql/22-meta.md
@@ -1,6 +1,7 @@
---
sidebar_label: 元数据
-title: 存储元数据的 Information_Schema 数据库
+title: 元数据
+description: Information_Schema 数据库中存储了系统中所有的元数据信息
---
TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数据库元数据、数据库系统信息和状态的访问,例如数据库或表的名称,当前执行的 SQL 语句等。该数据库存储有关 TDengine 维护的所有其他数据库的信息。它包含多个只读表。实际上,这些表都是视图,而不是基表,因此没有与它们关联的文件。所以对这些表只能查询,不能进行 INSERT 等写入操作。`INFORMATION_SCHEMA` 数据库旨在以一种更一致的方式来提供对 TDengine 支持的各种 SHOW 语句(如 SHOW TABLES、SHOW DATABASES)所提供的信息的访问。与 SHOW 语句相比,使用 SELECT ... FROM INFORMATION_SCHEMA.tablename 具有以下优点:
@@ -245,3 +246,35 @@ Note: 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们
| 1 | dnode_id | INT | dnode 的 ID |
| 2 | name | BINARY(32) | 配置项名称 |
| 3 | value | BINARY(64) | 该配置项的值 |
+
+## INS_TOPICS
+
+| # | **列名** | **数据类型** | **说明** |
+| --- | :---------: | ------------ | ------------------------------ |
+| 1 | topic_name | BINARY(192) | topic 名称 |
+| 2 | db_name | BINARY(64) | topic 相关的 DB |
+| 3 | create_time | TIMESTAMP | topic 的创建时间 |
+| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 |
+
+## INS_SUBSCRIPTIONS
+
+| # | **列名** | **数据类型** | **说明** |
+| --- | :------------: | ------------ | ------------------------ |
+| 1 | topic_name | BINARY(204) | 被订阅的 topic |
+| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 |
+| 3 | vgroup_id | INT | 消费者被分配的 vgroup id |
+| 4 | consumer_id | BIGINT | 消费者的唯一 id |
+
+## INS_STREAMS
+
+| # | **列名** | **数据类型** | **说明** |
+| --- | :----------: | ------------ | --------------------------------------- |
+| 1 | stream_name | BINARY(64) | 流计算名称 |
+| 2 | create_time | TIMESTAMP | 创建时间 |
+| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 |
+| 4 | status | BINARY(20) | 流当前状态 |
+| 5 | source_db | BINARY(64) | 源数据库 |
+| 6 | target_db | BINARY(64) | 目的数据库 |
+| 7 | target_table | BINARY(192) | 流计算写入的目标表 |
+| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算 |
+| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算 |
diff --git a/docs/zh/12-taos-sql/23-perf.md b/docs/zh/12-taos-sql/23-perf.md
index ac852ee1506ce8da24c036c61ce96fa4eecaf1cb..d4ee0e178c02e65eb3f1ceaa73e170893f65cc88 100644
--- a/docs/zh/12-taos-sql/23-perf.md
+++ b/docs/zh/12-taos-sql/23-perf.md
@@ -1,6 +1,7 @@
---
sidebar_label: 统计数据
-title: 存储统计数据的 Performance_Schema 数据库
+title: 统计数据
+description: Performance_Schema 数据库中存储了系统中的各种统计信息
---
TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其中存储了与性能有关的统计数据。本节详细介绍其中的表和表结构。
@@ -61,15 +62,6 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
| 12 | sub_status | BINARY(1000) | 子查询状态 |
| 13 | sql | BINARY(1024) | SQL 语句 |
-## PERF_TOPICS
-
-| # | **列名** | **数据类型** | **说明** |
-| --- | :---------: | ------------ | ------------------------------ |
-| 1 | topic_name | BINARY(192) | topic 名称 |
-| 2 | db_name | BINARY(64) | topic 相关的 DB |
-| 3 | create_time | TIMESTAMP | topic 的 创建时间 |
-| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 |
-
## PERF_CONSUMERS
| # | **列名** | **数据类型** | **说明** |
@@ -83,15 +75,6 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
| 7 | subscribe_time | TIMESTAMP | 上一次发起订阅的时间 |
| 8 | rebalance_time | TIMESTAMP | 上一次触发 rebalance 的时间 |
-## PERF_SUBSCRIPTIONS
-
-| # | **列名** | **数据类型** | **说明** |
-| --- | :------------: | ------------ | ------------------------ |
-| 1 | topic_name | BINARY(204) | 被订阅的 topic |
-| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 |
-| 3 | vgroup_id | INT | 消费者被分配的 vgroup id |
-| 4 | consumer_id | BIGINT | 消费者的唯一 id |
-
## PERF_TRANS
| # | **列名** | **数据类型** | **说明** |
@@ -113,17 +96,3 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
| 2 | create_time | TIMESTAMP | sma 创建时间 |
| 3 | stable_name | BINARY(192) | sma 所属的超级表名称 |
| 4 | vgroup_id | INT | sma 专属的 vgroup 名称 |
-
-## PERF_STREAMS
-
-| # | **列名** | **数据类型** | **说明** |
-| --- | :----------: | ------------ | --------------------------------------- |
-| 1 | stream_name | BINARY(64) | 流计算名称 |
-| 2 | create_time | TIMESTAMP | 创建时间 |
-| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 |
-| 4 | status | BIANRY(20) | 流当前状态 |
-| 5 | source_db | BINARY(64) | 源数据库 |
-| 6 | target_db | BIANRY(64) | 目的数据库 |
-| 7 | target_table | BINARY(192) | 流计算写入的目标表 |
-| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算 |
-| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算 |
diff --git a/docs/zh/12-taos-sql/24-show.md b/docs/zh/12-taos-sql/24-show.md
index 781f94324c78e7975abde33803cffdb914da020c..31b7c085a1ba97630223c16e06022ec9dfd9ea50 100644
--- a/docs/zh/12-taos-sql/24-show.md
+++ b/docs/zh/12-taos-sql/24-show.md
@@ -1,19 +1,10 @@
---
sidebar_label: SHOW 命令
-title: 使用 SHOW 命令查看系统元数据
+title: SHOW 命令
+description: SHOW 命令的完整列表
---
-除了使用 `select` 语句查询 `INFORMATION_SCHEMA` 数据库中的表获得系统中的各种元数据、系统信息和状态之外,也可以用 `SHOW` 命令来实现同样的目的。
-
-## SHOW ACCOUNTS
-
-```sql
-SHOW ACCOUNTS;
-```
-
-显示当前系统中所有租户的信息。
-
-注:企业版独有
+SHOW 命令可以用来获取简要的系统信息。若想获取系统中详细的各种元数据、系统信息和状态,请使用 select 语句查询 INFORMATION_SCHEMA 数据库中的表。
## SHOW APPS
@@ -194,7 +185,7 @@ SHOW STREAMS;
SHOW SUBSCRIPTIONS;
```
-显示当前数据库下的所有的订阅关系
+显示当前系统内所有的订阅关系
## SHOW TABLES
diff --git a/docs/zh/12-taos-sql/25-grant.md b/docs/zh/12-taos-sql/25-grant.md
index c41a3fcfc9ee42e56e48082da5b6420073d92cdf..7fb944710125de6fe4d6efcedbb0677b33e1fd0f 100644
--- a/docs/zh/12-taos-sql/25-grant.md
+++ b/docs/zh/12-taos-sql/25-grant.md
@@ -1,6 +1,7 @@
---
sidebar_label: 权限管理
title: 权限管理
+description: 企业版中才具有的权限管理功能
---
本节讲述如何在 TDengine 中进行权限管理的相关操作。
@@ -8,14 +9,51 @@ title: 权限管理
## 创建用户
```sql
-CREATE USER use_name PASS 'password';
+CREATE USER use_name PASS 'password' [SYSINFO {1|0}];
```
创建用户。
-use_name最长为23字节。
+use_name 最长为 23 字节。
-password最长为128字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。
+password 最长为 128 字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。
+
+SYSINFO 表示用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息(如 DNODE、QNODE 等)、存储相关的信息等。默认为可以查看系统信息。
+
+例如,创建密码为 123456 且可以查看系统信息的用户 test 如下:
+
+```sql
+taos> create user test pass '123456' sysinfo 1;
+Query OK, 0 of 0 rows affected (0.001254s)
+```
+
+## 查看用户
+
+```sql
+SHOW USERS;
+```
+
+查看用户信息。
+
+```sql
+taos> show users;
+ name | super | enable | sysinfo | create_time |
+================================================================================
+ test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
+ root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
+Query OK, 2 rows in database (0.001657s)
+```
+
+也可以通过查询 INFORMATION_SCHEMA.INS_USERS 系统表来查看用户信息,例如:
+
+```sql
+taos> select * from information_schema.ins_users;
+ name | super | enable | sysinfo | create_time |
+================================================================================
+ test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
+ root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
+Query OK, 2 rows in database (0.001953s)
+```
## 删除用户
@@ -36,9 +74,15 @@ alter_user_clause: {
```
- PASS:修改用户密码。
-- ENABLE:修改用户是否启用。1表示启用此用户,0表示禁用此用户。
-- SYSINFO:修改用户是否可查看系统信息。1表示可以查看系统信息,0表示不可以查看系统信息。
+- ENABLE:修改用户是否启用。1 表示启用此用户,0 表示禁用此用户。
+- SYSINFO:修改用户是否可查看系统信息。1 表示可以查看系统信息,0 表示不可以查看系统信息。
+
+例如,禁用 test 用户:
+```sql
+taos> alter user test enable 0;
+Query OK, 0 of 0 rows affected (0.001160s)
+```
## 授权
@@ -61,7 +105,7 @@ priv_level : {
}
```
-对用户授权。
+对用户授权。授权功能只包含在企业版中。
授权级别支持到DATABASE,权限有READ和WRITE两种。
@@ -91,4 +135,4 @@ priv_level : {
```
-收回对用户的授权。
+收回对用户的授权。授权功能只包含在企业版中。
diff --git a/docs/zh/12-taos-sql/26-udf.md b/docs/zh/12-taos-sql/26-udf.md
index 7ddcad298b4b9eb4191abded0663055620b741c3..6dc1b6eb5fbe346ae65993e4e290566179b0e6ee 100644
--- a/docs/zh/12-taos-sql/26-udf.md
+++ b/docs/zh/12-taos-sql/26-udf.md
@@ -1,6 +1,7 @@
---
sidebar_label: 自定义函数
-title: 用户自定义函数
+title: 自定义函数
+description: 使用 UDF 的详细指南
---
除了 TDengine 的内置函数以外,用户还可以编写自己的函数逻辑并加入TDengine系统中。
diff --git a/docs/zh/12-taos-sql/27-index.md b/docs/zh/12-taos-sql/27-index.md
index 2c0907723e76f304566e6a19bdef2d63225f903f..aa84140296832f79a6498d0da2b5a8f500cd1e90 100644
--- a/docs/zh/12-taos-sql/27-index.md
+++ b/docs/zh/12-taos-sql/27-index.md
@@ -1,6 +1,7 @@
---
sidebar_label: 索引
-title: 使用索引
+title: 索引
+description: 索引功能的使用细节
---
TDengine 从 3.0.0.0 版本开始引入了索引功能,支持 SMA 索引和 FULLTEXT 索引。
diff --git a/docs/zh/12-taos-sql/28-recovery.md b/docs/zh/12-taos-sql/28-recovery.md
index 72b220b8ff44917831ac16301237702c991b9b15..582c3739073513df4ceb212080805136947e62d4 100644
--- a/docs/zh/12-taos-sql/28-recovery.md
+++ b/docs/zh/12-taos-sql/28-recovery.md
@@ -1,6 +1,7 @@
---
sidebar_label: 异常恢复
title: 异常恢复
+description: 如何终止出现问题的连接、查询和事务以使系统恢复正常
---
在一个复杂的应用场景中,连接和查询任务等有可能进入一种错误状态或者耗时过长迟迟无法结束,此时需要有能够终止这些连接或任务的方法。
diff --git a/docs/zh/12-taos-sql/29-changes.md b/docs/zh/12-taos-sql/29-changes.md
index d653c59a5cd1309fbdcd6ef7e3706e33c4a43dee..e63825045d5ddc26d289af4bbd7fa808719bb99c 100644
--- a/docs/zh/12-taos-sql/29-changes.md
+++ b/docs/zh/12-taos-sql/29-changes.md
@@ -1,6 +1,6 @@
---
-sidebar_label: 3.0 版本语法变更
-title: 3.0 版本语法变更
+sidebar_label: 语法变更
+title: 语法变更
description: "TDengine 3.0 版本的语法变更说明"
---
diff --git a/docs/zh/12-taos-sql/index.md b/docs/zh/12-taos-sql/index.md
index 821679551c453b1a3f2937ac5d2409dd733cd593..739d26b2240ddfcf32a269015f5c8915f4854f33 100644
--- a/docs/zh/12-taos-sql/index.md
+++ b/docs/zh/12-taos-sql/index.md
@@ -1,11 +1,11 @@
---
-title: TAOS SQL
-description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容"
+title: TDengine SQL
+description: 'TDengine SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容'
---
-本文档说明 TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。TDengine 3.0 版本相比 2.x 版本做了大量改进和优化,特别是查询引擎进行了彻底的重构,因此 SQL 语法相比 2.x 版本有很多变更。详细的变更内容请见 [3.0 版本语法变更](/taos-sql/changes) 章节
+本文档说明 TDengine SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。TDengine 3.0 版本相比 2.x 版本做了大量改进和优化,特别是查询引擎进行了彻底的重构,因此 SQL 语法相比 2.x 版本有很多变更。详细的变更内容请见 [3.0 版本语法变更](/taos-sql/changes) 章节
-TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 提供标准的 SQL 语法,并针对时序数据和业务的特点优化和新增了许多语法和功能。TAOS SQL 语句的最大长度为 1M。TAOS SQL 不支持关键字的缩写,例如 DELETE 不能缩写为 DEL。
+TDengine SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TDengine SQL 提供标准的 SQL 语法,并针对时序数据和业务的特点优化和新增了许多语法和功能。TDengine SQL 语句的最大长度为 1M。TDengine SQL 不支持关键字的缩写,例如 DELETE 不能缩写为 DEL。
本章节 SQL 语法遵循如下约定:
diff --git a/docs/zh/14-reference/02-rest-api/_category_.yml b/docs/zh/14-reference/02-rest-api/_category_.yml
deleted file mode 100644
index 57a20d8458e937f60c41806be4392ebb2d13e0f7..0000000000000000000000000000000000000000
--- a/docs/zh/14-reference/02-rest-api/_category_.yml
+++ /dev/null
@@ -1 +0,0 @@
-label: REST API
diff --git a/docs/zh/14-reference/04-taosadapter.md b/docs/zh/14-reference/04-taosadapter.md
index 9baafb9b9582445280d5c73c891694e2134d15fb..71bf5f4223ae97cf2c1153aaea3b8f946e213522 100644
--- a/docs/zh/14-reference/04-taosadapter.md
+++ b/docs/zh/14-reference/04-taosadapter.md
@@ -30,7 +30,7 @@ taosAdapter 提供以下功能:
### 安装 taosAdapter
-taosAdapter 从 TDengine v2.4.0.0 版本开始成为 TDengine 服务端软件 的一部分,如果您使用 TDengine server 您不需要任何额外的步骤来安装 taosAdapter。您可以从[涛思数据官方网站](https://taosdata.com/cn/all-downloads/)下载 TDengine server(taosAdapter 包含在 v2.4.0.0 及以上版本)安装包。如果需要将 taosAdapter 分离部署在 TDengine server 之外的服务器上,则应该在该服务器上安装完整的 TDengine 来安装 taosAdapter。如果您需要使用源代码编译生成 taosAdapter,您可以参考[构建 taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD-CN.md)文档。
+taosAdapter 是 TDengine 服务端软件 的一部分,如果您使用 TDengine server 您不需要任何额外的步骤来安装 taosAdapter。您可以从[涛思数据官方网站](https://taosdata.com/cn/all-downloads/)下载 TDengine server 安装包。如果需要将 taosAdapter 分离部署在 TDengine server 之外的服务器上,则应该在该服务器上安装完整的 TDengine 来安装 taosAdapter。如果您需要使用源代码编译生成 taosAdapter,您可以参考[构建 taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD-CN.md)文档。
### start/stop taosAdapter
@@ -156,7 +156,7 @@ AllowWebSockets
## 功能列表
- RESTful 接口
- [https://docs.taosdata.com/reference/rest-api/](https://docs.taosdata.com/reference/rest-api/)
+ [RESTful API](../../connector/rest-api)
- 兼容 InfluxDB v1 写接口
[https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/)
- 兼容 OpenTSDB JSON 和 telnet 格式写入
@@ -179,7 +179,7 @@ AllowWebSockets
### TDengine RESTful 接口
-您可以使用任何支持 http 协议的客户端通过访问 RESTful 接口地址 `http://:6041/rest/sql` 来写入数据到 TDengine 或从 TDengine 中查询数据。细节请参考[官方文档](/reference/rest-api/)。
+您可以使用任何支持 http 协议的客户端通过访问 RESTful 接口地址 `http://:6041/rest/sql` 来写入数据到 TDengine 或从 TDengine 中查询数据。细节请参考[官方文档](../../connector/rest-api/)。
### InfluxDB
@@ -329,4 +329,4 @@ taosAdapter 通过参数 `restfulRowLimit` 来控制结果的返回条数,-1
| 3 | telegrafUseFieldNum | 请参考 taosAdapter telegraf 配置方法 |
| 4 | restfulRowLimit | restfulRowLimit | 内嵌 httpd 默认输出 10240 行数据,最大允许值为 102400。taosAdapter 也提供 restfulRowLimit 但是默认不做限制。您可以根据实际场景需求进行配置 |
| 5 | httpDebugFlag | 不适用 | httpdDebugFlag 对 taosAdapter 不起作用 |
-| 6 | httpDBNameMandatory | 不适用 | taosAdapter 要求 URL 中必须指定数据库名 |
\ No newline at end of file
+| 6 | httpDBNameMandatory | 不适用 | taosAdapter 要求 URL 中必须指定数据库名 |
diff --git a/docs/zh/14-reference/05-taosbenchmark.md b/docs/zh/14-reference/05-taosbenchmark.md
index f84ec65b4c8574c0812567a65213d7605b306c99..0d6aad62401daf76737caf803461c187189cb76f 100644
--- a/docs/zh/14-reference/05-taosbenchmark.md
+++ b/docs/zh/14-reference/05-taosbenchmark.md
@@ -405,37 +405,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
订阅子表或者普通表的配置参数在 `specified_table_query` 中设置。
-- **threads** : 执行 SQL 的线程数,默认为 1。
-
-- **interval** : 执行订阅的时间间隔,单位为秒,默认为 0。
-
-- **restart** : "yes" 表示开始新的订阅,"no" 表示继续之前的订阅,默认值为 "no"。
-
-- **keepProgress** : "yes" 表示保留订阅进度,"no" 表示不保留,默认值为 "no"。
-
-- **resubAfterConsume** : "yes" 表示取消之前的订阅然后再次订阅, "no" 表示继续之前的订阅,默认值为 "no"。
+- **threads/concurrent** : 执行 SQL 的线程数,默认为 1。
- **sqls** :
- **sql** : 执行的 SQL 命令,必填。
- - **result** : 保存查询结果的文件,未指定则不保存。
-
-#### 订阅超级表的配置参数
-
-订阅超级表的配置参数在 `super_table_query` 中设置。
-
-- **stblname** : 要订阅的超级表名称,必填。
-
-- **threads** : 执行 SQL 的线程数,默认为 1。
-
-- **interval** : 执行订阅的时间间隔,单位为秒,默认为 0。
-
-- **restart** : "yes" 表示开始新的订阅,"no" 表示继续之前的订阅,默认值为 "no"。
-
-- **keepProgress** : "yes" 表示保留订阅进度,"no" 表示不保留,默认值为 "no"。
-
-- **resubAfterConsume** : "yes" 表示取消之前的订阅然后再次订阅, "no" 表示继续之前的订阅,默认值为 "no"。
-
-- **sqls** :
- - **sql** : 执行的 SQL 命令,必填;对于超级表的查询 SQL,在 SQL 命令中保留 "xxxx",程序会自动将其替换为超级表的所有子表名。
- 替换为超级表中所有的子表名。
- - **result** : 保存查询结果的文件,未指定则不保存。
diff --git a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp
index a78e18028a94c2f6a783b08d992a25c791527407..3bc0d960f1db45ee8d2adcee26de89334e681956 100644
Binary files a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp and b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp differ
diff --git a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp
index b152418d0902b8ebdf62ebce6705c10dd5ab4fbf..f5a602d3f9dcecb64ded5e1f463ba460daab0024 100644
Binary files a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp and b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp differ
diff --git a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp
index f58f48b7f17375cb8e62e7c0126ca3aea56a13f6..f155fa42a0fb5df71ee48c8c65a8c7d8851ddc3e 100644
Binary files a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp and b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp differ
diff --git a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp
index 00afcce013602dce0da17bfd033f65aaa8e43bb7..dc0b85e262bd4340e986a42105e0ff9838d12fa6 100644
Binary files a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp and b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp differ
diff --git a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-5-database.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-5-database.webp
index 567e5694f9d7a035a3eb354493d3df8ed64db251..342c8cfc0a8e852e7cd092aff453ed1fd2ec85a2 100644
Binary files a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-5-database.webp and b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-5-database.webp differ
diff --git a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp
index 8666193f59497180574fd2786266e5baabbe9761..942130d4fabf7944c7add10acb3bb42ca7f51e0f 100644
Binary files a/docs/zh/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp and b/docs/zh/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp differ
diff --git a/docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource.webp b/docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource.webp
index 06d0ff6ed50091a6340508bc5b2b3f78b65dcb18..d7fc9e233acd1a4b1bbb940b13bc4296c261a33a 100644
Binary files a/docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource.webp and b/docs/zh/14-reference/07-tdinsight/assets/howto-add-datasource.webp differ
diff --git a/docs/zh/14-reference/07-tdinsight/assets/import_dashboard.webp b/docs/zh/14-reference/07-tdinsight/assets/import_dashboard.webp
index fb7958f1b9fbd43c8f63136024842790e711c490..ae2a1e8e9b7b63a68d56dfcd2187eca614da9a3d 100644
Binary files a/docs/zh/14-reference/07-tdinsight/assets/import_dashboard.webp and b/docs/zh/14-reference/07-tdinsight/assets/import_dashboard.webp differ
diff --git a/docs/zh/14-reference/07-tdinsight/assets/import_dashboard_view.webp b/docs/zh/14-reference/07-tdinsight/assets/import_dashboard_view.webp
new file mode 100644
index 0000000000000000000000000000000000000000..1b10e41c75fbbb9a30bce4aa8d1adb8216fbe127
Binary files /dev/null and b/docs/zh/14-reference/07-tdinsight/assets/import_dashboard_view.webp differ
diff --git a/docs/zh/14-reference/07-tdinsight/assets/select_dashboard_db.webp b/docs/zh/14-reference/07-tdinsight/assets/select_dashboard_db.webp
new file mode 100644
index 0000000000000000000000000000000000000000..956132e37e9df255d3ff82654fd357bec001e695
Binary files /dev/null and b/docs/zh/14-reference/07-tdinsight/assets/select_dashboard_db.webp differ
diff --git a/docs/zh/14-reference/07-tdinsight/index.md b/docs/zh/14-reference/07-tdinsight/index.mdx
similarity index 67%
rename from docs/zh/14-reference/07-tdinsight/index.md
rename to docs/zh/14-reference/07-tdinsight/index.mdx
index 5990a831b8bc1788deaddfb38f717f2723969362..ecd63621432794e27fd80b88e864590c83e9b333 100644
--- a/docs/zh/14-reference/07-tdinsight/index.md
+++ b/docs/zh/14-reference/07-tdinsight/index.mdx
@@ -1,21 +1,31 @@
---
-title: TDinsight - 基于Grafana的TDengine零依赖监控解决方案
+title: TDinsight
sidebar_label: TDinsight
+description: 基于Grafana的TDengine零依赖监控解决方案
---
-TDinsight 是使用内置监控数据库和 [Grafana] 对 TDengine 进行监控的解决方案。
+TDinsight 是使用监控数据库和 [Grafana] 对 TDengine 进行监控的解决方案。
-TDengine 启动后,会自动创建一个监测数据库 `log`,并自动将服务器的 CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库,并对重要的系统操作(比如登录、创建、删除数据库等)以及各种错误报警信息进行记录。通过 [Grafana] 和 [TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases),TDinsight 将集群状态、节点信息、插入及查询请求、资源使用情况等进行可视化展示,同时还支持 vnode、dnode、mnode 节点状态异常告警,为开发者实时监控 TDengine 集群运行状态提供了便利。本文将指导用户安装 Grafana 服务器并通过 `TDinsight.sh` 安装脚本自动安装 TDengine 数据源插件及部署 TDinsight 可视化面板。
+TDengine 通过 [taosKeeper](../taosKeeper) 将服务器的 CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入指定数据库,并对重要的系统操作(比如登录、创建、删除数据库等)以及各种错误报警信息进行记录。通过 [Grafana] 和 [TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases),TDinsight 将集群状态、节点信息、插入及查询请求、资源使用情况等进行可视化展示,同时还支持 vnode、dnode、mnode 节点状态异常告警,为开发者实时监控 TDengine 集群运行状态提供了便利。本文将指导用户安装 Grafana 服务器并通过 `TDinsight.sh` 安装脚本自动安装 TDengine 数据源插件及部署 TDinsight 可视化面板。
## 系统要求
-要部署 TDinsight,需要一个单节点的 TDengine 服务器或一个多节点的 [TDengine] 集群,以及一个[Grafana]服务器。此仪表盘需要 TDengine 2.3.3.0 及以上,并启用 `log` 数据库(`monitor = 1`)。
+- 单节点的 TDengine 服务器或多节点的 [TDengine] 集群,以及一个[Grafana]服务器。此仪表盘需要 TDengine 3.0.0.0 及以上,并开启监控服务,具体配置请参考:[TDengine 监控配置](../config/#监控相关)。
+- taosAdapter 已经安装并正常运行。具体细节请参考:[taosAdapter 使用手册](../taosadapter)
+- taosKeeper 已安装并正常运行。具体细节请参考:[taosKeeper 使用手册](../taosKeeper)
+
+记录以下信息:
+
+- taosAdapter 集群 REST API 地址,如:`http://tdengine.local:6041`。
+- taosAdapter 集群认证信息,可使用用户名及密码。
+- taosKeeper 记录监控指标的数据库名称。
## 安装 Grafana
-我们建议在此处使用最新的[Grafana] 7 或 8 版本。您可以在任何[支持的操作系统](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems)中,按照 [Grafana 官方文档安装说明](https://grafana.com/docs/grafana/latest/installation/) 安装 [Grafana]。
+我们建议在此处使用最新的[Grafana] 8 或 9 版本。您可以在任何[支持的操作系统](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems)中,按照 [Grafana 官方文档安装说明](https://grafana.com/docs/grafana/latest/installation/) 安装 [Grafana]。
-### 在 Debian 或 Ubuntu 上安装 Grafana
+
+
对于 Debian 或 Ubuntu 操作系统,建议使用 Grafana 镜像仓库。使用如下命令从零开始安装:
@@ -31,6 +41,8 @@ sudo apt-get install grafana
```
### 在 CentOS / RHEL 上安装 Grafana
+
+
您可以从官方 YUM 镜像仓库安装。
@@ -59,7 +71,12 @@ sudo yum install \
https://dl.grafana.com/oss/release/grafana-7.5.11-1.x86_64.rpm
```
-## 自动部署 TDinsight
+
+
+
+
+
+
我们提供了一个自动化安装脚本 [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) 脚本以便用户快速进行安装配置。
@@ -71,7 +88,7 @@ chmod +x TDinsight.sh
./TDinsight.sh
```
-这个脚本会自动下载最新的[Grafana TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases/latest) 和 [TDinsight 仪表盘](https://grafana.com/grafana/dashboards/15167) ,将命令行选项中的可配置参数转为 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置文件,以进行自动化部署及更新等操作。利用该脚本提供的告警设置选项,你还可以获得内置的阿里云短信告警通知支持。
+这个脚本会自动下载最新的[Grafana TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases/latest) 和 [TDinsight 仪表盘](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) ,将命令行选项中的可配置参数转为 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置文件,以进行自动化部署及更新等操作。利用该脚本提供的告警设置选项,你还可以获得内置的阿里云短信告警通知支持。
假设您在同一台主机上使用 TDengine 和 Grafana 的默认服务。运行 `./TDinsight.sh` 并打开 Grafana 浏览器窗口就可以看到 TDinsight 仪表盘了。
@@ -106,18 +123,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
-E, --external-notifier Apply external notifier uid to TDinsight dashboard.
-Aliyun SMS as Notifier:
--s, --sms-enabled To enable tdengine-datasource plugin builtin Aliyun SMS webhook.
--N, --sms-notifier-name Provisioning notifier name.[default: TDinsight Builtin SMS]
--U, --sms-notifier-uid Provisioning notifier uid, use lowercase notifier name by default.
--D, --sms-notifier-is-default Set notifier as default.
--I, --sms-access-key-id Aliyun SMS access key id
--K, --sms-access-key-secret Aliyun SMS access key secret
--S, --sms-sign-name Sign name
--C, --sms-template-code Template code
--T, --sms-template-param Template param, a escaped JSON string like '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}'
--B, --sms-phone-numbers Comma-separated numbers list, eg "189xxxxxxxx,132xxxxxxxx"
--L, --sms-listen-addr [default: 127.0.0.1:9100]
```
大多数命令行选项都可以通过环境变量获得同样的效果。
@@ -136,17 +141,6 @@ Aliyun SMS as Notifier:
| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight 仪表盘标题。 [默认:TDinsight] |
| -e | --tdinsight-可编辑 | TDINSIGHT_DASHBOARD_EDITABLE | 如果配置仪表盘可以编辑。 [默认值:false] |
| -E | --external-notifier | EXTERNAL_NOTIFIER | 将外部通知程序 uid 应用于 TDinsight 仪表盘。 |
-| -s | --sms-enabled | SMS_ENABLED | 启用阿里云短信 webhook 内置的 tdengine-datasource 插件。 |
-| -N | --sms-notifier-name | SMS_NOTIFIER_NAME | 供应通知程序名称。[默认:`TDinsight Builtin SMS`] |
-| -U | --sms-notifier-uid | SMS_NOTIFIER_UID | "Notification Channel" `uid`,默认使用程序名称的小写,其他字符用 “-” 代替。 |
-| -D | --sms-notifier-is-default | SMS_NOTIFIER_IS_DEFAULT | 将内置短信通知设置为默认值。 |
-| -I | --sms-access-key-id | SMS_ACCESS_KEY_ID | 阿里云短信访问密钥 id |
-| -K | --sms-access-key-secret | SMS_ACCESS_KEY_SECRET | 阿里云短信访问秘钥 |
-| -S | --sms-sign-name | SMS_SIGN_NAME | 签名 |
-| -C | --sms-template-code | SMS_TEMPLATE_CODE | 模板代码 |
-| -T | --sms-template-param | SMS_TEMPLATE_PARAM | 模板参数的 JSON 模板 |
-| -B | --sms-phone-numbers | SMS_PHONE_NUMBERS | 逗号分隔的手机号列表,例如`"189xxxxxxxx,132xxxxxxxx"` |
-| -L | --sms-listen-addr | SMS_LISTEN_ADDR | 内置 SMS webhook 监听地址,默认为`127.0.0.1:9100` |
假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本:
@@ -166,31 +160,18 @@ curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifica
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
```
-如果你想使用[阿里云短信](https://www.aliyun.com/product/sms)服务作为通知渠道,你应该使用`-s`标志启用并添加以下参数:
-
-- `-N`:Notification Channel 名,默认为`TDinsight Builtin SMS`。
-- `-U`:Channel uid,默认是 `name` 的小写,任何其他字符都替换为 - ,对于默认的 `-N`,其 uid 为 `tdinsight-builtin-sms`。
-- `-I`:阿里云短信访问密钥 id。
-- `-K`:阿里云短信访问秘钥。
-- `-S`:阿里云短信签名。
-- `-C`:阿里云短信模板 ID。
-- `-T`:阿里云短信模板参数,为 JSON 格式模板,示例如下 `'{"alarm_level":"%s","time":"%s","name":"%s","content":"%s "}'`。有四个参数:告警级别、时间、名称和告警内容。
-- `-B`:电话号码列表,以逗号`,`分隔。
-
如果要监控多个 TDengine 集群,则需要设置多个 TDinsight 仪表盘。设置非默认 TDinsight 需要进行一些更改: `-n` `-i` `-t` 选项需要更改为非默认名称,如果使用 内置短信告警功能,`-N` 和 `-L` 也应该改变。
```bash
sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1'
-# 如果使用内置短信通知
-sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1' \
- -s -N 'Env1 SMS' -I xx -K xx -S xx -C SMS_XX -T '' -B 00000000000 -L 127.0.0.01:10611
```
请注意,配置数据源、通知 Channel 和仪表盘在前端是不可更改的。您应该再次通过此脚本更新配置或手动更改 `/etc/grafana/provisioning` 目录(这是 Grafana 的默认目录,根据需要使用`-P`选项更改)中的配置文件。
特别地,当您使用 Grafana Cloud 或其他组织时,`-O` 可用于设置组织 ID。 `-G` 可指定 Grafana 插件安装目录。 `-e` 参数将仪表盘设置为可编辑。
-## 手动设置 TDinsight
+
+
### 安装 TDengine 数据源插件
@@ -247,23 +228,30 @@ sudo systemctl enable grafana-server
![TDengine Database TDinsight 数据源测试](./assets/howto-add-datasource-test.webp)
+
+
+
### 导入仪表盘
-指向 **+** / **Create** - **import**(或 `/dashboard/import` url)。
+在配置 TDengine 数据源界面,点击 **Dashboards** tab。
![TDengine Database TDinsight 导入仪表盘和配置](./assets/import_dashboard.webp)
-在 **Import via grafana.com** 位置键入仪表盘 ID `15167` 并 **Load**。
+选择 `TDengine for 3.x`,并点击 `import`。
+
+导入完成后,在搜索界面已经出现了 **TDinsight for 3.x** dashboard。
+
+![TDengine Database TDinsight 查看导入结果](./assets/import_dashboard_view.webp)
-![通过 grafana.com 导入](./assets/import-dashboard-15167.webp)
+进入 TDinsight for 3.x dashboard 后,选择 taosKeeper 中设置的记录监控指标的数据库。
-导入完成后,TDinsight 的完整页面视图如下所示。
+![TDengine Database TDinsight 选择数据库](./assets/select_dashboard_db.webp)
-![TDengine Database TDinsight 显示](./assets/TDinsight-full.webp)
+然后可以看到监控结果。
## TDinsight 仪表盘详细信息
-TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster)或数据库的使用情况和状态。
+TDinsight 仪表盘旨在提供 TDengine 相关资源的使用情况和状态,比如 dnodes、 mnodes、 vnodes 和数据库等。
指标详情如下:
@@ -285,7 +273,6 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
- **Measuring Points Used**:启用告警规则的测点数用量(社区版无数据,默认情况下是健康的)。
- **Grants Expire Time**:启用告警规则的企业版过期时间(社区版无数据,默认情况是健康的)。
- **Error Rate**:启用警报的集群总合错误率(每秒平均错误数)。
-- **Variables**:`show variables` 表格展示。
### DNodes 状态
@@ -294,7 +281,6 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
- **DNodes Status**:`show dnodes` 的简单表格视图。
- **DNodes Lifetime**:从创建 dnode 开始经过的时间。
- **DNodes Number**:DNodes 数量变化。
-- **Offline Reason**:如果有任何 dnode 状态为离线,则以饼图形式展示离线原因。
### MNode 概述
@@ -309,7 +295,6 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
1. **Requests Rate(Inserts per Second)**:平均每秒插入次数。
2. **Requests (Selects)**:查询请求数及变化率(count of second)。
-3. **Requests (HTTP)**:HTTP 请求数和请求速率(count of second)。
### 数据库
@@ -319,9 +304,8 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
1. **STables**:超级表数量。
2. **Total Tables**:所有表数量。
-3. **Sub Tables**:所有超级表子表的数量。
-4. **Tables**:所有普通表数量随时间变化图。
-5. **Tables Number Foreach VGroups**:每个 VGroups 包含的表数量。
+3. **Tables**:所有普通表数量随时间变化图。
+4. **Tables Number Foreach VGroups**:每个 VGroups 包含的表数量。
### DNode 资源使用情况
@@ -356,12 +340,11 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes
支持监控 taosAdapter 请求统计和状态详情。包括:
-1. **http_request**: 包含总请求数,请求失败数以及正在处理的请求数
-2. **top 3 request endpoint**: 按终端分组,请求排名前三的数据
-3. **Memory Used**: taosAdapter 内存使用情况
-4. **latency_quantile(ms)**: (1, 2, 5, 9, 99)阶段的分位数
-5. **top 3 failed request endpoint**: 按终端分组,请求失败排名前三的数据
-6. **CPU Used**: taosAdapter CPU 使用情况
+1. **http_request_inflight**: 即时处理请求数
+2. **http_request_total**: 请求总数。
+3. **http_request_fail**: 请求失败数。
+4. **CPU Used**: taosAdapter CPU 使用情况。
+5. **Memory Used**: taosAdapter 内存使用情况。
## 升级
@@ -403,13 +386,6 @@ services:
TDENGINE_API: ${TDENGINE_API}
TDENGINE_USER: ${TDENGINE_USER}
TDENGINE_PASS: ${TDENGINE_PASS}
- SMS_ACCESS_KEY_ID: ${SMS_ACCESS_KEY_ID}
- SMS_ACCESS_KEY_SECRET: ${SMS_ACCESS_KEY_SECRET}
- SMS_SIGN_NAME: ${SMS_SIGN_NAME}
- SMS_TEMPLATE_CODE: ${SMS_TEMPLATE_CODE}
- SMS_TEMPLATE_PARAM: '${SMS_TEMPLATE_PARAM}'
- SMS_PHONE_NUMBERS: $SMS_PHONE_NUMBERS
- SMS_LISTEN_ADDR: ${SMS_LISTEN_ADDR}
ports:
- 3000:3000
volumes:
diff --git a/docs/zh/14-reference/08-taos-shell.md b/docs/zh/14-reference/08-taos-shell.md
index 2f3b551502c8b9da789220b1b20e701e038dc5e7..580454987840b61a5efff4acd545443ebca9904b 100644
--- a/docs/zh/14-reference/08-taos-shell.md
+++ b/docs/zh/14-reference/08-taos-shell.md
@@ -8,7 +8,7 @@ TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine
## 安装
-如果在 TDengine 服务器端执行,无需任何安装,已经自动安装好 TDengine CLI。如果要在非 TDengine 服务器端运行,需要安装 TDengine 客户端驱动安装包,具体安装,请参考 [连接器](/reference/connector/)。
+如果在 TDengine 服务器端执行,无需任何安装,已经自动安装好 TDengine CLI。如果要在非 TDengine 服务器端运行,需要安装 TDengine 客户端驱动安装包,具体安装,请参考 [连接器](../../connector/)。
## 执行
@@ -18,7 +18,7 @@ TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine
taos
```
-如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息。(请参考 [FAQ](/train-faq/faq) 来解决终端连接服务端失败的问题)。TDengine CLI 的提示符号如下:
+如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息。(请参考 [FAQ](../../train-faq/faq) 来解决终端连接服务端失败的问题)。TDengine CLI 的提示符号如下:
```cmd
taos>
diff --git a/docs/zh/14-reference/11-docker/index.md b/docs/zh/14-reference/11-docker/index.md
index 743fc2d32f82778fb97e7879972cd23db1159c8e..58bbe1e1178fbb1a1aa649508b0e36b331964753 100644
--- a/docs/zh/14-reference/11-docker/index.md
+++ b/docs/zh/14-reference/11-docker/index.md
@@ -32,7 +32,7 @@ taos> show databases;
Query OK, 2 rows in database (0.033802s)
```
-因为运行在容器中的 TDengine 服务端使用容器的 hostname 建立连接,使用 taos shell 或者各种连接器(例如 JDBC-JNI)从容器外访问容器内的 TDengine 比较复杂,所以上述方式是访问容器中 TDengine 服务的最简单的方法,适用于一些简单场景。如果在一些复杂场景下想要从容器化使用 taos shell 或者各种连接器访问容器中的 TDengine 服务,请参考下一节。
+因为运行在容器中的 TDengine 服务端使用容器的 hostname 建立连接,使用 TDengine CLI 或者各种连接器(例如 JDBC-JNI)从容器外访问容器内的 TDengine 比较复杂,所以上述方式是访问容器中 TDengine 服务的最简单的方法,适用于一些简单场景。如果在一些复杂场景下想要从容器化使用 TDengine CLI 或者各种连接器访问容器中的 TDengine 服务,请参考下一节。
## 在 host 网络上启动 TDengine
@@ -75,7 +75,7 @@ docker run -d \
echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts
```
-最后,可以从 taos shell 或者任意连接器以 "tdengine" 为服务端地址访问 TDengine 服务。
+最后,可以从 TDengine CLI 或者任意连接器以 "tdengine" 为服务端地址访问 TDengine 服务。
```shell
taos -h tdengine -P 6030
@@ -119,7 +119,7 @@ taos -h tdengine -P 6030
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.tdengine.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -234,7 +234,7 @@ go mod tidy
```dockerfile
FROM golang:1.19.0-buster as builder
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.tdengine.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -250,7 +250,7 @@ RUN go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.tdengine.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -354,7 +354,7 @@ test-docker_td-2_1 /tini -- /usr/bin/entrypoi ... Up
test-docker_td-3_1 /tini -- /usr/bin/entrypoi ... Up
```
-4. 用 taos shell 查看 dnodes
+4. 用 TDengine CLI 查看 dnodes
```shell
diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md
index d2efc5baf381d7631533f9b80fa2994dc16a221e..7b31e10572c4a6bafd088e7b7c14853ee0d32df1 100644
--- a/docs/zh/14-reference/12-config/index.md
+++ b/docs/zh/14-reference/12-config/index.md
@@ -698,122 +698,123 @@ charset 的有效值是 UTF-8。
| 45 | numOfVnodeFetchThreads | 否 | 是 |
| 46 | numOfVnodeWriteThreads | 否 | 是 |
| 47 | numOfVnodeSyncThreads | 否 | 是 |
-| 48 | numOfQnodeQueryThreads | 否 | 是 |
-| 49 | numOfQnodeFetchThreads | 否 | 是 |
-| 50 | numOfSnodeSharedThreads | 否 | 是 |
-| 51 | numOfSnodeUniqueThreads | 否 | 是 |
-| 52 | rpcQueueMemoryAllowed | 否 | 是 |
-| 53 | logDir | 是 | 是 |
-| 54 | minimalLogDirGB | 是 | 是 |
-| 55 | numOfLogLines | 是 | 是 |
-| 56 | asyncLog | 是 | 是 |
-| 57 | logKeepDays | 是 | 是 |
-| 58 | debugFlag | 是 | 是 |
-| 59 | tmrDebugFlag | 是 | 是 |
-| 60 | uDebugFlag | 是 | 是 |
-| 61 | rpcDebugFlag | 是 | 是 |
-| 62 | jniDebugFlag | 是 | 是 |
-| 63 | qDebugFlag | 是 | 是 |
-| 64 | cDebugFlag | 是 | 是 |
-| 65 | dDebugFlag | 是 | 是 |
-| 66 | vDebugFlag | 是 | 是 |
-| 67 | mDebugFlag | 是 | 是 |
-| 68 | wDebugFlag | 是 | 是 |
-| 69 | sDebugFlag | 是 | 是 |
-| 70 | tsdbDebugFlag | 是 | 是 |
-| 71 | tqDebugFlag | 否 | 是 |
-| 72 | fsDebugFlag | 是 | 是 |
-| 73 | udfDebugFlag | 否 | 是 |
-| 74 | smaDebugFlag | 否 | 是 |
-| 75 | idxDebugFlag | 否 | 是 |
-| 76 | tdbDebugFlag | 否 | 是 |
-| 77 | metaDebugFlag | 否 | 是 |
-| 78 | timezone | 是 | 是 |
-| 79 | locale | 是 | 是 |
-| 80 | charset | 是 | 是 |
-| 81 | udf | 是 | 是 |
-| 82 | enableCoreFile | 是 | 是 |
-| 83 | arbitrator | 是 | 否 |
-| 84 | numOfThreadsPerCore | 是 | 否 |
-| 85 | numOfMnodes | 是 | 否 |
-| 86 | vnodeBak | 是 | 否 |
-| 87 | balance | 是 | 否 |
-| 88 | balanceInterval | 是 | 否 |
-| 89 | offlineThreshold | 是 | 否 |
-| 90 | role | 是 | 否 |
-| 91 | dnodeNopLoop | 是 | 否 |
-| 92 | keepTimeOffset | 是 | 否 |
-| 93 | rpcTimer | 是 | 否 |
-| 94 | rpcMaxTime | 是 | 否 |
-| 95 | rpcForceTcp | 是 | 否 |
-| 96 | tcpConnTimeout | 是 | 否 |
-| 97 | syncCheckInterval | 是 | 否 |
-| 98 | maxTmrCtrl | 是 | 否 |
-| 99 | monitorReplica | 是 | 否 |
-| 100 | smlTagNullName | 是 | 否 |
-| 101 | keepColumnName | 是 | 否 |
-| 102 | ratioOfQueryCores | 是 | 否 |
-| 103 | maxStreamCompDelay | 是 | 否 |
-| 104 | maxFirstStreamCompDelay | 是 | 否 |
-| 105 | retryStreamCompDelay | 是 | 否 |
-| 106 | streamCompDelayRatio | 是 | 否 |
-| 107 | maxVgroupsPerDb | 是 | 否 |
-| 108 | maxTablesPerVnode | 是 | 否 |
-| 109 | minTablesPerVnode | 是 | 否 |
-| 110 | tableIncStepPerVnode | 是 | 否 |
-| 111 | cache | 是 | 否 |
-| 112 | blocks | 是 | 否 |
-| 113 | days | 是 | 否 |
-| 114 | keep | 是 | 否 |
-| 115 | minRows | 是 | 否 |
-| 116 | maxRows | 是 | 否 |
-| 117 | quorum | 是 | 否 |
-| 118 | comp | 是 | 否 |
-| 119 | walLevel | 是 | 否 |
-| 120 | fsync | 是 | 否 |
-| 121 | replica | 是 | 否 |
-| 122 | partitions | 是 | 否 |
-| 123 | quorum | 是 | 否 |
-| 124 | update | 是 | 否 |
-| 125 | cachelast | 是 | 否 |
-| 126 | maxSQLLength | 是 | 否 |
-| 127 | maxWildCardsLength | 是 | 否 |
-| 128 | maxRegexStringLen | 是 | 否 |
-| 129 | maxNumOfOrderedRes | 是 | 否 |
-| 130 | maxConnections | 是 | 否 |
-| 131 | mnodeEqualVnodeNum | 是 | 否 |
-| 132 | http | 是 | 否 |
-| 133 | httpEnableRecordSql | 是 | 否 |
-| 134 | httpMaxThreads | 是 | 否 |
-| 135 | restfulRowLimit | 是 | 否 |
-| 136 | httpDbNameMandatory | 是 | 否 |
-| 137 | httpKeepAlive | 是 | 否 |
-| 138 | enableRecordSql | 是 | 否 |
-| 139 | maxBinaryDisplayWidth | 是 | 否 |
-| 140 | stream | 是 | 否 |
-| 141 | retrieveBlockingModel | 是 | 否 |
-| 142 | tsdbMetaCompactRatio | 是 | 否 |
-| 143 | defaultJSONStrType | 是 | 否 |
-| 144 | walFlushSize | 是 | 否 |
-| 145 | keepTimeOffset | 是 | 否 |
-| 146 | flowctrl | 是 | 否 |
-| 147 | slaveQuery | 是 | 否 |
-| 148 | adjustMaster | 是 | 否 |
-| 149 | topicBinaryLen | 是 | 否 |
-| 150 | telegrafUseFieldNum | 是 | 否 |
-| 151 | deadLockKillQuery | 是 | 否 |
-| 152 | clientMerge | 是 | 否 |
-| 153 | sdbDebugFlag | 是 | 否 |
-| 154 | odbcDebugFlag | 是 | 否 |
-| 155 | httpDebugFlag | 是 | 否 |
-| 156 | monDebugFlag | 是 | 否 |
-| 157 | cqDebugFlag | 是 | 否 |
-| 158 | shortcutFlag | 是 | 否 |
-| 159 | probeSeconds | 是 | 否 |
-| 160 | probeKillSeconds | 是 | 否 |
-| 161 | probeInterval | 是 | 否 |
-| 162 | lossyColumns | 是 | 否 |
-| 163 | fPrecision | 是 | 否 |
-| 164 | dPrecision | 是 | 否 |
-| 165 | maxRange | 是 | 否 |
-| 166 | range | 是 | 否 |
+| 48 | numOfVnodeRsmaThreads | 否 | 是 |
+| 49 | numOfQnodeQueryThreads | 否 | 是 |
+| 50 | numOfQnodeFetchThreads | 否 | 是 |
+| 51 | numOfSnodeSharedThreads | 否 | 是 |
+| 52 | numOfSnodeUniqueThreads | 否 | 是 |
+| 53 | rpcQueueMemoryAllowed | 否 | 是 |
+| 54 | logDir | 是 | 是 |
+| 55 | minimalLogDirGB | 是 | 是 |
+| 56 | numOfLogLines | 是 | 是 |
+| 57 | asyncLog | 是 | 是 |
+| 58 | logKeepDays | 是 | 是 |
+| 59 | debugFlag | 是 | 是 |
+| 60 | tmrDebugFlag | 是 | 是 |
+| 61 | uDebugFlag | 是 | 是 |
+| 62 | rpcDebugFlag | 是 | 是 |
+| 63 | jniDebugFlag | 是 | 是 |
+| 64 | qDebugFlag | 是 | 是 |
+| 65 | cDebugFlag | 是 | 是 |
+| 66 | dDebugFlag | 是 | 是 |
+| 67 | vDebugFlag | 是 | 是 |
+| 68 | mDebugFlag | 是 | 是 |
+| 69 | wDebugFlag | 是 | 是 |
+| 70 | sDebugFlag | 是 | 是 |
+| 71 | tsdbDebugFlag | 是 | 是 |
+| 72 | tqDebugFlag | 否 | 是 |
+| 73 | fsDebugFlag | 是 | 是 |
+| 74 | udfDebugFlag | 否 | 是 |
+| 75 | smaDebugFlag | 否 | 是 |
+| 76 | idxDebugFlag | 否 | 是 |
+| 77 | tdbDebugFlag | 否 | 是 |
+| 78 | metaDebugFlag | 否 | 是 |
+| 79 | timezone | 是 | 是 |
+| 80 | locale | 是 | 是 |
+| 81 | charset | 是 | 是 |
+| 82 | udf | 是 | 是 |
+| 83 | enableCoreFile | 是 | 是 |
+| 84 | arbitrator | 是 | 否 |
+| 85 | numOfThreadsPerCore | 是 | 否 |
+| 86 | numOfMnodes | 是 | 否 |
+| 87 | vnodeBak | 是 | 否 |
+| 88 | balance | 是 | 否 |
+| 89 | balanceInterval | 是 | 否 |
+| 90 | offlineThreshold | 是 | 否 |
+| 91 | role | 是 | 否 |
+| 92 | dnodeNopLoop | 是 | 否 |
+| 93 | keepTimeOffset | 是 | 否 |
+| 94 | rpcTimer | 是 | 否 |
+| 95 | rpcMaxTime | 是 | 否 |
+| 96 | rpcForceTcp | 是 | 否 |
+| 97 | tcpConnTimeout | 是 | 否 |
+| 98 | syncCheckInterval | 是 | 否 |
+| 99 | maxTmrCtrl | 是 | 否 |
+| 100 | monitorReplica | 是 | 否 |
+| 101 | smlTagNullName | 是 | 否 |
+| 102 | keepColumnName | 是 | 否 |
+| 103 | ratioOfQueryCores | 是 | 否 |
+| 104 | maxStreamCompDelay | 是 | 否 |
+| 105 | maxFirstStreamCompDelay | 是 | 否 |
+| 106 | retryStreamCompDelay | 是 | 否 |
+| 107 | streamCompDelayRatio | 是 | 否 |
+| 108 | maxVgroupsPerDb | 是 | 否 |
+| 109 | maxTablesPerVnode | 是 | 否 |
+| 110 | minTablesPerVnode | 是 | 否 |
+| 111 | tableIncStepPerVnode | 是 | 否 |
+| 112 | cache | 是 | 否 |
+| 113 | blocks | 是 | 否 |
+| 114 | days | 是 | 否 |
+| 115 | keep | 是 | 否 |
+| 116 | minRows | 是 | 否 |
+| 117 | maxRows | 是 | 否 |
+| 118 | quorum | 是 | 否 |
+| 119 | comp | 是 | 否 |
+| 120 | walLevel | 是 | 否 |
+| 121 | fsync | 是 | 否 |
+| 122 | replica | 是 | 否 |
+| 123 | partitions | 是 | 否 |
+| 124 | quorum | 是 | 否 |
+| 125 | update | 是 | 否 |
+| 126 | cachelast | 是 | 否 |
+| 127 | maxSQLLength | 是 | 否 |
+| 128 | maxWildCardsLength | 是 | 否 |
+| 129 | maxRegexStringLen | 是 | 否 |
+| 130 | maxNumOfOrderedRes | 是 | 否 |
+| 131 | maxConnections | 是 | 否 |
+| 132 | mnodeEqualVnodeNum | 是 | 否 |
+| 133 | http | 是 | 否 |
+| 134 | httpEnableRecordSql | 是 | 否 |
+| 135 | httpMaxThreads | 是 | 否 |
+| 136 | restfulRowLimit | 是 | 否 |
+| 137 | httpDbNameMandatory | 是 | 否 |
+| 138 | httpKeepAlive | 是 | 否 |
+| 139 | enableRecordSql | 是 | 否 |
+| 140 | maxBinaryDisplayWidth | 是 | 否 |
+| 141 | stream | 是 | 否 |
+| 142 | retrieveBlockingModel | 是 | 否 |
+| 143 | tsdbMetaCompactRatio | 是 | 否 |
+| 144 | defaultJSONStrType | 是 | 否 |
+| 145 | walFlushSize | 是 | 否 |
+| 146 | keepTimeOffset | 是 | 否 |
+| 147 | flowctrl | 是 | 否 |
+| 148 | slaveQuery | 是 | 否 |
+| 149 | adjustMaster | 是 | 否 |
+| 150 | topicBinaryLen | 是 | 否 |
+| 151 | telegrafUseFieldNum | 是 | 否 |
+| 152 | deadLockKillQuery | 是 | 否 |
+| 153 | clientMerge | 是 | 否 |
+| 154 | sdbDebugFlag | 是 | 否 |
+| 155 | odbcDebugFlag | 是 | 否 |
+| 156 | httpDebugFlag | 是 | 否 |
+| 157 | monDebugFlag | 是 | 否 |
+| 158 | cqDebugFlag | 是 | 否 |
+| 159 | shortcutFlag | 是 | 否 |
+| 160 | probeSeconds | 是 | 否 |
+| 161 | probeKillSeconds | 是 | 否 |
+| 162 | probeInterval | 是 | 否 |
+| 163 | lossyColumns | 是 | 否 |
+| 164 | fPrecision | 是 | 否 |
+| 165 | dPrecision | 是 | 否 |
+| 166 | maxRange | 是 | 否 |
+| 167 | range | 是 | 否 |
diff --git a/docs/zh/14-reference/12-directory.md b/docs/zh/14-reference/12-directory.md
index 262eb99fa5cc012d22b917479bc3d16442d06ddf..04aa6e72c9b2c0a04e35ef1f67f1138cf7d00ce2 100644
--- a/docs/zh/14-reference/12-directory.md
+++ b/docs/zh/14-reference/12-directory.md
@@ -30,7 +30,7 @@ TDengine 的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
- _taosd-dump-cfg.gdb_:用于方便调试 taosd 的 gdb 执行脚本。
:::note
-2.4.0.0 版本之后的 taosBenchmark 和 taosdump 需要安装独立安装包 taosTools。
+taosdump 需要安装独立安装包 taosTools。
:::
diff --git a/docs/zh/14-reference/13-schemaless/13-schemaless.md b/docs/zh/14-reference/13-schemaless/13-schemaless.md
index ae4280e26a64e2d10534a0faaf70ca0704cf58a6..a33abafaf82746afbf5669c6ea564b5a87060bb8 100644
--- a/docs/zh/14-reference/13-schemaless/13-schemaless.md
+++ b/docs/zh/14-reference/13-schemaless/13-schemaless.md
@@ -3,7 +3,7 @@ title: Schemaless 写入
description: 'Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构'
---
-在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless
+在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless
将自动增加必要的数据列,保证用户写入的数据可以被正确存储。
无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,你也可以通过,SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。
@@ -36,14 +36,14 @@ tag_set 中的所有的数据自动转化为 nchar 数据类型,并不需要
- 对空格、等号(=)、逗号(,)、双引号("),前面需要使用反斜杠(\)进行转义。(都指的是英文半角符号)
- 数值类型将通过后缀来区分数据类型:
-| **序号** | **后缀** | **映射类型** | **大小(字节)** |
-| -------- | -------- | ------------ | -------------- |
-| 1 | 无或 f64 | double | 8 |
-| 2 | f32 | float | 4 |
-| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
-| 4 | i16/u16 | SmallInt/USmallInt | 2 |
-| 5 | i32/u32 | Int/UInt | 4 |
-| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
+| **序号** | **后缀** | **映射类型** | **大小(字节)** |
+| -------- | ----------- | ----------------------------- | -------------- |
+| 1 | 无或 f64 | double | 8 |
+| 2 | f32 | float | 4 |
+| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
+| 4 | i16/u16 | SmallInt/USmallInt | 2 |
+| 5 | i32/u32 | Int/UInt | 4 |
+| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
- t, T, true, True, TRUE, f, F, false, False 将直接作为 BOOL 型来处理。
@@ -69,7 +69,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
需要注意的是,这里的 tag_key1, tag_key2 并不是用户输入的标签的原始顺序,而是使用了标签名称按照字符串升序排列后的结果。所以,tag_key1 并不是在行协议中输入的第一个标签。
排列完成以后计算该字符串的 MD5 散列值 "md5_val"。然后将计算的结果与字符串组合生成表名:“t_md5_val”。其中的 “t_” 是固定的前缀,每个通过该映射关系自动生成的表都具有该前缀。
-为了让用户可以指定生成的表名,可以通过配置smlChildTableName来指定(比如 配置smlChildTableName=tname 插入数据为st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为cpu1,注意如果多行数据tname相同,但是后面的tag_set不同,则使用第一次自动建表时指定的tag_set,其他的会忽略)。
+为了让用户可以指定生成的表名,可以通过配置 smlChildTableName 来指定(比如 配置 smlChildTableName=tname 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一次自动建表时指定的 tag_set,其他的会忽略)。
2. 如果解析行协议获得的超级表不存在,则会创建这个超级表(不建议手动创建超级表,不然插入数据可能异常)。
3. 如果解析行协议获得子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。
@@ -78,11 +78,11 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
NULL。
6. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。
7. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。
-8. 为了提高写入的效率,默认假设同一个超级表中field_set的顺序是一样的(第一条数据包含所有的field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数smlDataFormat为false,否则,数据写入按照相同顺序写入,库中数据会异常。
+8. 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常。
:::tip
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
-16KB。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit)
+16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit)
:::
diff --git a/docs/zh/14-reference/14-taosKeeper.md b/docs/zh/14-reference/14-taosKeeper.md
index f1165c9d0f01b6812c261c6f095f38fca55c44d8..ae0a496f03e8e545525fce49ae2394a10696c09c 100644
--- a/docs/zh/14-reference/14-taosKeeper.md
+++ b/docs/zh/14-reference/14-taosKeeper.md
@@ -1,7 +1,7 @@
---
sidebar_label: taosKeeper
title: taosKeeper
-description: TDengine taosKeeper 使用说明
+description: TDengine 3.0 版本监控指标的导出工具
---
## 简介
@@ -22,26 +22,36 @@ taosKeeper 安装方式:
### 配置和运行方式
-
-taosKeeper 需要在操作系统终端执行,该工具支持 [配置文件启动](#配置文件启动)。
+taosKeeper 需要在操作系统终端执行,该工具支持三种配置方式:[命令行参数](#命令行参数启动)、[环境变量](#环境变量启动) 和 [配置文件](#配置文件启动)。优先级为:命令行参数、环境变量、配置文件参数。
**在运行 taosKeeper 之前要确保 TDengine 集群与 taosAdapter 已经在正确运行。** 并且 TDengine 已经开启监控服务,具体请参考:[TDengine 监控配置](../config/#监控相关)。
-
+
+### 环境变量启动
+
+通过设置环境变量达到控制启动参数的目的,通常在容器中运行时使用。
+
+```shell
+$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
+
+$ taoskeeper
+```
+
+具体参数列表请参照 `taoskeeper -h` 输入结果。
+
### 配置文件启动
执行以下命令即可快速体验 taosKeeper。当不指定 taosKeeper 配置文件时,优先使用 `/etc/taos/keeper.toml` 配置,否则将使用默认配置。
```shell
-taoskeeper -c
+$ taoskeeper -c
```
**下面是配置文件的示例:**
@@ -110,7 +120,7 @@ Query OK, 1 rows in database (0.036162s)
#### 导出监控指标
```shell
-curl http://127.0.0.1:6043/metrics
+$ curl http://127.0.0.1:6043/metrics
```
部分结果集:
diff --git a/docs/zh/14-reference/index.md b/docs/zh/14-reference/index.md
index e9c0c4fe236b8eefec1275a447c1dd1188921ee0..9d0a44af577beba67c445dac1cfcac0475e0ce3f 100644
--- a/docs/zh/14-reference/index.md
+++ b/docs/zh/14-reference/index.md
@@ -1,5 +1,6 @@
---
title: 参考手册
+description: TDengine 中的各种组件的详细说明
---
参考手册是对 TDengine 本身、 TDengine 各语言连接器及自带的工具最详细的介绍。
diff --git a/docs/zh/17-operation/01-pkg-install.md b/docs/zh/17-operation/01-pkg-install.md
index 5e4cc931309ea8bf45b1840a7da04e336434bdab..6d93c1697b1e0936b3f6539d3b1fb95db0baa956 100644
--- a/docs/zh/17-operation/01-pkg-install.md
+++ b/docs/zh/17-operation/01-pkg-install.md
@@ -47,43 +47,99 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
-内容 TBD
+TDengine 卸载命令如下:
+
+```
+$ sudo apt-get remove tdengine
+Reading package lists... Done
+Building dependency tree
+Reading state information... Done
+The following packages will be REMOVED:
+ tdengine
+0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded.
+After this operation, 68.3 MB disk space will be freed.
+Do you want to continue? [Y/n] y
+(Reading database ... 135625 files and directories currently installed.)
+Removing tdengine (3.0.0.0) ...
+TDengine is removed successfully!
+
+```
+
+taosTools 卸载命令如下:
+
+```
+$ sudo apt remove taostools
+Reading package lists... Done
+Building dependency tree
+Reading state information... Done
+The following packages will be REMOVED:
+ taostools
+0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded.
+After this operation, 68.3 MB disk space will be freed.
+Do you want to continue? [Y/n]
+(Reading database ... 147973 files and directories currently installed.)
+Removing taostools (2.1.2) ...
+```
-卸载命令如下:
+TDengine 卸载命令如下:
```
$ sudo dpkg -r tdengine
(Reading database ... 120119 files and directories currently installed.)
-Removing tdengine (3.0.0.10002) ...
+Removing tdengine (3.0.0.0) ...
TDengine is removed successfully!
```
+taosTools 卸载命令如下:
+
+```
+$ sudo dpkg -r taostools
+(Reading database ... 147973 files and directories currently installed.)
+Removing taostools (2.1.2) ...
+```
+
-卸载命令如下:
+卸载 TDengine 命令如下:
```
$ sudo rpm -e tdengine
TDengine is removed successfully!
```
+卸载 taosTools 命令如下:
+
+```
+sudo rpm -e taostools
+taosTools is removed successfully!
+```
+
-卸载命令如下:
+卸载 TDengine 命令如下:
```
$ rmtaos
TDengine is removed successfully!
```
+卸载 taosTools 命令如下:
+
+```
+$ rmtaostools
+Start to uninstall taos tools ...
+
+taos tools is uninstalled successfully!
+```
+
在 C:\TDengine 目录下,通过运行 unins000.exe 卸载程序来卸载 TDengine。
diff --git a/docs/zh/17-operation/02-planning.mdx b/docs/zh/17-operation/02-planning.mdx
index 0d63c4eaf365036cbba1d838ba6ee860a894724d..28e3f54020632e84721c20a9f63ee2a6117e6a03 100644
--- a/docs/zh/17-operation/02-planning.mdx
+++ b/docs/zh/17-operation/02-planning.mdx
@@ -1,6 +1,7 @@
---
sidebar_label: 容量规划
title: 容量规划
+description: 如何规划一个 TDengine 集群所需的物理资源
---
使用 TDengine 来搭建一个物联网大数据平台,计算资源、存储资源需要根据业务场景进行规划。下面分别讨论系统运行所需要的内存、CPU 以及硬盘空间。
diff --git a/docs/zh/17-operation/03-tolerance.md b/docs/zh/17-operation/03-tolerance.md
index 1ce485b042d6900ccc1c1bc3bcb6779e14b776ff..79cf10c39a7028e04e7c1ebbea54738dcdc528af 100644
--- a/docs/zh/17-operation/03-tolerance.md
+++ b/docs/zh/17-operation/03-tolerance.md
@@ -1,5 +1,7 @@
---
title: 容错和灾备
+sidebar_label: 容错和灾备
+description: TDengine 的容错和灾备功能
---
## 容错
diff --git a/docs/zh/17-operation/07-import.md b/docs/zh/17-operation/07-import.md
index 7dee05720d4c3446181e8e0d81a5c27e35300ba8..17945be595f9176a528e52d2344b5cd0545c3426 100644
--- a/docs/zh/17-operation/07-import.md
+++ b/docs/zh/17-operation/07-import.md
@@ -1,5 +1,6 @@
---
title: 数据导入
+description: 如何导入外部数据到 TDengine
---
TDengine 提供多种方便的数据导入功能,一种按脚本文件导入,一种按数据文件导入,一种是 taosdump 工具导入本身导出的文件。
diff --git a/docs/zh/17-operation/08-export.md b/docs/zh/17-operation/08-export.md
index 042ecc7ba29f976d50bbca1e3155bd03b2ae7ccc..44247e28bdf5ec48ccd05ab6f7e4d3558cf23103 100644
--- a/docs/zh/17-operation/08-export.md
+++ b/docs/zh/17-operation/08-export.md
@@ -1,12 +1,13 @@
---
title: 数据导出
+description: 如何导出 TDengine 中的数据
---
为方便数据导出,TDengine 提供了两种导出方式,分别是按表导出和用 taosdump 导出。
## 按表导出 CSV 文件
-如果用户需要导出一个表或一个 STable 中的数据,可在 taos shell 中运行:
+如果用户需要导出一个表或一个 STable 中的数据,可在 TDengine CLI 中运行:
```sql
select * from >> data.csv;
diff --git a/docs/zh/17-operation/10-monitor.md b/docs/zh/17-operation/10-monitor.md
index e30be775fb5c337b2a621bea92d3af31a2cb5cc0..e936f35dcac544ad94035b5e5c9716c4aa50562e 100644
--- a/docs/zh/17-operation/10-monitor.md
+++ b/docs/zh/17-operation/10-monitor.md
@@ -1,14 +1,15 @@
---
title: 系统监控
+description: 监控 TDengine 的运行状态
---
-TDengine 启动后,会自动创建一个监测数据库 log,并自动将服务器的 CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine 还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在 log 库里。系统管理员可以从 CLI 直接查看这个数据库,也可以在 WEB 通过图形化界面查看这些监测信息。
+TDengine 通过 [taosKeeper](/reference/taosKeeper/) 将服务器的 CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度等信息定时写入指定数据库。TDengine 还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息进行记录。系统管理员可以从 CLI 直接查看这个数据库,也可以在 WEB 通过图形化界面查看这些监测信息。
这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项 monitor 将其关闭或打开。
## TDinsight - 使用监控数据库 + Grafana 对 TDengine 进行监控的解决方案
-从 2.3.3.0 开始,监控数据库将提供更多的监控项,您可以从 [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) 了解如何使用 TDinsight 方案对 TDengine 进行监控。
+监控数据库将提供更多的监控项,您可以从 [TDinsight Grafana Dashboard](/reference/tdinsight/) 了解如何使用 TDinsight 方案对 TDengine 进行监控。
我们提供了一个自动化脚本 `TDinsight.sh` 对 TDinsight 进行部署。
@@ -34,21 +35,6 @@ chmod +x TDinsight.sh
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E
```
- - 使用 TDengine 数据源插件内置的阿里云短信告警通知,使用 `-s` 启用之,并设置如下参数:
-
- 1. 阿里云短信服务 Key ID,参数 `-I`
- 2. 阿里云短信服务 Key Secret,参数 `K`
- 3. 阿里云短信服务签名,参数 `-S`
- 4. 短信通知模板号,参数 `-C`
- 5. 短信通知模板输入参数,JSON 格式,参数 `-T`,如 `{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}`
- 6. 逗号分隔的通知手机列表,参数 `-B`
-
- ```bash
- sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \
- -I XXXXXXX -K XXXXXXXX -S taosdata -C SMS_1111111 -B 18900000000 \
- -T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}'
- ```
-
运行程序并重启 Grafana 服务,打开面板:`http://localhost:3000/d/tdinsight`。
更多使用场景和限制请参考[TDinsight](/reference/tdinsight/) 文档。
diff --git a/docs/zh/17-operation/17-diagnose.md b/docs/zh/17-operation/17-diagnose.md
index e6e9be7153dee855867c4ba4fcd1d3258c9d788f..ec529096a7513bd625131939d67c61279721b961 100644
--- a/docs/zh/17-operation/17-diagnose.md
+++ b/docs/zh/17-operation/17-diagnose.md
@@ -1,5 +1,6 @@
---
title: 诊断及其他
+description: 一些常见问题的诊断技巧
---
## 网络连接诊断
diff --git a/docs/zh/20-third-party/01-grafana.mdx b/docs/zh/20-third-party/01-grafana.mdx
index becb1a70a908ad27a93a763ac46343b0ec46769d..83f3f8bb25de4b99a345bafab7e8a43c3d35f14e 100644
--- a/docs/zh/20-third-party/01-grafana.mdx
+++ b/docs/zh/20-third-party/01-grafana.mdx
@@ -1,6 +1,7 @@
---
sidebar_label: Grafana
title: Grafana
+description: 使用 Grafana 与 TDengine 的详细说明
---
import Tabs from "@theme/Tabs";
diff --git a/docs/zh/20-third-party/02-prometheus.md b/docs/zh/20-third-party/02-prometheus.md
index 0fe534b8df263064e5269e1732b69893efd7a79a..eb6c3bf1d0b5f6e5d8146566969df41dbad5bf99 100644
--- a/docs/zh/20-third-party/02-prometheus.md
+++ b/docs/zh/20-third-party/02-prometheus.md
@@ -1,6 +1,7 @@
---
sidebar_label: Prometheus
title: Prometheus
+description: 使用 Prometheus 访问 TDengine
---
import Prometheus from "../14-reference/_prometheus.mdx"
diff --git a/docs/zh/20-third-party/03-telegraf.md b/docs/zh/20-third-party/03-telegraf.md
index 88a69211c0592940d7f75d34c03bcc0593cd74d6..84883e665a84db89d564314a0e47f9caab04d6ff 100644
--- a/docs/zh/20-third-party/03-telegraf.md
+++ b/docs/zh/20-third-party/03-telegraf.md
@@ -1,6 +1,7 @@
---
sidebar_label: Telegraf
title: Telegraf 写入
+description: 使用 Telegraf 向 TDengine 写入数据
---
import Telegraf from "../14-reference/_telegraf.mdx"
diff --git a/docs/zh/20-third-party/05-collectd.md b/docs/zh/20-third-party/05-collectd.md
index 04892fd42e92e962fcccadf626f67c432e78d286..cc2235f2600ec44425a2f22f39dc3c58a4ccdd5a 100644
--- a/docs/zh/20-third-party/05-collectd.md
+++ b/docs/zh/20-third-party/05-collectd.md
@@ -1,6 +1,7 @@
---
sidebar_label: collectd
title: collectd 写入
+description: 使用 collectd 向 TDengine 写入数据
---
import CollectD from "../14-reference/_collectd.mdx"
diff --git a/docs/zh/20-third-party/06-statsd.md b/docs/zh/20-third-party/06-statsd.md
index 260d01183598826e1c887164d0b1b146c5e80c95..122c9fd94c57ef4979d432e2a45cc5136b1644b2 100644
--- a/docs/zh/20-third-party/06-statsd.md
+++ b/docs/zh/20-third-party/06-statsd.md
@@ -1,6 +1,7 @@
---
sidebar_label: StatsD
title: StatsD 直接写入
+description: 使用 StatsD 向 TDengine 写入数据
---
import StatsD from "../14-reference/_statsd.mdx"
diff --git a/docs/zh/20-third-party/07-icinga2.md b/docs/zh/20-third-party/07-icinga2.md
index ed1f1404a730eca5f51e2ff9bbcd54949018f8ea..06ead57655cfad7bcf88945780dbed52e9c58e16 100644
--- a/docs/zh/20-third-party/07-icinga2.md
+++ b/docs/zh/20-third-party/07-icinga2.md
@@ -1,6 +1,7 @@
---
sidebar_label: icinga2
title: icinga2 写入
+description: 使用 icinga2 写入 TDengine
---
import Icinga2 from "../14-reference/_icinga2.mdx"
diff --git a/docs/zh/20-third-party/08-tcollector.md b/docs/zh/20-third-party/08-tcollector.md
index a1245e8c27f302d56f88fa382b5f38f9bd49a0aa..78d0b4a5dfda0c1a18908f5a0f5f9314e82e3737 100644
--- a/docs/zh/20-third-party/08-tcollector.md
+++ b/docs/zh/20-third-party/08-tcollector.md
@@ -1,6 +1,7 @@
---
sidebar_label: TCollector
title: TCollector 写入
+description: 使用 TCollector 写入 TDengine
---
import TCollector from "../14-reference/_tcollector.mdx"
diff --git a/docs/zh/20-third-party/09-emq-broker.md b/docs/zh/20-third-party/09-emq-broker.md
index dd98374558080a0ea11cbc22ede58b66a3984191..782a139e223456d0f3484d282d641075be1a3f81 100644
--- a/docs/zh/20-third-party/09-emq-broker.md
+++ b/docs/zh/20-third-party/09-emq-broker.md
@@ -1,6 +1,7 @@
---
sidebar_label: EMQX Broker
title: EMQX Broker 写入
+description: 使用 EMQX Broker 写入 TDengine
---
MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/emqx)是一开源的 MQTT Broker 软件,无需任何代码,只需要在 EMQX Dashboard 里使用“规则”做简单配置,即可将 MQTT 的数据直接写入 TDengine。EMQX 支持通过 发送到 Web 服务的方式保存数据到 TDengine,也在企业版上提供原生的 TDengine 驱动实现直接保存。
@@ -90,7 +91,7 @@ http://127.0.0.1:6041/rest/sql
```
Basic cm9vdDp0YW9zZGF0YQ==
```
-相关文档请参考[ TDengine REST API 文档](/reference/rest-api/)。
+相关文档请参考[ TDengine REST API 文档](../../connector/rest-api/)。
在消息体中输入规则引擎替换模板:
diff --git a/docs/zh/20-third-party/10-hive-mq-broker.md b/docs/zh/20-third-party/10-hive-mq-broker.md
index f75ed793d6272ae27f92676e2096ef455f638aa6..a388ff6daff41aa6f74af646f6121a360da56f36 100644
--- a/docs/zh/20-third-party/10-hive-mq-broker.md
+++ b/docs/zh/20-third-party/10-hive-mq-broker.md
@@ -1,6 +1,7 @@
---
sidebar_label: HiveMQ Broker
title: HiveMQ Broker 写入
+description: 使用 HiveMQ Broker 写入 TDengine
---
[HiveMQ](https://www.hivemq.com/) 是一个提供免费个人版和企业版的 MQTT 代理,主要用于企业和新兴的机器到机器 M2M 通讯和内部传输,满足可伸缩性、易管理和安全特性。HiveMQ 提供了开源的插件开发包。可以通过 HiveMQ extension - TDengine 保存数据到 TDengine。详细使用方法请参考 [HiveMQ extension - TDengine 说明文档](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md)。
diff --git a/docs/zh/20-third-party/11-kafka.md b/docs/zh/20-third-party/11-kafka.md
index 8369806adcfe1b195348e7d60160609cde9150e8..1172f4fbc5bcd9f240bd5e2a47108a8791810e76 100644
--- a/docs/zh/20-third-party/11-kafka.md
+++ b/docs/zh/20-third-party/11-kafka.md
@@ -1,6 +1,7 @@
---
sidebar_label: Kafka
-title: TDengine Kafka Connector 使用教程
+title: TDengine Kafka Connector
+description: 使用 TDengine Kafka Connector 的详细指南
---
TDengine Kafka Connector 包含两个插件: TDengine Source Connector 和 TDengine Sink Connector。用户只需提供简单的配置文件,就可以将 Kafka 中指定 topic 的数据(批量或实时)同步到 TDengine, 或将 TDengine 中指定数据库的数据(批量或实时)同步到 Kafka。
@@ -184,7 +185,7 @@ echo `cat /tmp/confluent.current`/connect/connect.stdout
TDengine Sink Connector 的作用是同步指定 topic 的数据到 TDengine。用户无需提前创建数据库和超级表。可手动指定目标数据库的名字(见配置参数 connection.database), 也可按一定规则生成(见配置参数 connection.database.prefix)。
-TDengine Sink Connector 内部使用 TDengine [无模式写入接口](/reference/connector/cpp#无模式写入-api)写数据到 TDengine,目前支持三种格式的数据:[InfluxDB 行协议格式](/develop/insert-data/influxdb-line)、 [OpenTSDB Telnet 协议格式](/develop/insert-data/opentsdb-telnet) 和 [OpenTSDB JSON 协议格式](/develop/insert-data/opentsdb-json)。
+TDengine Sink Connector 内部使用 TDengine [无模式写入接口](../../connector/cpp#无模式写入-api)写数据到 TDengine,目前支持三种格式的数据:[InfluxDB 行协议格式](/develop/insert-data/influxdb-line)、 [OpenTSDB Telnet 协议格式](/develop/insert-data/opentsdb-telnet) 和 [OpenTSDB JSON 协议格式](/develop/insert-data/opentsdb-json)。
下面的示例将主题 meters 的数据,同步到目标数据库 power。数据格式为 InfluxDB Line 协议格式。
diff --git a/docs/zh/20-third-party/12-google-data-studio.md b/docs/zh/20-third-party/12-google-data-studio.md
new file mode 100644
index 0000000000000000000000000000000000000000..bc06f0ea3261bcd93247e0c7b8e1d6c3628f3121
--- /dev/null
+++ b/docs/zh/20-third-party/12-google-data-studio.md
@@ -0,0 +1,39 @@
+---
+sidebar_label: Google Data Studio
+title: TDengine Google Data Studio Connector
+description: 使用 Google Data Studio 存取 TDengine 数据的详细指南
+---
+
+Google Data Studio 是一个强大的报表可视化工具,它提供了丰富的数据图表和数据连接,可以非常方便地按照既定模板生成报表。因其简便易用和生态丰富而在数据分析领域得到一众数据科学家的青睐。
+
+Data Studio 可以支持多种数据来源,除了诸如 Google Analytics、Google AdWords、Search Console、BigQuery 等 Google 自己的服务之外,用户也可以直接将离线文件上传至 Google Cloud Storage,或是通过连接器来接入其它数据源。
+
+![01](gds/gds-01.webp)
+
+目前 TDengine 连接器已经发布到 Google Data Studio 应用商店,你可以在 “Connect to Data” 页面下直接搜索 TDengine,将其选作数据源。
+
+![02](gds/gds-02.png.webp)
+
+接下来选择 AUTHORIZE 按钮。
+
+![03](gds/gds-03.png.webp)
+
+设置允许连接自己的账号到外部服务。
+
+![04](gds/gds-04.png.webp)
+
+在接下来的页面选择运行 TDengine REST 服务的 URL,并输入用户名、密码、数据库名称、表名称以及查询时间范围,并点击右上角的 CONNECT 按钮。
+
+![05](gds/gds-05.png.webp)
+
+连接成功后,就可以使用 GDS 方便地进行数据处理并创建报表了。
+
+![06](gds/gds-06.png.webp)
+
+目前的维度和指标规则是:timestamp 类型的字段和 tag 字段会被连接器定义为维度,而其他类型的字段是指标。用户还可以根据自己的需求创建不同的表。
+
+![07](gds/gds-07.png.webp)
+![08](gds/gds-08.png.webp)
+![09](gds/gds-09.png.webp)
+![10](gds/gds-10.png.webp)
+![11](gds/gds-11.png.webp)
diff --git a/docs/zh/20-third-party/gds/gds-01.webp b/docs/zh/20-third-party/gds/gds-01.webp
new file mode 100644
index 0000000000000000000000000000000000000000..2e5f9e4ff5db1e37718e2397c9a13a9f0e05602d
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-01.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-02.png.webp b/docs/zh/20-third-party/gds/gds-02.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..3b3537f5a488019482f94452e70bd1bd79867ab5
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-02.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-03.png.webp b/docs/zh/20-third-party/gds/gds-03.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..5719436d5b2f21aa861067b966511e4b34d17dce
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-03.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-04.png.webp b/docs/zh/20-third-party/gds/gds-04.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..ddaae5c1a63b6b4db692e12491df55b88dcaadee
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-04.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-05.png.webp b/docs/zh/20-third-party/gds/gds-05.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..9a917678fc7e60f0a739fa1e2b0f4fa010d12708
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-05.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-06.png.webp b/docs/zh/20-third-party/gds/gds-06.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..c76b68d32b5907bd5ba4e4010456f2ca5303448f
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-06.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-07.png.webp b/docs/zh/20-third-party/gds/gds-07.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..1386ae9c4db4f2465dd071afc5a047658b47031c
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-07.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-08.png.webp b/docs/zh/20-third-party/gds/gds-08.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..59dcf8b31df8bde8d4073ee0c7b1c7bdd7bd439d
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-08.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-09.png.webp b/docs/zh/20-third-party/gds/gds-09.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..b94439f211a814f66d41231c9386c57f3ffe8322
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-09.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-10.png.webp b/docs/zh/20-third-party/gds/gds-10.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a63cad9e9a3d412b1132359506530498fb1a0e57
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-10.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-11.png.webp b/docs/zh/20-third-party/gds/gds-11.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..fc38cd9a29c00afa48238741c33b439f737a7b8f
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-11.png.webp differ
diff --git a/docs/zh/21-tdinternal/01-arch.md b/docs/zh/21-tdinternal/01-arch.md
index a910c584d6ba47844d51e45e5010581075a72fb6..704524fd210152af34e15d248d3d4dbe050e4fef 100644
--- a/docs/zh/21-tdinternal/01-arch.md
+++ b/docs/zh/21-tdinternal/01-arch.md
@@ -1,6 +1,7 @@
---
sidebar_label: 整体架构
title: 整体架构
+description: TDengine 架构设计,包括:集群、存储、缓存与持久化、数据备份、多级存储等
---
## 集群与基本逻辑单元
@@ -287,7 +288,7 @@ TDengine 对每个数据采集点单独建表,但在实际应用中经常需
7. vnode 返回本节点的查询计算结果;
8. qnode 完成多节点数据聚合后将最终查询结果返回给客户端;
-由于 TDengine 在 vnode 内将标签数据与时序数据分离存储,通过在内存里过滤标签数据,先找到需要参与聚合操作的表的集合,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个 vnode/dnode,聚合计算操作在多个 vnode 里并发进行,又进一步提升了聚合的速度。 对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TAOS SQL。
+由于 TDengine 在 vnode 内将标签数据与时序数据分离存储,通过在内存里过滤标签数据,先找到需要参与聚合操作的表的集合,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个 vnode/dnode,聚合计算操作在多个 vnode 里并发进行,又进一步提升了聚合的速度。 对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TDengine SQL。
### 预计算
diff --git a/docs/zh/21-tdinternal/03-high-availability.md b/docs/zh/21-tdinternal/03-high-availability.md
index ba056b6f162df90fcb271fe536a2b24d0745f75a..4cdf04f6d14d73a819f90bc2317a713c90fa9b91 100644
--- a/docs/zh/21-tdinternal/03-high-availability.md
+++ b/docs/zh/21-tdinternal/03-high-availability.md
@@ -1,5 +1,6 @@
---
title: 高可用
+description: TDengine 的高可用设计
---
## Vnode 的高可用性
diff --git a/docs/zh/21-tdinternal/05-load-balance.md b/docs/zh/21-tdinternal/05-load-balance.md
index 2376dd3e612a00006eaf2fc7b1782da3901908bc..07af2328d52573343fb28c045b25785f6822191f 100644
--- a/docs/zh/21-tdinternal/05-load-balance.md
+++ b/docs/zh/21-tdinternal/05-load-balance.md
@@ -1,5 +1,6 @@
---
title: 负载均衡
+description: TDengine 的负载均衡设计
---
TDengine 中的负载均衡主要指对时序数据的处理的负载均衡。TDengine 采用 Hash 一致性算法将一个数据库中的所有表和子表的数据均衡分散在属于该数据库的所有 vgroup 中,每张表或子表只能由一个 vgroup 处理,一个 vgroup 可能负责处理多个表或子表。
@@ -7,7 +8,7 @@ TDengine 中的负载均衡主要指对时序数据的处理的负载均衡。TD
创建数据库时可以指定其中的 vgroup 的数量:
```sql
-create database db0 vgroups 100;
+create database db0 vgroups 20;
```
如何指定合适的 vgroup 的数量,这取决于系统资源。假定系统中只计划建立一个数据库,则 vgroup 数量由集群中所有 dnode 所能使用的资源决定。原则上可用的 CPU 和 Memory 越多,可建立的 vgroup 也越多。但也要考虑到磁盘性能,过多的 vgroup 在磁盘性能达到上限后反而会拖累整个系统的性能。假如系统中会建立多个数据库,则多个数据库的 vgroup 之和取决于系统中可用资源的数量。要综合考虑多个数据库之间表的数量、写入频率、数据量等多个因素在多个数据库之间分配 vgroup。实际中建议首先根据系统资源配置选择一个初始的 vgroup 数量,比如 CPU 总核数的 2 倍,以此为起点通过测试找到最佳的 vgroup 数量配置,此为系统中的 vgroup 总数。如果有多个数据库的话,再根据各个数据库的表数和数据量对 vgroup 进行分配。
diff --git a/docs/zh/21-tdinternal/index.md b/docs/zh/21-tdinternal/index.md
index 63a746623e0dd955f61ba887a76f8ecf7eb16972..21f106edc999972f9e1cc4b04bc8308878cee56a 100644
--- a/docs/zh/21-tdinternal/index.md
+++ b/docs/zh/21-tdinternal/index.md
@@ -1,5 +1,6 @@
---
title: 技术内幕
+description: TDengine 的内部设计
---
```mdx-code-block
diff --git a/docs/zh/25-application/01-telegraf.md b/docs/zh/25-application/01-telegraf.md
index 95df8699ef85b02d6e9dba398c787644fc9089b2..4e9597f96454730ebcdee5adeebf55439923e8e7 100644
--- a/docs/zh/25-application/01-telegraf.md
+++ b/docs/zh/25-application/01-telegraf.md
@@ -1,6 +1,7 @@
---
sidebar_label: TDengine + Telegraf + Grafana
-title: 使用 TDengine + Telegraf + Grafana 快速搭建 IT 运维展示系统
+title: TDengine + Telegraf + Grafana
+description: 使用 TDengine + Telegraf + Grafana 快速搭建 IT 运维展示系统
---
## 背景介绍
@@ -34,7 +35,7 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如
### TDengine
-从涛思数据官网[下载](http://taosdata.com/cn/all-downloads/)页面下载最新 TDengine-server 2.4.0.x 或以上版本安装。
+从涛思数据官网[下载](http://taosdata.com/cn/all-downloads/)页面下载最新 TDengine-server 版本安装。
## 数据链路设置
@@ -79,4 +80,4 @@ sudo systemctl start telegraf
## 总结
-以上演示如何快速搭建一个完整的 IT 运维展示系统。得力于 TDengine 2.4.0.0 版本中新增的 schemaless 协议解析功能,以及强大的生态软件适配能力,用户可以短短数分钟就可以搭建一个高效易用的 IT 运维系统。TDengine 强大的数据写入查询性能和其他丰富功能请参考官方文档和产品落地案例。
+以上演示如何快速搭建一个完整的 IT 运维展示系统。得力于 TDengine 的 schemaless 协议解析功能,以及强大的生态软件适配能力,用户可以短短数分钟就可以搭建一个高效易用的 IT 运维系统。TDengine 强大的数据写入查询性能和其他丰富功能请参考官方文档和产品落地案例。
diff --git a/docs/zh/25-application/02-collectd.md b/docs/zh/25-application/02-collectd.md
index 78c61bb969092d7040ddcb3d02ce7bd29a784858..c6230f48abb545e3064f406d9005a4a3ba8ea5ba 100644
--- a/docs/zh/25-application/02-collectd.md
+++ b/docs/zh/25-application/02-collectd.md
@@ -1,6 +1,7 @@
---
sidebar_label: TDengine + collectd/StatsD + Grafana
-title: 使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统
+title: TDengine + collectd/StatsD + Grafana
+description: 使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统
---
## 背景介绍
@@ -36,7 +37,7 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如
### 安装 TDengine
-从涛思数据官网[下载](http://taosdata.com/cn/all-downloads/)页面下载最新 TDengine-server 2.4.0.x 或以上版本安装。
+从涛思数据官网[下载](http://taosdata.com/cn/all-downloads/)页面下载最新 TDengine-server 版本安装。
## 数据链路设置
@@ -90,6 +91,6 @@ repeater 部分添加 { host:'', port:
diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md
new file mode 100644
index 0000000000000000000000000000000000000000..e3e146313115fee12e539a161792234c2df671a5
--- /dev/null
+++ b/docs/zh/28-releases/01-tdengine.md
@@ -0,0 +1,16 @@
+---
+sidebar_label: TDengine 发布历史
+title: TDengine 发布历史
+description: TDengine 发布历史、Release Notes 及下载链接
+---
+
+import Release from "/components/ReleaseV3";
+
+## 3.0.0.1
+
+
+
+
+
diff --git a/docs/zh/28-releases/02-tools.md b/docs/zh/28-releases/02-tools.md
new file mode 100644
index 0000000000000000000000000000000000000000..61129d74e57504286660a178f757cb816b75dbb5
--- /dev/null
+++ b/docs/zh/28-releases/02-tools.md
@@ -0,0 +1,11 @@
+---
+sidebar_label: taosTools 发布历史
+title: taosTools 发布历史
+description: taosTools 的发布历史、Release Notes 和下载链接
+---
+
+import Release from "/components/ReleaseV3";
+
+## 2.1.2
+
+
\ No newline at end of file
diff --git a/docs/zh/28-releases/_category_.yml b/docs/zh/28-releases/_category_.yml
new file mode 100644
index 0000000000000000000000000000000000000000..dcd57247d7629e0bd46a22394c79182fccb22ede
--- /dev/null
+++ b/docs/zh/28-releases/_category_.yml
@@ -0,0 +1 @@
+label: 发布历史
\ No newline at end of file
diff --git a/examples/JDBC/JDBCDemo/README-jdbc-windows.md b/examples/JDBC/JDBCDemo/README-jdbc-windows.md
index 17c5c8df00ab8727d1adfe493d3fbbd32891a676..5a781f40f730218286edb9f6a7f184ee79e7a5fc 100644
--- a/examples/JDBC/JDBCDemo/README-jdbc-windows.md
+++ b/examples/JDBC/JDBCDemo/README-jdbc-windows.md
@@ -129,7 +129,7 @@ https://www.taosdata.com/cn/all-downloads/
192.168.236.136 td01
```
-配置完成后,在命令行内使用taos shell连接server端
+配置完成后,在命令行内使用TDengine CLI连接server端
```shell
C:\TDengine>taos -h td01
diff --git a/examples/c/CMakeLists.txt b/examples/c/CMakeLists.txt
index 9d06dbac6dc3ba9d4dcafe6d8316b52e1b3daeca..4a9007acecaa679dc716c5665eea7f0cd1e34dbb 100644
--- a/examples/c/CMakeLists.txt
+++ b/examples/c/CMakeLists.txt
@@ -13,15 +13,9 @@ IF (TD_LINUX)
#TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua)
add_executable(tmq "")
- add_executable(tmq_taosx "")
add_executable(stream_demo "")
add_executable(demoapi "")
- target_sources(tmq_taosx
- PRIVATE
- "tmq_taosx.c"
- )
-
target_sources(tmq
PRIVATE
"tmq.c"
@@ -41,10 +35,6 @@ IF (TD_LINUX)
taos_static
)
- target_link_libraries(tmq_taosx
- taos_static
- )
-
target_link_libraries(stream_demo
taos_static
)
@@ -57,10 +47,6 @@ IF (TD_LINUX)
PUBLIC "${TD_SOURCE_DIR}/include/os"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
- target_include_directories(tmq_taosx
- PUBLIC "${TD_SOURCE_DIR}/include/os"
- PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
- )
target_include_directories(stream_demo
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
@@ -73,7 +59,6 @@ IF (TD_LINUX)
)
SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
- SET_TARGET_PROPERTIES(tmq_taosx PROPERTIES OUTPUT_NAME tmq_taosx)
SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo)
SET_TARGET_PROPERTIES(demoapi PROPERTIES OUTPUT_NAME demoapi)
ENDIF ()
diff --git a/examples/c/stream_demo.c b/examples/c/stream_demo.c
index 2fcf4dd62c1e0a2f5aabda4ce5eb9fae6aa72be8..1c9d11b755f77bf259e45d77c6e5983c3747835a 100644
--- a/examples/c/stream_demo.c
+++ b/examples/c/stream_demo.c
@@ -13,6 +13,7 @@
* along with this program. If not, see .
*/
+// clang-format off
#include
#include
#include
@@ -94,13 +95,8 @@ int32_t create_stream() {
}
taos_free_result(pRes);
- /*const char* sql = "select min(k), max(k), sum(k) from tu1";*/
- /*const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";*/
- /*const char* sql = "select sum(k) from tu1 interval(10m)";*/
- /*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/
pRes = taos_query(pConn,
- "create stream stream1 trigger max_delay 10s watermark 10s into outstb as select _wstart start, "
- "count(k) from st1 partition by tbname interval(20s) ");
+ "create stream stream1 trigger at_once watermark 10s into outstb as select _wstart start, avg(k) from st1 partition by tbname interval(10s)");
if (taos_errno(pRes) != 0) {
printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes));
return -1;
diff --git a/examples/c/tmq_taosx.c b/examples/c/tmq_taosx.c
deleted file mode 100644
index d0def4426905b773db948b0cf6f0d22c8733d5da..0000000000000000000000000000000000000000
--- a/examples/c/tmq_taosx.c
+++ /dev/null
@@ -1,480 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#include
-#include
-#include
-#include
-#include
-#include "taos.h"
-
-static int running = 1;
-
-static TAOS* use_db(){
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return NULL;
- }
-
- TAOS_RES* pRes = taos_query(pConn, "use db_taosx");
- if (taos_errno(pRes) != 0) {
- printf("error in use db_taosx, reason:%s\n", taos_errstr(pRes));
- return NULL;
- }
- taos_free_result(pRes);
- return pConn;
-}
-
-static void msg_process(TAOS_RES* msg) {
- /*memset(buf, 0, 1024);*/
- printf("-----------topic-------------: %s\n", tmq_get_topic_name(msg));
- printf("db: %s\n", tmq_get_db_name(msg));
- printf("vg: %d\n", tmq_get_vgroup_id(msg));
- TAOS *pConn = use_db();
- if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) {
- char* result = tmq_get_json_meta(msg);
- if (result) {
- printf("meta result: %s\n", result);
- }
- tmq_free_json_meta(result);
- }
-
- tmq_raw_data raw = {0};
- tmq_get_raw(msg, &raw);
- int32_t ret = tmq_write_raw(pConn, raw);
- printf("write raw data: %s\n", tmq_err2str(ret));
-
-// else{
-// while(1){
-// int numOfRows = 0;
-// void *pData = NULL;
-// taos_fetch_raw_block(msg, &numOfRows, &pData);
-// if(numOfRows == 0) break;
-// printf("write data: tbname:%s, numOfRows:%d\n", tmq_get_table_name(msg), numOfRows);
-// int ret = taos_write_raw_block(pConn, numOfRows, pData, tmq_get_table_name(msg));
-// printf("write raw data: %s\n", tmq_err2str(ret));
-// }
-// }
-
- taos_close(pConn);
-}
-
-int32_t init_env() {
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return -1;
- }
-
- TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_taosx");
- if (taos_errno(pRes) != 0) {
- printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create database if not exists db_taosx vgroups 4");
- if (taos_errno(pRes) != 0) {
- printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop database if exists abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in drop db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create database if not exists abc1 vgroups 3");
- if (taos_errno(pRes) != 0) {
- printf("error in create db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn,
- "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
- "nchar(8), t4 bool)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct0 using st1 tags(1000, \"ttt\", true)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct0 values(1626006833600, 1, 2, 'a')");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct1 using st1(t1) tags(2000)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct2 using st1(t1) tags(NULL)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct2, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct1 values(1626006833600, 3, 4, 'b')");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct3 using st1(t1) tags(3000)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833602, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table st1 add column c4 bigint");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table st1 modify column c3 binary(64)");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct3 values(1626006833605, 53, 63, 'cffffffffffffffffffffffffffff', 8989898899999) (1626006833609, 51, 62, 'c333', 940)");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct3 select * from ct1");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table st1 add tag t2 binary(64)");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table ct3 set tag t1=5000");
- if (taos_errno(pRes) != 0) {
- printf("failed to slter child table ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "delete from abc1 .ct3 where ts < 1626006833606");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table ct3 ct1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table st1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))");
- if (taos_errno(pRes) != 0) {
- printf("failed to create normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 add column c3 bigint");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 modify column c2 nchar(8)");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 rename column c3 cc3");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 comment 'hello'");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 drop column c1");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into n1 values(now, 'eeee', 8989898899999) (now+9s, 'c333', 940)");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table n1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table jt1 using jt tags('{\"k1\":1, \"k2\":\"hello\"}')");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table jt2 using jt tags('')");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt2, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn,
- "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
- "nchar(8), t4 bool)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table st1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- taos_close(pConn);
- return 0;
-}
-
-int32_t create_topic() {
- printf("create topic\n");
- TAOS_RES* pRes;
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return -1;
- }
-
- pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
- if (taos_errno(pRes) != 0) {
- printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- taos_close(pConn);
- return 0;
-}
-
-void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
- printf("commit %d tmq %p param %p\n", code, tmq, param);
-}
-
-tmq_t* build_consumer() {
-#if 0
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- assert(pConn != NULL);
-
- TAOS_RES* pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- }
- taos_free_result(pRes);
-#endif
-
- tmq_conf_t* conf = tmq_conf_new();
- tmq_conf_set(conf, "group.id", "tg2");
- tmq_conf_set(conf, "client.id", "my app 1");
- tmq_conf_set(conf, "td.connect.user", "root");
- tmq_conf_set(conf, "td.connect.pass", "taosdata");
- tmq_conf_set(conf, "msg.with.table.name", "true");
- tmq_conf_set(conf, "enable.auto.commit", "true");
-
- /*tmq_conf_set(conf, "experimental.snapshot.enable", "true");*/
-
- tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
- tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
- assert(tmq);
- tmq_conf_destroy(conf);
- return tmq;
-}
-
-tmq_list_t* build_topic_list() {
- tmq_list_t* topic_list = tmq_list_new();
- tmq_list_append(topic_list, "topic_ctb_column");
- /*tmq_list_append(topic_list, "tmq_test_db_multi_insert_topic");*/
- return topic_list;
-}
-
-void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topics))) {
- fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
- printf("subscribe err\n");
- return;
- }
- int32_t cnt = 0;
- while (running) {
- TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, -1);
- if (tmqmessage) {
- cnt++;
- msg_process(tmqmessage);
- /*if (cnt >= 2) break;*/
- /*printf("get data\n");*/
- taos_free_result(tmqmessage);
- /*} else {*/
- /*break;*/
- /*tmq_commit_sync(tmq, NULL);*/
- }
- }
-
- code = tmq_consumer_close(tmq);
- if (code)
- fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
- else
- fprintf(stderr, "%% Consumer closed\n");
-}
-
-void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
- static const int MIN_COMMIT_COUNT = 1;
-
- int msg_count = 0;
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topics))) {
- fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
- return;
- }
-
- tmq_list_t* subList = NULL;
- tmq_subscription(tmq, &subList);
- char** subTopics = tmq_list_to_c_array(subList);
- int32_t sz = tmq_list_get_size(subList);
- printf("subscribed topics: ");
- for (int32_t i = 0; i < sz; i++) {
- printf("%s, ", subTopics[i]);
- }
- printf("\n");
- tmq_list_destroy(subList);
-
- while (running) {
- TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
- if (tmqmessage) {
- msg_process(tmqmessage);
- taos_free_result(tmqmessage);
-
- /*tmq_commit_sync(tmq, NULL);*/
- /*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/
- }
- }
-
- code = tmq_consumer_close(tmq);
- if (code)
- fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
- else
- fprintf(stderr, "%% Consumer closed\n");
-}
-
-int main(int argc, char* argv[]) {
- printf("env init\n");
- if (init_env() < 0) {
- return -1;
- }
- create_topic();
-
- tmq_t* tmq = build_consumer();
- tmq_list_t* topic_list = build_topic_list();
- basic_consume_loop(tmq, topic_list);
- /*sync_consume_loop(tmq, topic_list);*/
-}
diff --git a/examples/nodejs/README-win.md b/examples/nodejs/README-win.md
index 75fec69413af2bb49498118ec7235c9947e2f89e..e496be2f87e3ff0fcc01359f23888734669b0c22 100644
--- a/examples/nodejs/README-win.md
+++ b/examples/nodejs/README-win.md
@@ -35,7 +35,7 @@ Python 2.7.18
下载地址:https://www.taosdata.com/cn/all-downloads/,选择一个合适的windows-client下载(client应该尽量与server端的版本保持一致)
-使用client的taos shell连接server
+使用client的TDengine CLI连接server
```shell
>taos -h node5
diff --git a/include/client/taos.h b/include/client/taos.h
index f260b84f4aaf238badb1de3a6446b639b5681fa9..49cfbb52b80e88103fe6befc6d2818641e731fcf 100644
--- a/include/client/taos.h
+++ b/include/client/taos.h
@@ -254,6 +254,7 @@ enum tmq_res_t {
TMQ_RES_INVALID = -1,
TMQ_RES_DATA = 1,
TMQ_RES_TABLE_META = 2,
+ TMQ_RES_TAOSX = 3,
};
typedef struct tmq_raw_data {
diff --git a/include/common/systable.h b/include/common/systable.h
index ed2e6a46c35006f8f9ffc189a98f3df5e2ac9ade..882c54de952dc044ed30aa6a1aed66145c0db804 100644
--- a/include/common/systable.h
+++ b/include/common/systable.h
@@ -22,54 +22,58 @@ extern "C" {
#ifndef TDENGINE_SYSTABLE_H
#define TDENGINE_SYSTABLE_H
-#define TSDB_INFORMATION_SCHEMA_DB "information_schema"
-#define TSDB_INS_TABLE_DNODES "ins_dnodes"
-#define TSDB_INS_TABLE_MNODES "ins_mnodes"
-#define TSDB_INS_TABLE_MODULES "ins_modules"
-#define TSDB_INS_TABLE_QNODES "ins_qnodes"
-#define TSDB_INS_TABLE_BNODES "ins_bnodes"
-#define TSDB_INS_TABLE_SNODES "ins_snodes"
-#define TSDB_INS_TABLE_CLUSTER "ins_cluster"
-#define TSDB_INS_TABLE_DATABASES "ins_databases"
-#define TSDB_INS_TABLE_FUNCTIONS "ins_functions"
-#define TSDB_INS_TABLE_INDEXES "ins_indexes"
-#define TSDB_INS_TABLE_STABLES "ins_stables"
-#define TSDB_INS_TABLE_TABLES "ins_tables"
-#define TSDB_INS_TABLE_TAGS "ins_tags"
-#define TSDB_INS_TABLE_TABLE_DISTRIBUTED "ins_table_distributed"
-#define TSDB_INS_TABLE_USERS "ins_users"
-#define TSDB_INS_TABLE_LICENCES "ins_grants"
-#define TSDB_INS_TABLE_VGROUPS "ins_vgroups"
-#define TSDB_INS_TABLE_VNODES "ins_vnodes"
-#define TSDB_INS_TABLE_CONFIGS "ins_configs"
-#define TSDB_INS_TABLE_DNODE_VARIABLES "ins_dnode_variables"
+#define TSDB_INFORMATION_SCHEMA_DB "information_schema"
+#define TSDB_INS_TABLE_DNODES "ins_dnodes"
+#define TSDB_INS_TABLE_MNODES "ins_mnodes"
+#define TSDB_INS_TABLE_MODULES "ins_modules"
+#define TSDB_INS_TABLE_QNODES "ins_qnodes"
+#define TSDB_INS_TABLE_BNODES "ins_bnodes"
+#define TSDB_INS_TABLE_SNODES "ins_snodes"
+#define TSDB_INS_TABLE_CLUSTER "ins_cluster"
+#define TSDB_INS_TABLE_DATABASES "ins_databases"
+#define TSDB_INS_TABLE_FUNCTIONS "ins_functions"
+#define TSDB_INS_TABLE_INDEXES "ins_indexes"
+#define TSDB_INS_TABLE_STABLES "ins_stables"
+#define TSDB_INS_TABLE_TABLES "ins_tables"
+#define TSDB_INS_TABLE_TAGS "ins_tags"
+#define TSDB_INS_TABLE_TABLE_DISTRIBUTED "ins_table_distributed"
+#define TSDB_INS_TABLE_USERS "ins_users"
+#define TSDB_INS_TABLE_LICENCES "ins_grants"
+#define TSDB_INS_TABLE_VGROUPS "ins_vgroups"
+#define TSDB_INS_TABLE_VNODES "ins_vnodes"
+#define TSDB_INS_TABLE_CONFIGS "ins_configs"
+#define TSDB_INS_TABLE_DNODE_VARIABLES "ins_dnode_variables"
+#define TSDB_INS_TABLE_SUBSCRIPTIONS "ins_subscriptions"
+#define TSDB_INS_TABLE_TOPICS "ins_topics"
+#define TSDB_INS_TABLE_STREAMS "ins_streams"
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
#define TSDB_PERFS_TABLE_SMAS "perf_smas"
#define TSDB_PERFS_TABLE_CONNECTIONS "perf_connections"
#define TSDB_PERFS_TABLE_QUERIES "perf_queries"
-#define TSDB_PERFS_TABLE_TOPICS "perf_topics"
#define TSDB_PERFS_TABLE_CONSUMERS "perf_consumers"
-#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "perf_subscriptions"
#define TSDB_PERFS_TABLE_OFFSETS "perf_offsets"
#define TSDB_PERFS_TABLE_TRANS "perf_trans"
-#define TSDB_PERFS_TABLE_STREAMS "perf_streams"
#define TSDB_PERFS_TABLE_APPS "perf_apps"
typedef struct SSysDbTableSchema {
const char* name;
const int32_t type;
const int32_t bytes;
+ const bool sysInfo;
} SSysDbTableSchema;
typedef struct SSysTableMeta {
const char* name;
const SSysDbTableSchema* schema;
const int32_t colNum;
+ const bool sysInfo;
} SSysTableMeta;
void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size);
void getPerfDbMeta(const SSysTableMeta** pPerfsTableMeta, size_t* size);
+void getVisibleInfosTablesNum(bool sysInfo, size_t* size);
+bool invisibleColumn(bool sysInfo, int8_t tableType, int8_t flags);
#ifdef __cplusplus
}
diff --git a/include/common/tcommon.h b/include/common/tcommon.h
index dbe020f7ecaf869f2e3fdb99fb86e33c5f873ecb..891c9ab040cfa6acdff55be1889a2bebe01ec2d3 100644
--- a/include/common/tcommon.h
+++ b/include/common/tcommon.h
@@ -44,11 +44,36 @@ enum {
)
// clang-format on
+typedef struct {
+ TSKEY ts;
+ uint64_t groupId;
+} SWinKey;
+
+static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) {
+ SWinKey* pWin1 = (SWinKey*)pKey1;
+ SWinKey* pWin2 = (SWinKey*)pKey2;
+
+ if (pWin1->groupId > pWin2->groupId) {
+ return 1;
+ } else if (pWin1->groupId < pWin2->groupId) {
+ return -1;
+ }
+
+ if (pWin1->ts > pWin2->ts) {
+ return 1;
+ } else if (pWin1->ts < pWin2->ts) {
+ return -1;
+ }
+
+ return 0;
+}
+
enum {
TMQ_MSG_TYPE__DUMMY = 0,
TMQ_MSG_TYPE__POLL_RSP,
TMQ_MSG_TYPE__POLL_META_RSP,
TMQ_MSG_TYPE__EP_RSP,
+ TMQ_MSG_TYPE__TAOSX_RSP,
TMQ_MSG_TYPE__END_RSP,
};
@@ -105,7 +130,6 @@ typedef struct SDataBlockInfo {
uint32_t capacity;
// TODO: optimize and remove following
int64_t version; // used for stream, and need serialization
- int64_t ts; // used for stream, and need serialization
int32_t childId; // used for stream, do not serialize
EStreamType type; // used for stream, do not serialize
STimeWindow calWin; // used for stream, do not serialize
@@ -160,6 +184,7 @@ typedef struct SQueryTableDataCond {
STimeWindow twindows;
int64_t startVersion;
int64_t endVersion;
+ int64_t schemaVersion;
} SQueryTableDataCond;
int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock);
@@ -181,7 +206,7 @@ typedef struct SColumn {
int16_t slotId;
char name[TSDB_COL_NAME_LEN];
- int8_t flag; // column type: normal column, tag, or user-input column (integer/float/string)
+ int16_t colType; // column type: normal column, tag, or window column
int16_t type;
int32_t bytes;
uint8_t precision;
diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h
index af7c88acded2e151ff730ccb1ade5fdf15f9862a..df16f4f0ab9ad1a79c11ede9e54fdc086e9204df 100644
--- a/include/common/tdataformat.h
+++ b/include/common/tdataformat.h
@@ -96,6 +96,7 @@ char *tTagValToData(const STagVal *pTagVal, bool isJson);
int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag);
int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag);
int32_t tTagToValArray(const STag *pTag, SArray **ppArray);
+void tTagSetCid(const STag *pTag, int16_t iTag, int16_t cid);
void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove
int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf);
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index cd74ffd47764fab78f224c2f373e0c93e8117d12..2de4ffdc17347f2e3afb2793a95e32b4cea966e6 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -66,6 +66,7 @@ extern int32_t tsNumOfVnodeStreamThreads;
extern int32_t tsNumOfVnodeFetchThreads;
extern int32_t tsNumOfVnodeWriteThreads;
extern int32_t tsNumOfVnodeSyncThreads;
+extern int32_t tsNumOfVnodeRsmaThreads;
extern int32_t tsNumOfQnodeQueryThreads;
extern int32_t tsNumOfQnodeFetchThreads;
extern int32_t tsNumOfSnodeSharedThreads;
@@ -88,11 +89,12 @@ extern uint16_t tsTelemPort;
// query buffer management
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
-extern int64_t tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node
+extern int64_t tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node
// query client
extern int32_t tsQueryPolicy;
extern int32_t tsQuerySmaOptimize;
+extern bool tsQueryPlannerTrace;
// client
extern int32_t tsMinSlidingTime;
@@ -130,6 +132,7 @@ extern int32_t tsMqRebalanceInterval;
extern int32_t tsTtlUnit;
extern int32_t tsTtlPushInterval;
extern int32_t tsGrantHBInterval;
+extern int32_t tsUptimeInterval;
#define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
@@ -142,10 +145,10 @@ void taosCfgDynamicOptions(const char *option, const char *value);
struct SConfig *taosGetCfg();
-void taosSetAllDebugFlag(int32_t flag);
-void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal);
+void taosSetAllDebugFlag(int32_t flag, bool rewrite);
+void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite);
int32_t taosSetCfg(SConfig *pCfg, char *name);
-void taosLocalCfgForbiddenToChange(char* name, bool* forbidden);
+void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
#ifdef __cplusplus
}
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index 8f199c72f7284e5a1a5192fad4f0fdd7a292bab2..8693061d12a82e6978aa70a0e663462792ebf47d 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -268,14 +268,41 @@ STSRow* tGetSubmitBlkNext(SSubmitBlkIter* pIter);
// for debug
int32_t tPrintFixedSchemaSubmitReq(SSubmitReq* pReq, STSchema* pSchema);
+struct SSchema {
+ int8_t type;
+ int8_t flags;
+ col_id_t colId;
+ int32_t bytes;
+ char name[TSDB_COL_NAME_LEN];
+};
+
typedef struct {
- int32_t code;
- int8_t hashMeta;
- int64_t uid;
- char* tblFName;
- int32_t numOfRows;
- int32_t affectedRows;
- int64_t sver;
+ char tbName[TSDB_TABLE_NAME_LEN];
+ char stbName[TSDB_TABLE_NAME_LEN];
+ char dbFName[TSDB_DB_FNAME_LEN];
+ int64_t dbId;
+ int32_t numOfTags;
+ int32_t numOfColumns;
+ int8_t precision;
+ int8_t tableType;
+ int32_t sversion;
+ int32_t tversion;
+ uint64_t suid;
+ uint64_t tuid;
+ int32_t vgId;
+ int8_t sysInfo;
+ SSchema* pSchemas;
+} STableMetaRsp;
+
+typedef struct {
+ int32_t code;
+ int8_t hashMeta;
+ int64_t uid;
+ char* tblFName;
+ int32_t numOfRows;
+ int32_t affectedRows;
+ int64_t sver;
+ STableMetaRsp* pMeta;
} SSubmitBlkRsp;
typedef struct {
@@ -290,19 +317,14 @@ typedef struct {
int32_t tEncodeSSubmitRsp(SEncoder* pEncoder, const SSubmitRsp* pRsp);
int32_t tDecodeSSubmitRsp(SDecoder* pDecoder, SSubmitRsp* pRsp);
+void tFreeSSubmitBlkRsp(void* param);
void tFreeSSubmitRsp(SSubmitRsp* pRsp);
-#define COL_SMA_ON ((int8_t)0x1)
-#define COL_IDX_ON ((int8_t)0x2)
-#define COL_SET_NULL ((int8_t)0x10)
-#define COL_SET_VAL ((int8_t)0x20)
-struct SSchema {
- int8_t type;
- int8_t flags;
- col_id_t colId;
- int32_t bytes;
- char name[TSDB_COL_NAME_LEN];
-};
+#define COL_SMA_ON ((int8_t)0x1)
+#define COL_IDX_ON ((int8_t)0x2)
+#define COL_SET_NULL ((int8_t)0x10)
+#define COL_SET_VAL ((int8_t)0x20)
+#define COL_IS_SYSINFO ((int8_t)0x40)
#define COL_IS_SET(FLG) (((FLG) & (COL_SET_VAL | COL_SET_NULL)) != 0)
#define COL_CLR_SET(FLG) ((FLG) &= (~(COL_SET_VAL | COL_SET_NULL)))
@@ -472,6 +494,14 @@ int32_t tSerializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq
int32_t tDeserializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq);
void tFreeSMCreateStbReq(SMCreateStbReq* pReq);
+typedef struct {
+ STableMetaRsp* pMeta;
+} SMCreateStbRsp;
+
+int32_t tEncodeSMCreateStbRsp(SEncoder* pEncoder, const SMCreateStbRsp* pRsp);
+int32_t tDecodeSMCreateStbRsp(SDecoder* pDecoder, SMCreateStbRsp* pRsp);
+void tFreeSMCreateStbRsp(SMCreateStbRsp* pRsp);
+
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
int8_t igNotExists;
@@ -530,6 +560,7 @@ typedef struct {
uint32_t connId;
int32_t dnodeNum;
int8_t superUser;
+ int8_t sysInfo;
int8_t connType;
SEpSet epSet;
int32_t svrTimestamp;
@@ -1239,23 +1270,6 @@ typedef struct {
SVgroupInfo vgroups[];
} SVgroupsInfo;
-typedef struct {
- char tbName[TSDB_TABLE_NAME_LEN];
- char stbName[TSDB_TABLE_NAME_LEN];
- char dbFName[TSDB_DB_FNAME_LEN];
- int64_t dbId;
- int32_t numOfTags;
- int32_t numOfColumns;
- int8_t precision;
- int8_t tableType;
- int32_t sversion;
- int32_t tversion;
- uint64_t suid;
- uint64_t tuid;
- int32_t vgId;
- SSchema* pSchemas;
-} STableMetaRsp;
-
typedef struct {
STableMetaRsp* pMeta;
} SMAlterStbRsp;
@@ -1266,7 +1280,7 @@ void tFreeSMAlterStbRsp(SMAlterStbRsp* pRsp);
int32_t tSerializeSTableMetaRsp(void* buf, int32_t bufLen, STableMetaRsp* pRsp);
int32_t tDeserializeSTableMetaRsp(void* buf, int32_t bufLen, STableMetaRsp* pRsp);
-void tFreeSTableMetaRsp(STableMetaRsp* pRsp);
+void tFreeSTableMetaRsp(void* pRsp);
void tFreeSTableIndexRsp(void* info);
typedef struct {
@@ -2028,11 +2042,13 @@ int tEncodeSVCreateTbBatchReq(SEncoder* pCoder, const SVCreateTbBatchReq* pReq);
int tDecodeSVCreateTbBatchReq(SDecoder* pCoder, SVCreateTbBatchReq* pReq);
typedef struct {
- int32_t code;
+ int32_t code;
+ STableMetaRsp* pMeta;
} SVCreateTbRsp, SVUpdateTbRsp;
-int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp);
-int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp);
+int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp);
+int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp);
+void tFreeSVCreateTbRsp(void* param);
int32_t tSerializeSVCreateTbReq(void** buf, SVCreateTbReq* pReq);
void* tDeserializeSVCreateTbReq(void* buf, SVCreateTbReq* pReq);
@@ -2054,6 +2070,7 @@ int32_t tDeserializeSVCreateTbBatchRsp(void* buf, int32_t bufLen, SVCreateTbBatc
// TDMT_VND_DROP_TABLE =================
typedef struct {
char* name;
+ uint64_t suid; // for tmq in wal format
int8_t igNotExists;
} SVDropTbReq;
@@ -2598,7 +2615,7 @@ enum {
typedef struct {
int8_t type;
union {
- // snapshot data
+ // snapshot
struct {
int64_t uid;
int64_t ts;
@@ -2610,6 +2627,22 @@ typedef struct {
};
} STqOffsetVal;
+static FORCE_INLINE void tqOffsetResetToData(STqOffsetVal* pOffsetVal, int64_t uid, int64_t ts) {
+ pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_DATA;
+ pOffsetVal->uid = uid;
+ pOffsetVal->ts = ts;
+}
+
+static FORCE_INLINE void tqOffsetResetToMeta(STqOffsetVal* pOffsetVal, int64_t uid) {
+ pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_META;
+ pOffsetVal->uid = uid;
+}
+
+static FORCE_INLINE void tqOffsetResetToLog(STqOffsetVal* pOffsetVal, int64_t ver) {
+ pOffsetVal->type = TMQ_OFFSET__LOG;
+ pOffsetVal->version = ver;
+}
+
int32_t tEncodeSTqOffsetVal(SEncoder* pEncoder, const STqOffsetVal* pOffsetVal);
int32_t tDecodeSTqOffsetVal(SDecoder* pDecoder, STqOffsetVal* pOffsetVal);
int32_t tFormatOffset(char* buf, int32_t maxLen, const STqOffsetVal* pVal);
@@ -2658,15 +2691,6 @@ typedef struct {
int32_t tSerializeSMDropSmaReq(void* buf, int32_t bufLen, SMDropSmaReq* pReq);
int32_t tDeserializeSMDropSmaReq(void* buf, int32_t bufLen, SMDropSmaReq* pReq);
-typedef struct {
- int32_t vgId;
- SEpSet epSet;
-} SVgEpSet;
-
-typedef struct {
- int32_t padding;
-} SRSmaExecMsg;
-
typedef struct {
int8_t version; // for compatibility(default 0)
int8_t intervalUnit; // MACRO: TIME_UNIT_XXX
@@ -2926,33 +2950,14 @@ static FORCE_INLINE void tDeleteSMqSubTopicEp(SMqSubTopicEp* pSubTopicEp) {
typedef struct {
SMqRspHead head;
- int64_t reqOffset;
- int64_t rspOffset;
- STqOffsetVal reqOffsetNew;
- STqOffsetVal rspOffsetNew;
+ STqOffsetVal rspOffset;
int16_t resMsgType;
int32_t metaRspLen;
void* metaRsp;
} SMqMetaRsp;
-static FORCE_INLINE int32_t tEncodeSMqMetaRsp(void** buf, const SMqMetaRsp* pRsp) {
- int32_t tlen = 0;
- tlen += taosEncodeFixedI64(buf, pRsp->reqOffset);
- tlen += taosEncodeFixedI64(buf, pRsp->rspOffset);
- tlen += taosEncodeFixedI16(buf, pRsp->resMsgType);
- tlen += taosEncodeFixedI32(buf, pRsp->metaRspLen);
- tlen += taosEncodeBinary(buf, pRsp->metaRsp, pRsp->metaRspLen);
- return tlen;
-}
-
-static FORCE_INLINE void* tDecodeSMqMetaRsp(const void* buf, SMqMetaRsp* pRsp) {
- buf = taosDecodeFixedI64(buf, &pRsp->reqOffset);
- buf = taosDecodeFixedI64(buf, &pRsp->rspOffset);
- buf = taosDecodeFixedI16(buf, &pRsp->resMsgType);
- buf = taosDecodeFixedI32(buf, &pRsp->metaRspLen);
- buf = taosDecodeBinary(buf, &pRsp->metaRsp, pRsp->metaRspLen);
- return (void*)buf;
-}
+int32_t tEncodeSMqMetaRsp(SEncoder* pEncoder, const SMqMetaRsp* pRsp);
+int32_t tDecodeSMqMetaRsp(SDecoder* pDecoder, SMqMetaRsp* pRsp);
typedef struct {
SMqRspHead head;
@@ -2969,6 +2974,27 @@ typedef struct {
int32_t tEncodeSMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pRsp);
int32_t tDecodeSMqDataRsp(SDecoder* pDecoder, SMqDataRsp* pRsp);
+void tDeleteSMqDataRsp(SMqDataRsp* pRsp);
+
+typedef struct {
+ SMqRspHead head;
+ STqOffsetVal reqOffset;
+ STqOffsetVal rspOffset;
+ int32_t blockNum;
+ int8_t withTbName;
+ int8_t withSchema;
+ SArray* blockDataLen;
+ SArray* blockData;
+ SArray* blockTbName;
+ SArray* blockSchema;
+ int32_t createTableNum;
+ SArray* createTableLen;
+ SArray* createTableReq;
+} STaosxRsp;
+
+int32_t tEncodeSTaosxRsp(SEncoder* pEncoder, const STaosxRsp* pRsp);
+int32_t tDecodeSTaosxRsp(SDecoder* pDecoder, STaosxRsp* pRsp);
+void tDeleteSTaosxRsp(STaosxRsp* pRsp);
typedef struct {
SMqRspHead head;
diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h
index e2bb3e2ae16921b822c275fb1d9be7afcae29685..006ba7f21bf0177c2b0104a51ef7908785cced2d 100644
--- a/include/common/tmsgdef.h
+++ b/include/common/tmsgdef.h
@@ -170,6 +170,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_SPLIT_VGROUP, "split-vgroup", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_SHOW_VARIABLES, "show-variables", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_SERVER_VERSION, "server-version", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_MND_UPTIME_TIMER, "uptime-timer", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_VND_MSG)
diff --git a/include/common/ttypes.h b/include/common/ttypes.h
index ceb3eae0338455ab207034fca707473c6c44940d..a88f65f6acf69d552073ab0ede31a0b027b25692 100644
--- a/include/common/ttypes.h
+++ b/include/common/ttypes.h
@@ -49,9 +49,6 @@ typedef struct {
#define varDataCopy(dst, v) memcpy((dst), (void *)(v), varDataTLen(v))
#define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE))
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
-#define IS_VAR_DATA_TYPE(t) \
- (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
-#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
#define varDataNetLen(v) (htons(((VarDataLenT *)(v))[0]))
#define varDataNetTLen(v) (sizeof(VarDataLenT) + varDataNetLen(v))
@@ -268,11 +265,16 @@ typedef struct {
#define IS_UNSIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_UTINYINT && (_t) <= TSDB_DATA_TYPE_UBIGINT)
#define IS_FLOAT_TYPE(_t) ((_t) == TSDB_DATA_TYPE_FLOAT || (_t) == TSDB_DATA_TYPE_DOUBLE)
#define IS_INTEGER_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)))
+#define IS_TIMESTAMP_TYPE(_t) ((_t) == TSDB_DATA_TYPE_TIMESTAMP)
#define IS_NUMERIC_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)) || (IS_FLOAT_TYPE(_t)))
#define IS_MATHABLE_TYPE(_t) \
(IS_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))
+#define IS_VAR_DATA_TYPE(t) \
+ (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
+#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
+
#define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
#define IS_VALID_SMALLINT(_t) ((_t) >= INT16_MIN && (_t) <= INT16_MAX)
#define IS_VALID_INT(_t) ((_t) >= INT32_MIN && (_t) <= INT32_MAX)
diff --git a/include/libs/command/command.h b/include/libs/command/command.h
index 8a4ecad37da3089c32ff0e3fca7473dcc334971c..b3339a417ba463212c3abc163b57519194953c10 100644
--- a/include/libs/command/command.h
+++ b/include/libs/command/command.h
@@ -17,12 +17,12 @@
#define TDENGINE_COMMAND_H
#include "cmdnodes.h"
-#include "tmsg.h"
#include "plannodes.h"
+#include "tmsg.h"
typedef struct SExplainCtx SExplainCtx;
-int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp);
+int32_t qExecCommand(bool sysInfoUser, SNode *pStmt, SRetrieveTableRsp **pRsp);
int32_t qExecStaticExplain(SQueryPlan *pDag, SRetrieveTableRsp **pRsp);
int32_t qExecExplainBegin(SQueryPlan *pDag, SExplainCtx **pCtx, int64_t startTs);
diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h
index a64815f14fe0a0dbe5b85ffd0969a68d43f50d8e..25a6221fcb5344cd1f0d98af15840b3905321612 100644
--- a/include/libs/executor/executor.h
+++ b/include/libs/executor/executor.h
@@ -29,7 +29,7 @@ typedef void* DataSinkHandle;
struct SRpcMsg;
struct SSubplan;
-typedef struct SReadHandle {
+typedef struct {
void* tqReader;
void* meta;
void* config;
@@ -41,6 +41,10 @@ typedef struct SReadHandle {
bool initTableReader;
bool initTqReader;
int32_t numOfVgroups;
+
+ void* sContext; // SSnapContext*
+
+ void* pStateBackend;
} SReadHandle;
// in queue mode, data streams are seperated by msg
@@ -78,8 +82,8 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
/**
* @brief Cleanup SSDataBlock for StreamScanInfo
- *
- * @param tinfo
+ *
+ * @param tinfo
*/
void tdCleanupStreamInputDataBlock(qTaskInfo_t tinfo);
@@ -163,7 +167,7 @@ int32_t qGetQualifiedTableIdList(void* pTableList, const char* tagCond, int32_t
void qProcessRspMsg(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet);
-int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList/*,int32_t* resNum, SExplainExecInfo** pRes*/);
+int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList /*,int32_t* resNum, SExplainExecInfo** pRes*/);
int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len);
@@ -180,11 +184,17 @@ int32_t qGetStreamScanStatus(qTaskInfo_t tinfo, uint64_t* uid, int64_t* ts);
int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts);
-int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset);
+int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);
int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset);
-void* qStreamExtractMetaMsg(qTaskInfo_t tinfo);
+SMqMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo);
+
+int64_t qStreamExtractPrepareUid(qTaskInfo_t tinfo);
+
+const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo);
+
+const char* qExtractTbnameFromTask(qTaskInfo_t tinfo);
void* qExtractReaderFromStreamScanner(void* scanner);
diff --git a/include/libs/function/function.h b/include/libs/function/function.h
index d5da306fd297dd49f4753aa01c6423cb9dd82e9c..3f26eee86ad3f1b4666c55283ad346f60a7b4f31 100644
--- a/include/libs/function/function.h
+++ b/include/libs/function/function.h
@@ -92,6 +92,8 @@ struct SResultRowEntryInfo;
//for selectivity query, the corresponding tag value is assigned if the data is qualified
typedef struct SSubsidiaryResInfo {
int16_t num;
+ int32_t rowLen;
+ char* buf; // serialize data buffer
struct SqlFunctionCtx **pCtx;
} SSubsidiaryResInfo;
@@ -118,6 +120,11 @@ typedef struct SInputColumnInfoData {
uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions.
} SInputColumnInfoData;
+typedef struct SSerializeDataHandle {
+ struct SDiskbasedBuf* pBuf;
+ int32_t currentPage;
+} SSerializeDataHandle;
+
// sql function runtime context
typedef struct SqlFunctionCtx {
SInputColumnInfoData input;
@@ -137,11 +144,9 @@ typedef struct SqlFunctionCtx {
SFuncExecFuncs fpSet;
SScalarFuncExecFuncs sfp;
struct SExprInfo *pExpr;
- struct SDiskbasedBuf *pBuf;
struct SSDataBlock *pSrcBlock;
- struct SSDataBlock *pDstBlock; // used by indifinite rows function to set selectivity
- int32_t curBufPage;
- bool increase;
+ struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity
+ SSerializeDataHandle saveHandle;
bool isStream;
char udfName[TSDB_FUNC_NAME_LEN];
diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h
index 741b0fddebf36cd1a8f16d0d2265742bcb9ac16c..c9c19579cb1c6943c5914aebed20668a1c1ff156 100644
--- a/include/libs/function/functionMgt.h
+++ b/include/libs/function/functionMgt.h
@@ -176,7 +176,8 @@ int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen);
EFuncReturnRows fmGetFuncReturnRows(SFunctionNode* pFunc);
-bool fmIsBuiltinFunc(const char* pFunc);
+bool fmIsBuiltinFunc(const char* pFunc);
+EFunctionType fmGetFuncType(const char* pFunc);
bool fmIsAggFunc(int32_t funcId);
bool fmIsScalarFunc(int32_t funcId);
diff --git a/include/libs/function/taosudf.h b/include/libs/function/taosudf.h
index 5e84b87a81ec1808dfc368ac285f4dabd2e1d57e..2b2063e3f61e575cd59de099feee3b83ad87ff9c 100644
--- a/include/libs/function/taosudf.h
+++ b/include/libs/function/taosudf.h
@@ -256,8 +256,9 @@ static FORCE_INLINE int32_t udfColDataSet(SUdfColumn* pColumn, uint32_t currentR
typedef int32_t (*TUdfScalarProcFunc)(SUdfDataBlock* block, SUdfColumn *resultCol);
typedef int32_t (*TUdfAggStartFunc)(SUdfInterBuf *buf);
-typedef int32_t (*TUdfAggProcessFunc)(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf);
-typedef int32_t (*TUdfAggFinishFunc)(SUdfInterBuf* buf, SUdfInterBuf *resultData);
+typedef int32_t (*TUdfAggProcessFunc)(SUdfDataBlock *block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf);
+typedef int32_t (*TUdfAggMergeFunc)(SUdfInterBuf *inputBuf1, SUdfInterBuf *inputBuf2, SUdfInterBuf *outputBuf);
+typedef int32_t (*TUdfAggFinishFunc)(SUdfInterBuf *buf, SUdfInterBuf *resultData);
#ifdef __cplusplus
}
diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h
index 3223d4cdb8dfd36284f3d36922451166226fdd3e..9652579705b4dd8e176ca3e73d0f1e5eec2716ed 100644
--- a/include/libs/nodes/cmdnodes.h
+++ b/include/libs/nodes/cmdnodes.h
@@ -78,6 +78,9 @@ typedef struct SDatabaseOptions {
int32_t walRetentionSize;
int32_t walRollPeriod;
int32_t walSegmentSize;
+ bool walRetentionPeriodIsSet;
+ bool walRetentionSizeIsSet;
+ bool walRollPeriodIsSet;
} SDatabaseOptions;
typedef struct SCreateDatabaseStmt {
diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h
index 5743d3360857dab460841d89e50360ba53d36b39..687949a9c06aa0ae18d2a731263e8b4ac62d9a1d 100644
--- a/include/libs/nodes/nodes.h
+++ b/include/libs/nodes/nodes.h
@@ -244,6 +244,7 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE,
QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE,
QUERY_NODE_PHYSICAL_PLAN_PARTITION,
+ QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC,
QUERY_NODE_PHYSICAL_PLAN_DISPATCH,
diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h
index 8661baceb2dc426e69e459aec33c6c730b419e7e..8aeb86102a7b4237276f59f25fe50d36c6f99efa 100644
--- a/include/libs/nodes/plannodes.h
+++ b/include/libs/nodes/plannodes.h
@@ -317,6 +317,7 @@ typedef struct SSystemTableScanPhysiNode {
SEpSet mgmtEpSet;
bool showRewrite;
int32_t accountId;
+ bool sysInfo;
} SSystemTableScanPhysiNode;
typedef struct STableScanPhysiNode {
@@ -487,6 +488,8 @@ typedef struct SPartitionPhysiNode {
SNodeList* pTargets;
} SPartitionPhysiNode;
+typedef SPartitionPhysiNode SStreamPartitionPhysiNode;
+
typedef struct SDataSinkNode {
ENodeType type;
SDataBlockDescNode* pInputDataBlockDesc;
diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h
index cec6f1a6919ab66ad3928254d47a0943f60936b5..3a1eaf289e4ba245544b985e893f746845c37c88 100644
--- a/include/libs/nodes/querynodes.h
+++ b/include/libs/nodes/querynodes.h
@@ -57,7 +57,9 @@ typedef enum EColumnType {
COLUMN_TYPE_COLUMN = 1,
COLUMN_TYPE_TAG,
COLUMN_TYPE_TBNAME,
- COLUMN_TYPE_WINDOW_PC,
+ COLUMN_TYPE_WINDOW_START,
+ COLUMN_TYPE_WINDOW_END,
+ COLUMN_TYPE_WINDOW_DURATION,
COLUMN_TYPE_GROUP_KEY
} EColumnType;
diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h
index 717278d51d1b252dc3f2bada18a61bbb65739b6e..95bde858640b3d4cd5df616bc1d0a5a65795d8f3 100644
--- a/include/libs/parser/parser.h
+++ b/include/libs/parser/parser.h
@@ -49,6 +49,7 @@ typedef struct SParseContext {
SStmtCallback* pStmtCb;
const char* pUser;
bool isSuperUser;
+ bool enableSysInfo;
bool async;
int8_t schemalessType;
const char* svrVer;
diff --git a/include/libs/planner/planner.h b/include/libs/planner/planner.h
index d1a5c5db103d940c9e36dd9ad637461b2e3361b5..05caa7a7bb56617ef34c03e3646f85ac98f65a56 100644
--- a/include/libs/planner/planner.h
+++ b/include/libs/planner/planner.h
@@ -38,6 +38,7 @@ typedef struct SPlanContext {
char* pMsg;
int32_t msgLen;
const char* pUser;
+ bool sysInfo;
} SPlanContext;
// Create the physical plan for the query, according to the AST.
diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h
index 34d870397f953e9a85a9a5b44b6a4fc57c0594c9..1fa7dca7dc6ad975e87e18570c8a9a35d990bb7e 100644
--- a/include/libs/qcom/query.h
+++ b/include/libs/qcom/query.h
@@ -215,6 +215,7 @@ void initQueryModuleMsgHandle();
const SSchema* tGetTbnameColumnSchema();
bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
+int32_t queryCreateCTableMetaFromMsg(STableMetaRsp *msg, SCTableMeta *pMeta);
int32_t queryCreateTableMetaFromMsg(STableMetaRsp* msg, bool isSuperTable, STableMeta** pMeta);
char* jobTaskStatusStr(int32_t status);
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index 384c6a289f304e4c59a097663bb4224e979bd226..2c275090080f73577cd28b3e10b3f1e102b4556e 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -263,6 +263,14 @@ typedef struct {
SArray* checkpointVer;
} SStreamRecoveringState;
+// incremental state storage
+typedef struct {
+ SStreamTask* pOwner;
+ TDB* db;
+ TTB* pStateDb;
+ TXN txn;
+} SStreamState;
+
typedef struct SStreamTask {
int64_t streamId;
int32_t taskId;
@@ -312,6 +320,10 @@ typedef struct SStreamTask {
// msg handle
SMsgCb* pMsgCb;
+
+ // state backend
+ SStreamState* pState;
+
} SStreamTask;
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
@@ -507,7 +519,7 @@ typedef struct SStreamMeta {
char* path;
TDB* db;
TTB* pTaskDb;
- TTB* pStateDb;
+ TTB* pCheckpointDb;
SHashObj* pTasks;
SHashObj* pRecoverStatus;
void* ahandle;
@@ -528,6 +540,37 @@ int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaRollBack(SStreamMeta* pMeta);
int32_t streamLoadTasks(SStreamMeta* pMeta);
+SStreamState* streamStateOpen(char* path, SStreamTask* pTask);
+void streamStateClose(SStreamState* pState);
+int32_t streamStateBegin(SStreamState* pState);
+int32_t streamStateCommit(SStreamState* pState);
+int32_t streamStateAbort(SStreamState* pState);
+
+typedef struct {
+ TBC* pCur;
+} SStreamStateCur;
+
+#if 1
+int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
+int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
+int32_t streamStateDel(SStreamState* pState, const SWinKey* key);
+void streamFreeVal(void* val);
+
+SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key);
+SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
+SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key);
+void streamStateFreeCur(SStreamStateCur* pCur);
+
+int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
+
+int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur);
+int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur);
+
+int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur);
+int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
+
+#endif
+
#ifdef __cplusplus
}
#endif
diff --git a/include/os/osDir.h b/include/os/osDir.h
index 9019d4f80240b2335824cb5626488bf4d0957f06..95b1a6ee1d00ab18e31522063102ff0ec9a2bab8 100644
--- a/include/os/osDir.h
+++ b/include/os/osDir.h
@@ -56,6 +56,7 @@ void taosRemoveDir(const char *dirname);
bool taosDirExist(const char *dirname);
int32_t taosMkDir(const char *dirname);
int32_t taosMulMkDir(const char *dirname);
+int32_t taosMulModeMkDir(const char *dirname, int mode);
void taosRemoveOldFiles(const char *dirname, int32_t keepDays);
int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen);
int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen);
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index 12d6127165008e8b9e3b554ca8d1944dcef94e4e..e39172d74e52e852f0fa1812634e494d61ac6213 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -49,7 +49,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_RPC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x0019)
//common & util
-#define TSDB_CODE_TIME_UNSYNCED TAOS_DEF_ERROR_CODE(0, 0x0013)
+#define TSDB_CODE_TIME_UNSYNCED TAOS_DEF_ERROR_CODE(0, 0x0013)
#define TSDB_CODE_APP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0014)
#define TSDB_CODE_OPS_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x0100)
@@ -222,7 +222,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_INVALID_DB_OPTION TAOS_DEF_ERROR_CODE(0, 0x0382)
#define TSDB_CODE_MND_INVALID_DB TAOS_DEF_ERROR_CODE(0, 0x0383)
#define TSDB_CODE_MND_TOO_MANY_DATABASES TAOS_DEF_ERROR_CODE(0, 0x0385)
-#define TSDB_CODE_MND_DB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0388)
+#define TSDB_CODE_MND_DB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0388)
#define TSDB_CODE_MND_INVALID_DB_ACCT TAOS_DEF_ERROR_CODE(0, 0x0389)
#define TSDB_CODE_MND_DB_OPTION_UNCHANGED TAOS_DEF_ERROR_CODE(0, 0x038A)
#define TSDB_CODE_MND_DB_INDEX_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x038B)
@@ -433,7 +433,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TQ_NO_DISK_PERMISSIONS TAOS_DEF_ERROR_CODE(0, 0x0A03)
#define TSDB_CODE_TQ_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0A04)
#define TSDB_CODE_TQ_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0A05)
-#define TSDB_CODE_TQ_FILE_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x0A06)
+#define TSDB_CODE_TQ_FILE_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x0A06)
#define TSDB_CODE_TQ_FAILED_TO_CREATE_DIR TAOS_DEF_ERROR_CODE(0, 0x0A07)
#define TSDB_CODE_TQ_META_NO_SUCH_KEY TAOS_DEF_ERROR_CODE(0, 0x0A08)
#define TSDB_CODE_TQ_META_KEY_NOT_IN_TXN TAOS_DEF_ERROR_CODE(0, 0x0A09)
@@ -490,7 +490,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_WRONG_NUMBER_OF_SELECT TAOS_DEF_ERROR_CODE(0, 0x2609)
#define TSDB_CODE_PAR_GROUPBY_LACK_EXPRESSION TAOS_DEF_ERROR_CODE(0, 0x260A)
#define TSDB_CODE_PAR_NOT_SELECTED_EXPRESSION TAOS_DEF_ERROR_CODE(0, 0x260B)
-#define TSDB_CODE_PAR_NOT_SINGLE_GROUP TAOS_DEF_ERROR_CODE(0, 0x260C)
+#define TSDB_CODE_PAR_NOT_SINGLE_GROUP TAOS_DEF_ERROR_CODE(0, 0x260C)
#define TSDB_CODE_PAR_TAGS_NOT_MATCHED TAOS_DEF_ERROR_CODE(0, 0x260D)
#define TSDB_CODE_PAR_INVALID_TAG_NAME TAOS_DEF_ERROR_CODE(0, 0x260E)
#define TSDB_CODE_PAR_NAME_OR_PASSWD_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x2610)
diff --git a/include/util/tcompare.h b/include/util/tcompare.h
index cc9e8ae4641138be528830e17467dab7897f0166..c7a3ca20f222c7d919460b31e9f3c55a79325f46 100644
--- a/include/util/tcompare.h
+++ b/include/util/tcompare.h
@@ -105,6 +105,97 @@ int32_t compareStrPatternNotMatch(const void *pLeft, const void *pRight);
int32_t compareWStrPatternMatch(const void *pLeft, const void *pRight);
int32_t compareWStrPatternNotMatch(const void *pLeft, const void *pRight);
+int32_t compareInt8Int16(const void *pLeft, const void *pRight);
+int32_t compareInt8Int32(const void *pLeft, const void *pRight);
+int32_t compareInt8Int64(const void *pLeft, const void *pRight);
+int32_t compareInt8Float(const void *pLeft, const void *pRight);
+int32_t compareInt8Double(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint64(const void *pLeft, const void *pRight);
+int32_t compareInt16Int8(const void *pLeft, const void *pRight);
+int32_t compareInt16Int32(const void *pLeft, const void *pRight);
+int32_t compareInt16Int64(const void *pLeft, const void *pRight);
+int32_t compareInt16Float(const void *pLeft, const void *pRight);
+int32_t compareInt16Double(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint64(const void *pLeft, const void *pRight);
+int32_t compareInt32Int8(const void *pLeft, const void *pRight);
+int32_t compareInt32Int16(const void *pLeft, const void *pRight);
+int32_t compareInt32Int64(const void *pLeft, const void *pRight);
+int32_t compareInt32Float(const void *pLeft, const void *pRight);
+int32_t compareInt32Double(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint64(const void *pLeft, const void *pRight);
+int32_t compareInt64Int8(const void *pLeft, const void *pRight);
+int32_t compareInt64Int16(const void *pLeft, const void *pRight);
+int32_t compareInt64Int32(const void *pLeft, const void *pRight);
+int32_t compareInt64Float(const void *pLeft, const void *pRight);
+int32_t compareInt64Double(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint64(const void *pLeft, const void *pRight);
+int32_t compareFloatInt8(const void *pLeft, const void *pRight);
+int32_t compareFloatInt16(const void *pLeft, const void *pRight);
+int32_t compareFloatInt32(const void *pLeft, const void *pRight);
+int32_t compareFloatInt64(const void *pLeft, const void *pRight);
+int32_t compareFloatDouble(const void *pLeft, const void *pRight);
+int32_t compareFloatUint8(const void *pLeft, const void *pRight);
+int32_t compareFloatUint16(const void *pLeft, const void *pRight);
+int32_t compareFloatUint32(const void *pLeft, const void *pRight);
+int32_t compareFloatUint64(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt8(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt16(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt32(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt64(const void *pLeft, const void *pRight);
+int32_t compareDoubleFloat(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint8(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint16(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint32(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint64(const void *pLeft, const void *pRight);
+int32_t compareUint8Int8(const void *pLeft, const void *pRight);
+int32_t compareUint8Int16(const void *pLeft, const void *pRight);
+int32_t compareUint8Int32(const void *pLeft, const void *pRight);
+int32_t compareUint8Int64(const void *pLeft, const void *pRight);
+int32_t compareUint8Float(const void *pLeft, const void *pRight);
+int32_t compareUint8Double(const void *pLeft, const void *pRight);
+int32_t compareUint8Uint16(const void *pLeft, const void *pRight);
+int32_t compareUint8Uint32(const void *pLeft, const void *pRight);
+int32_t compareUint8Uint64(const void *pLeft, const void *pRight);
+int32_t compareUint16Int8(const void *pLeft, const void *pRight);
+int32_t compareUint16Int16(const void *pLeft, const void *pRight);
+int32_t compareUint16Int32(const void *pLeft, const void *pRight);
+int32_t compareUint16Int64(const void *pLeft, const void *pRight);
+int32_t compareUint16Float(const void *pLeft, const void *pRight);
+int32_t compareUint16Double(const void *pLeft, const void *pRight);
+int32_t compareUint16Uint8(const void *pLeft, const void *pRight);
+int32_t compareUint16Uint32(const void *pLeft, const void *pRight);
+int32_t compareUint16Uint64(const void *pLeft, const void *pRight);
+int32_t compareUint32Int8(const void *pLeft, const void *pRight);
+int32_t compareUint32Int16(const void *pLeft, const void *pRight);
+int32_t compareUint32Int32(const void *pLeft, const void *pRight);
+int32_t compareUint32Int64(const void *pLeft, const void *pRight);
+int32_t compareUint32Float(const void *pLeft, const void *pRight);
+int32_t compareUint32Double(const void *pLeft, const void *pRight);
+int32_t compareUint32Uint8(const void *pLeft, const void *pRight);
+int32_t compareUint32Uint16(const void *pLeft, const void *pRight);
+int32_t compareUint32Uint64(const void *pLeft, const void *pRight);
+int32_t compareUint64Int8(const void *pLeft, const void *pRight);
+int32_t compareUint64Int16(const void *pLeft, const void *pRight);
+int32_t compareUint64Int32(const void *pLeft, const void *pRight);
+int32_t compareUint64Int64(const void *pLeft, const void *pRight);
+int32_t compareUint64Float(const void *pLeft, const void *pRight);
+int32_t compareUint64Double(const void *pLeft, const void *pRight);
+int32_t compareUint64Uint8(const void *pLeft, const void *pRight);
+int32_t compareUint64Uint16(const void *pLeft, const void *pRight);
+int32_t compareUint64Uint32(const void *pLeft, const void *pRight);
+
__compar_fn_t getComparFunc(int32_t type, int32_t optr);
__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order);
int32_t doCompare(const char *a, const char *b, int32_t type, size_t size);
diff --git a/include/util/tdef.h b/include/util/tdef.h
index 2bc821b8736edf745a30e0e103734e4e7b7b31e4..4efc79a3ba75f9375735fdb4eb6df76dd4da3132 100644
--- a/include/util/tdef.h
+++ b/include/util/tdef.h
@@ -360,14 +360,17 @@ typedef enum ELogicConditionType {
#define TSDB_DB_SCHEMALESS_OFF 0
#define TSDB_DEFAULT_DB_SCHEMALESS TSDB_DB_SCHEMALESS_OFF
-#define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
-#define TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD (24 * 60 * 60 * 4)
-#define TSDB_DB_MIN_WAL_RETENTION_SIZE -1
-#define TSDB_DEFAULT_DB_WAL_RETENTION_SIZE -1
-#define TSDB_DB_MIN_WAL_ROLL_PERIOD 0
-#define TSDB_DEFAULT_DB_WAL_ROLL_PERIOD (24 * 60 * 60 * 1)
-#define TSDB_DB_MIN_WAL_SEGMENT_SIZE 0
-#define TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE 0
+#define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
+#define TSDB_REP_DEF_DB_WAL_RET_PERIOD 0
+#define TSDB_REPS_DEF_DB_WAL_RET_PERIOD (24 * 60 * 60 * 4)
+#define TSDB_DB_MIN_WAL_RETENTION_SIZE -1
+#define TSDB_REP_DEF_DB_WAL_RET_SIZE 0
+#define TSDB_REPS_DEF_DB_WAL_RET_SIZE -1
+#define TSDB_DB_MIN_WAL_ROLL_PERIOD 0
+#define TSDB_REP_DEF_DB_WAL_ROLL_PERIOD 0
+#define TSDB_REPS_DEF_DB_WAL_ROLL_PERIOD (24 * 60 * 60 * 1)
+#define TSDB_DB_MIN_WAL_SEGMENT_SIZE 0
+#define TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE 0
#define TSDB_MIN_ROLLUP_MAX_DELAY 1 // unit millisecond
#define TSDB_MAX_ROLLUP_MAX_DELAY (15 * 60 * 1000)
@@ -386,7 +389,7 @@ typedef enum ELogicConditionType {
#define TSDB_DEFAULT_EXPLAIN_VERBOSE false
-#define TSDB_EXPLAIN_RESULT_ROW_SIZE (16*1024)
+#define TSDB_EXPLAIN_RESULT_ROW_SIZE (16 * 1024)
#define TSDB_EXPLAIN_RESULT_COLUMN_NAME "QUERY_PLAN"
#define TSDB_MAX_FIELD_LEN 16384
diff --git a/include/util/tencode.h b/include/util/tencode.h
index ad642cd612db4d1bb31f57b7a49d977e90978ee5..a6dd58297e8c1dba644d86eb5145b273406fbf9e 100644
--- a/include/util/tencode.h
+++ b/include/util/tencode.h
@@ -264,12 +264,14 @@ static FORCE_INLINE int32_t tEncodeDouble(SEncoder* pCoder, double val) {
static FORCE_INLINE int32_t tEncodeBinary(SEncoder* pCoder, const uint8_t* val, uint32_t len) {
if (tEncodeU32v(pCoder, len) < 0) return -1;
- if (pCoder->data) {
- if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1;
- memcpy(TD_CODER_CURRENT(pCoder), val, len);
- }
+ if (len) {
+ if (pCoder->data) {
+ if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1;
+ memcpy(TD_CODER_CURRENT(pCoder), val, len);
+ }
- TD_CODER_MOVE_POS(pCoder, len);
+ TD_CODER_MOVE_POS(pCoder, len);
+ }
return 0;
}
@@ -414,14 +416,18 @@ static int32_t tDecodeCStrTo(SDecoder* pCoder, char* val) {
static FORCE_INLINE int32_t tDecodeBinaryAlloc(SDecoder* pCoder, void** val, uint64_t* len) {
uint64_t length = 0;
if (tDecodeU64v(pCoder, &length) < 0) return -1;
- if (len) *len = length;
+ if (length) {
+ if (len) *len = length;
- if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
- *val = taosMemoryMalloc(length);
- if (*val == NULL) return -1;
- memcpy(*val, TD_CODER_CURRENT(pCoder), length);
+ if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
+ *val = taosMemoryMalloc(length);
+ if (*val == NULL) return -1;
+ memcpy(*val, TD_CODER_CURRENT(pCoder), length);
- TD_CODER_MOVE_POS(pCoder, length);
+ TD_CODER_MOVE_POS(pCoder, length);
+ } else {
+ *val = NULL;
+ }
return 0;
}
diff --git a/include/util/thash.h b/include/util/thash.h
index 781c22a56aaba0d449d1f711b32fe4bd75a39003..f4d09eb0906b04bfd40d97c39ec66feb3b1967a1 100644
--- a/include/util/thash.h
+++ b/include/util/thash.h
@@ -210,6 +210,8 @@ void taosHashSetEqualFp(SHashObj *pHashObj, _equal_fn_t fp);
*/
void taosHashSetFreeFp(SHashObj *pHashObj, _hash_free_fn_t fp);
+int64_t taosHashGetCompTimes(SHashObj *pHashObj);
+
#ifdef __cplusplus
}
#endif
diff --git a/include/util/tpagedbuf.h b/include/util/tpagedbuf.h
index ef266068cbaff046ec6ebcf0bf02d0b44ee9d3a2..9ab89273e6895c2ea322fa116c06332a431028bc 100644
--- a/include/util/tpagedbuf.h
+++ b/include/util/tpagedbuf.h
@@ -58,19 +58,17 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
/**
*
* @param pBuf
- * @param groupId
* @param pageId
* @return
*/
-void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId);
+void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId);
/**
*
* @param pBuf
- * @param groupId
* @return
*/
-SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf, int32_t groupId);
+SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf);
/**
* get the specified buffer page by id
@@ -101,13 +99,6 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, struct SPageInfo* pi);
*/
size_t getTotalBufSize(const SDiskbasedBuf* pBuf);
-/**
- * get the number of groups in the result buffer
- * @param pBuf
- * @return
- */
-size_t getNumOfBufGroupId(const SDiskbasedBuf* pBuf);
-
/**
* destroy result buffer
* @param pBuf
diff --git a/packaging/MPtestJenkinsfile b/packaging/MPtestJenkinsfile
new file mode 100644
index 0000000000000000000000000000000000000000..77f642180a59932d8f8e4deaa2d91ac37e7268fc
--- /dev/null
+++ b/packaging/MPtestJenkinsfile
@@ -0,0 +1,236 @@
+def sync_source(branch_name) {
+ sh '''
+ hostname
+ ip addr|grep 192|awk '{print $2}'|sed "s/\\/.*//"
+ echo ''' + branch_name + '''
+ '''
+ sh '''
+ cd ${TDINTERNAL_ROOT_DIR}
+ git reset --hard
+ git fetch || git fetch
+ git checkout ''' + branch_name + ''' -f
+ git branch
+ git pull || git pull
+ git log | head -n 20
+ cd ${TDENGINE_ROOT_DIR}
+ git reset --hard
+ git fetch || git fetch
+ rm -rf examples/rust/
+ git checkout ''' + branch_name + ''' -f
+ git branch
+ git pull || git pull
+ git log | head -n 20
+ git submodule update --init --recursive
+ '''
+ return 1
+}
+def run_test() {
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+
+ '''
+ sh '''
+ export LD_LIBRARY_PATH=${TDINTERNAL_ROOT_DIR}/debug/build/lib
+ ./fulltest.sh
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/tests
+ ./test-all.sh b1fq
+ '''
+}
+def build_run() {
+ sync_source("${BRANCH_NAME}")
+}
+pipeline {
+ agent none
+ parameters {
+ string (
+ name:'version',
+ defaultValue:'3.0.0.1',
+        description: 'release version number, e.g.: 3.0.0.1 or 3.0.0.'
+ )
+ string (
+ name:'baseVersion',
+ defaultValue:'3.0.0.1',
+        description: 'This number of baseVersion is generally not modified. Now it is 3.0.0.1'
+ )
+ string (
+ name:'toolsVersion',
+ defaultValue:'2.1.2',
+        description: 'This number of toolsVersion is generally not modified. Now it is 2.1.2'
+ )
+ string (
+ name:'toolsBaseVersion',
+ defaultValue:'2.1.2',
+        description: 'This number of toolsBaseVersion is generally not modified. Now it is 2.1.2'
+ )
+ string (
+ name:'nasPassword',
+ defaultValue:'password',
+        description: 'the password of the NAS server which has installPackage-192.168.1.131'
+ )
+ }
+ environment{
+ WORK_DIR = '/var/lib/jenkins/workspace'
+ TDINTERNAL_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal'
+ TDENGINE_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal/community'
+ BRANCH_NAME = '3.0'
+
+ TD_SERVER_TAR = "TDengine-server-${version}-Linux-x64.tar.gz"
+ BASE_TD_SERVER_TAR = "TDengine-server-${baseVersion}-Linux-x64.tar.gz"
+
+ TD_SERVER_ARM_TAR = "TDengine-server-${version}-Linux-arm64.tar.gz"
+ BASE_TD_SERVER_ARM_TAR = "TDengine-server-${baseVersion}-Linux-arm64.tar.gz"
+
+ TD_SERVER_LITE_TAR = "TDengine-server-${version}-Linux-x64-Lite.tar.gz"
+ BASE_TD_SERVER_LITE_TAR = "TDengine-server-${baseVersion}-Linux-x64-Lite.tar.gz"
+
+ TD_CLIENT_TAR = "TDengine-client-${version}-Linux-x64.tar.gz"
+ BASE_TD_CLIENT_TAR = "TDengine-client-${baseVersion}-Linux-x64.tar.gz"
+
+ TD_CLIENT_ARM_TAR = "TDengine-client-${version}-Linux-arm64.tar.gz"
+ BASE_TD_CLIENT_ARM_TAR = "TDengine-client-${baseVersion}-Linux-arm64.tar.gz"
+
+ TD_CLIENT_LITE_TAR = "TDengine-client-${version}-Linux-x64-Lite.tar.gz"
+ BASE_TD_CLIENT_LITE_TAR = "TDengine-client-${baseVersion}-Linux-x64-Lite.tar.gz"
+
+ TD_SERVER_RPM = "TDengine-server-${version}-Linux-x64.rpm"
+
+ TD_SERVER_DEB = "TDengine-server-${version}-Linux-x64.deb"
+
+ TD_SERVER_EXE = "TDengine-server-${version}-Windows-x64.exe"
+
+ TD_CLIENT_EXE = "TDengine-client-${version}-Windows-x64.exe"
+
+ TD_TOOLS_TAR = "taosTools-${toolsVersion}-Linux-x64.tar.gz"
+
+
+ }
+ stages {
+ stage ('RUN') {
+ parallel {
+ stage('ubuntu16') {
+ agent{label " ubuntu16 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ }
+ }
+ }
+ stage('ubuntu18') {
+ agent{label " ubuntu18 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_CLIENT_TAR} ${version} ${BASE_TD_CLIENT_TAR} ${baseVersion} client ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ }
+ }
+ }
+ stage('centos7') {
+ agent{label " centos7_9 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ }
+ }
+ }
+ stage('centos8') {
+ agent{label " centos8_3 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_CLIENT_LITE_TAR} ${version} ${BASE_TD_CLIENT_LITE_TAR} ${baseVersion} client ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ }
+ }
+ }
+
+ stage('arm64') {
+ agent{label 'linux_arm64'}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_ARM_TAR} ${version} ${BASE_TD_SERVER_ARM_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_CLIENT_ARM_TAR} ${version} ${BASE_TD_CLIENT_ARM_TAR} ${baseVersion} client ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ }
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg
index aae2e7c856ac7ce4747d798acf5852d6cdf21535..87f465fdb93ddbff8973430b11ecadc13878069d 100644
--- a/packaging/cfg/taos.cfg
+++ b/packaging/cfg/taos.cfg
@@ -38,7 +38,7 @@
# The interval of dnode reporting status to mnode
# statusInterval 1
-# The interval for taos shell to send heartbeat to mnode
+# The interval for TDengine CLI to send heartbeat to mnode
# shellActivityTimer 3
# The minimum sliding window time, milli-second
diff --git a/packaging/checkPackageRuning.py b/packaging/checkPackageRuning.py
new file mode 100755
index 0000000000000000000000000000000000000000..c0d1e8b86c3df2150b7f434e899c545439ab0477
--- /dev/null
+++ b/packaging/checkPackageRuning.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+# install pip
+# pip install src/connector/python/
+
+# -*- coding: utf-8 -*-
+import sys , os
+import getopt
+import subprocess
+# from this import d
+import time
+
+# install taospy
+
+out = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ")
+print("taospy version %s "%out)
+if (out == "" ):
+ os.system("pip install git+https://github.com/taosdata/taos-connector-python.git")
+ print("install taos python connector")
+else:
+ os.system("pip3 install --upgrade taospy ")
+
+
+
+# start taosd prepare
+os.system("rm -rf /var/lib/taos/*")
+os.system("systemctl restart taosd ")
+
+# wait a moment ,at least 5 seconds
+time.sleep(5)
+
+# prepare data by taosBenchmark
+
+os.system("taosBenchmark -y -n 100 -t 100")
+
+import taos
+
+conn = taos.connect(host="localhost",
+ user="root",
+ password="taosdata",
+ database="test",
+ port=6030,
+ config="/etc/taos", # for windows the default value is C:\TDengine\cfg
+ timezone="Asia/Shanghai") # default your host's timezone
+
+server_version = conn.server_info
+print("server_version", server_version)
+client_version = conn.client_info
+print("client_version", client_version) # 3.0.0.0
+
+# Execute a sql and get its result set. It's useful for SELECT statement
+result: taos.TaosResult = conn.query("SELECT count(*) from test.meters")
+
+data = result.fetch_all()
+
+if data[0][0] !=10000:
+ print(" taosBenchmark work not as expected ")
+ sys.exit(1)
+else:
+ print(" taosBenchmark work as expected ")
+
+# test taosdump dump out data and dump in data
+
+# dump out datas
+os.system("taosdump --version")
+os.system("mkdir -p /tmp/dumpdata")
+os.system("rm -rf /tmp/dumpdata/*")
+
+
+
+# dump data out
+print("taosdump dump out data")
+
+os.system("taosdump -o /tmp/dumpdata -D test -y ")
+
+# drop database of test
+print("drop database test")
+os.system(" taos -s ' drop database test ;' ")
+
+# dump data in
+print("taosdump dump data in")
+os.system("taosdump -i /tmp/dumpdata -y ")
+
+result = conn.query("SELECT count(*) from test.meters")
+
+data = result.fetch_all()
+
+if data[0][0] !=10000:
+ print(" taosdump work not as expected ")
+ sys.exit(1)
+else:
+ print(" taosdump work as expected ")
+
+conn.close()
\ No newline at end of file
diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh
index 3db9005f95a3027c42dd05b9f28d448ade5852cb..94a24a41487e8d7b82571bcc524392e4335d7fae 100755
--- a/packaging/deb/makedeb.sh
+++ b/packaging/deb/makedeb.sh
@@ -45,6 +45,7 @@ mkdir -p ${pkg_dir}${install_home_path}/include
mkdir -p ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
+cp ${compile_dir}/../packaging/cfg/taosd.service ${pkg_dir}${install_home_path}/cfg
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg || :
fi
diff --git a/packaging/debRpmAutoInstall.sh b/packaging/debRpmAutoInstall.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1f51378c91d14b5fcfd1eb4cca87a6cd472161cc
--- /dev/null
+++ b/packaging/debRpmAutoInstall.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/expect
+set packgeName [lindex $argv 0]
+set packageSuffix [lindex $argv 1]
+set timeout 3
+if { ${packageSuffix} == "deb" } {
+ spawn dpkg -i ${packgeName}
+} elseif { ${packageSuffix} == "rpm"} {
+ spawn rpm -ivh ${packgeName}
+}
+expect "*one:"
+send "\r"
+expect "*skip:"
+send "\r"
diff --git a/packaging/docker/README.md b/packaging/docker/README.md
index e41182f471050af6b4d47b696eb237e319b2dd80..763ab73724587eb4dc231eb399a60937eaba6dca 100644
--- a/packaging/docker/README.md
+++ b/packaging/docker/README.md
@@ -47,7 +47,7 @@ taos> show databases;
Query OK, 1 row(s) in set (0.002843s)
```
-Since TDengine use container hostname to establish connections, it's a bit more complex to use taos shell and native connectors(such as JDBC-JNI) with TDengine container instance. This is the recommended way to expose ports and use TDengine with docker in simple cases. If you want to use taos shell or taosc/connectors smoothly outside the `tdengine` container, see next use cases that match you need.
+Since TDengine use container hostname to establish connections, it's a bit more complex to use TDengine CLI and native connectors(such as JDBC-JNI) with TDengine container instance. This is the recommended way to expose ports and use TDengine with docker in simple cases. If you want to use TDengine CLI or taosc/connectors smoothly outside the `tdengine` container, see next use cases that match you need.
### Start with host network
@@ -87,7 +87,7 @@ docker run -d \
This command starts a docker container with TDengine server running and maps the container's TCP ports from 6030 to 6049 to the host's ports from 6030 to 6049 with TCP protocol and UDP ports range 6030-6039 to the host's UDP ports 6030-6039. If the host is already running TDengine server and occupying the same port(s), you need to map the container's port to a different unused port segment. (Please see TDengine 2.0 Port Description for details). In order to support TDengine clients accessing TDengine server services, both TCP and UDP ports need to be exposed by default(unless `rpcForceTcp` is set to `1`).
-If you want to use taos shell or native connectors([JDBC-JNI](https://www.taosdata.com/cn/documentation/connector/java), or [driver-go](https://github.com/taosdata/driver-go)), you need to make sure the `TAOS_FQDN` is resolvable at `/etc/hosts` or with custom DNS service.
+If you want to use TDengine CLI or native connectors([JDBC-JNI](https://www.taosdata.com/cn/documentation/connector/java), or [driver-go](https://github.com/taosdata/driver-go)), you need to make sure the `TAOS_FQDN` is resolvable at `/etc/hosts` or with custom DNS service.
If you set the `TAOS_FQDN` to host's hostname, it will works as using `hosts` network like previous use case. Otherwise, like in `-e TAOS_FQDN=tdengine`, you can add the hostname record `tdengine` into `/etc/hosts` (use `127.0.0.1` here in host path, if use TDengine client/application in other hosts, you should set the right ip to the host eg. `192.168.10.1`(check the real ip in host with `hostname -i` or `ip route list default`) to make the TDengine endpoint resolvable):
@@ -158,7 +158,7 @@ When you build your application with docker, you should add the TDengine client
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -265,7 +265,7 @@ Full version of dockerfile could be:
```dockerfile
FROM golang:1.17.6-buster as builder
ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -279,7 +279,7 @@ RUN go env && go mod tidy && go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -391,7 +391,7 @@ test_td-1_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp,
test_td-2_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
```
-Check dnodes with taos shell:
+Check dnodes with TDengine CLI:
```bash
$ docker-compose exec td-1 taos -s "show dnodes"
diff --git a/packaging/release.bat b/packaging/release.bat
index 14534c8d7efd8cc923e3978d2b087847fc70dedf..591227382f9cec4a2fa1308a9b827994430f7236 100644
--- a/packaging/release.bat
+++ b/packaging/release.bat
@@ -40,7 +40,7 @@ if not exist %work_dir%\debug\ver-%2-x86 (
)
cd %work_dir%\debug\ver-%2-x64
call vcvarsall.bat x64
-cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -BUILD_TEST=false -DVERNUMBER=%2 -DCPUTYPE=x64
+cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DBUILD_TEST=false -DVERNUMBER=%2 -DCPUTYPE=x64
cmake --build .
rd /s /Q C:\TDengine
cmake --install .
diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh
new file mode 100755
index 0000000000000000000000000000000000000000..56da9e59be9379af9a1b96eadc88fe0e052e9863
--- /dev/null
+++ b/packaging/testpackage.sh
@@ -0,0 +1,245 @@
+#!/bin/sh
+# Positional parameters: packageName version originPackageName originVersion testFile(server|client|tools) password
+scriptDir=$(dirname $(readlink -f $0))
+packgeName=$1
+version=$2
+originPackageName=$3
+originversion=$4
+testFile=$5
+subFile="taos.tar.gz"
+password=$6
+
+# Color setting
+RED='\033[41;30m'
+GREEN='\033[1;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[1;34m'
+GREEN_DARK='\033[0;32m'
+YELLOW_DARK='\033[0;33m'
+BLUE_DARK='\033[0;34m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+if [ ${testFile} = "server" ];then
+ tdPath="TDengine-server-${version}"
+ originTdpPath="TDengine-server-${originversion}"
+ installCmd="install.sh"
+elif [ ${testFile} = "client" ];then
+ tdPath="TDengine-client-${version}"
+ originTdpPath="TDengine-client-${originversion}"
+ installCmd="install_client.sh"
+elif [ ${testFile} = "tools" ];then
+ tdPath="taosTools-${version}"
+ originTdpPath="taosTools-${originversion}"
+ installCmd="install-taostools.sh"
+fi
+
+function cmdInstall {
+command=$1
+if command -v ${command} ;then
+ echoColor YD "${command} is already installed"
+else
+ if command -v apt ;then
+ apt-get install ${command} -y
+ elif command -v yum ;then
+ yum -y install ${command}
+ echoColor YD "you should install ${command} manually"
+ fi
+fi
+}
+
+function echoColor {
+color=$1
+command=$2
+
+if [ ${color} = 'Y' ];then
+ echo -e "${YELLOW}${command}${NC}"
+elif [ ${color} = 'YD' ];then
+ echo -e "${YELLOW_DARK}${command}${NC}"
+elif [ ${color} = 'R' ];then
+ echo -e "${RED}${command}${NC}"
+elif [ ${color} = 'G' ];then
+ echo -e "${GREEN}${command}${NC}\r\n"
+elif [ ${color} = 'B' ];then
+ echo -e "${BLUE}${command}${NC}"
+elif [ ${color} = 'BD' ];then
+ echo -e "${BLUE_DARK}${command}${NC}"
+fi
+}
+
+
+echoColor G "===== install basesoft ====="
+
+cmdInstall tree
+cmdInstall wget
+cmdInstall sshpass
+
+echoColor G "===== Uninstall all components of TDeingne ====="
+
+if command -v rmtaos ;then
+ echoColor YD "uninstall all components of TDeingne:rmtaos"
+ rmtaos
+else
+ echoColor YD "os doesn't include TDengine"
+fi
+
+if command -v rmtaostools ;then
+ echoColor YD "uninstall all components of TDeingne:rmtaostools"
+ rmtaostools
+else
+ echoColor YD "os doesn't include rmtaostools "
+fi
+
+
+
+
+echoColor G "===== new workroom path ====="
+installPath="/usr/local/src/packageTest"
+oriInstallPath="/usr/local/src/packageTest/3.1"
+
+if [ ! -d ${installPath} ] ;then
+ echoColor BD "mkdir -p ${installPath}"
+ mkdir -p ${installPath}
+else
+ echoColor YD "${installPath} already exists"
+fi
+
+if [ -d ${installPath}/${tdPath} ] ;then
+ echoColor BD "rm -rf ${installPath}/${tdPath}/*"
+ rm -rf ${installPath}/${tdPath}/*
+fi
+
+if [ ! -d ${oriInstallPath} ] ;then
+ echoColor BD "mkdir -p ${oriInstallPath}"
+ mkdir -p ${oriInstallPath}
+else
+ echoColor YD "${oriInstallPath} already exists"
+fi
+
+if [ -d ${oriInstallPath}/${originTdpPath} ] ;then
+ echoColor BD "rm -rf ${oriInstallPath}/${originTdpPath}/*"
+ rm -rf ${oriInstallPath}/${originTdpPath}/*
+fi
+
+
+echoColor G "===== download installPackage ====="
+# cd ${installPath}
+# wget https://www.taosdata.com/assets-download/3.0/${packgeName}
+# cd ${oriInstallPath}
+# wget https://www.taosdata.com/assets-download/3.0/${originPackageName}
+
+cd ${installPath}
+cp -r ${scriptDir}/debRpmAutoInstall.sh .
+
+if [ ! -f {packgeName} ];then
+ echoColor BD "sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} ."
+ sshpass -p ${password} scp -oStrictHostKeyChecking=no -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} .
+fi
+
+packageSuffix=$(echo ${packgeName} | awk -F '.' '{print $NF}')
+
+
+if [ ! -f debRpmAutoInstall.sh ];then
+ echo '#!/usr/bin/expect ' > debRpmAutoInstall.sh
+ echo 'set packgeName [lindex $argv 0]' >> debRpmAutoInstall.sh
+ echo 'set packageSuffix [lindex $argv 1]' >> debRpmAutoInstall.sh
+ echo 'set timeout 3 ' >> debRpmAutoInstall.sh
+ echo 'if { ${packageSuffix} == "deb" } {' >> debRpmAutoInstall.sh
+ echo ' spawn dpkg -i ${packgeName} ' >> debRpmAutoInstall.sh
+ echo '} elseif { ${packageSuffix} == "rpm"} {' >> debRpmAutoInstall.sh
+ echo ' spawn rpm -ivh ${packgeName}' >> debRpmAutoInstall.sh
+ echo '}' >> debRpmAutoInstall.sh
+ echo 'expect "*one:"' >> debRpmAutoInstall.sh
+ echo 'send "\r"' >> debRpmAutoInstall.sh
+ echo 'expect "*skip:"' >> debRpmAutoInstall.sh
+ echo 'send "\r" ' >> debRpmAutoInstall.sh
+fi
+
+
+echoColor G "===== instal Package ====="
+
+if [[ ${packgeName} =~ "deb" ]];then
+ cd ${installPath}
+ dpkg -r taostools
+ dpkg -r tdengine
+ if [[ ${packgeName} =~ "TDengine" ]];then
+ echoColor BD "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}
+ else
+ echoColor BD "dpkg -i ${packgeName}" && dpkg -i ${packgeName}
+ fi
+elif [[ ${packgeName} =~ "rpm" ]];then
+ cd ${installPath}
+ sudo rpm -e tdengine
+ sudo rpm -e taostools
+ if [[ ${packgeName} =~ "TDengine" ]];then
+ echoColor BD "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}
+ else
+ echoColor BD "rpm -ivh ${packgeName}" && rpm -ivh ${packgeName}
+ fi
+elif [[ ${packgeName} =~ "tar" ]];then
+ echoColor G "===== check installPackage File of tar ====="
+ cd ${oriInstallPath}
+ if [ ! -f {originPackageName} ];then
+ echoColor YD "download base installPackage"
+ echoColor BD "sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} ."
+ sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} .
+ fi
+ echoColor YD "unzip the base installation package"
+ echoColor BD "tar -xf ${originPackageName}" && tar -xf ${originPackageName}
+ cd ${installPath}
+ echoColor YD "unzip the new installation package"
+ echoColor BD "tar -xf ${packgeName}" && tar -xf ${packgeName}
+
+ if [ ${testFile} != "tools" ] ;then
+ cd ${installPath}/${tdPath} && tar xf ${subFile}
+ cd ${oriInstallPath}/${originTdpPath} && tar xf ${subFile}
+ fi
+
+ cd ${oriInstallPath}/${originTdpPath} && tree > ${installPath}/base_${originversion}_checkfile
+ cd ${installPath}/${tdPath} && tree > ${installPath}/now_${version}_checkfile
+
+ cd ${installPath}
+ diff ${installPath}/base_${originversion}_checkfile ${installPath}/now_${version}_checkfile > ${installPath}/diffFile.log
+ diffNumbers=`cat ${installPath}/diffFile.log |wc -l `
+
+ if [ ${diffNumbers} != 0 ];then
+ echoColor R "The number and names of files is different from the previous installation package"
+ echoColor Y `cat ${installPath}/diffFile.log`
+ exit -1
+ else
+ echoColor G "The number and names of files are the same as previous installation packages"
+ fi
+ echoColor YD "===== install Package of tar ====="
+ cd ${installPath}/${tdPath}
+ if [ ${testFile} = "server" ];then
+ echoColor BD "bash ${installCmd} -e no "
+ bash ${installCmd} -e no
+ else
+ echoColor BD "bash ${installCmd} "
+ bash ${installCmd}
+ fi
+fi
+
+cd ${installPath}
+
+if ([[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]]) || [[ ${packgeName} =~ "client" ]] ;then
+ echoColor G "===== install taos-tools when package is lite or client ====="
+ cd ${installPath}
+ sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz .
+ # wget https://www.taosdata.com/assets-download/3.0/taosTools-2.1.2-Linux-x64.tar.gz
+ tar xf taosTools-2.1.2-Linux-x64.tar.gz
+ cd taosTools-2.1.2 && bash install-taostools.sh
+elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then
+ echoColor G "===== install taos-tools when package is lite or client ====="
+ cd ${installPath}
+ sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz .
+ tar xf taosTools-2.1.2-Linux-x64.tar.gz
+ cd taosTools-2.1.2 && bash install-taostools.sh
+elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then
+ echoColor G "===== install taos-tools when package is lite or client ====="
+ cd ${installPath}
+ sshpass -p ${password} scp -oStrictHostKeyChecking=no -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz .
+ tar xf taosTools-2.1.2-Linux-x64.tar.gz
+ cd taosTools-2.1.2 && bash install-taostools.sh
+fi
+
diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt
index f52edbe71f151398c5ebdcd705eab376f2318aae..e8e3c878496c58631131922cc6de47491d548f06 100644
--- a/source/client/CMakeLists.txt
+++ b/source/client/CMakeLists.txt
@@ -27,11 +27,18 @@ else()
INCLUDE_DIRECTORIES(jni/linux)
endif()
+set_target_properties(
+ taos
+ PROPERTIES
+ CLEAN_DIRECT_OUTPUT
+ 1
+)
+
set_target_properties(
taos
PROPERTIES
VERSION ${TD_VER_NUMBER}
- SOVERSION ${TD_VER_NUMBER}
+ SOVERSION 1
)
add_library(taos_static STATIC ${CLIENT_SRC})
diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h
index f275ae0885f10663b3c0ae853ecf1298fac25777..b8fa9580e70c1c7aa17a1402ce6c8113a7f8e094 100644
--- a/source/client/inc/clientInt.h
+++ b/source/client/inc/clientInt.h
@@ -52,15 +52,17 @@ enum {
RES_TYPE__QUERY = 1,
RES_TYPE__TMQ,
RES_TYPE__TMQ_META,
+ RES_TYPE__TAOSX,
};
#define SHOW_VARIABLES_RESULT_COLS 2
#define SHOW_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
-#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
-#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ)
-#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
+#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
+#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ || *(int8_t*)res == RES_TYPE__TAOSX)
+#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
+#define TD_RES_TMQ_TAOSX(res) (*(int8_t*)res == RES_TYPE__TAOSX)
typedef struct SAppInstInfo SAppInstInfo;
@@ -95,10 +97,17 @@ typedef struct {
} SClientHbMgr;
typedef struct SQueryExecMetric {
- int64_t start; // start timestamp, us
- int64_t parsed; // start to parse, us
- int64_t send; // start to send to server, us
- int64_t rsp; // receive response from server, us
+ int64_t start; // start timestamp, us
+ int64_t syntaxStart; // start to parse, us
+ int64_t syntaxEnd; // end to parse, us
+  int64_t ctgStart;    // start of catalog (table meta) fetch, us
+  int64_t ctgEnd;      // end of catalog fetch, us
+ int64_t semanticEnd;
+ int64_t planEnd;
+ int64_t resultReady;
+ int64_t execEnd;
+ int64_t send; // start to send to server, us
+ int64_t rsp; // receive response from server, us
} SQueryExecMetric;
struct SAppInstInfo {
@@ -132,6 +141,7 @@ typedef struct STscObj {
char db[TSDB_DB_FNAME_LEN];
char sVer[TSDB_VERSION_LEN];
char sDetailVer[128];
+ int8_t sysInfo;
int8_t connType;
int32_t acctId;
uint32_t connId;
@@ -192,8 +202,8 @@ typedef struct {
int32_t vgId;
SSchemaWrapper schema;
int32_t resIter;
- SMqDataRsp rsp;
SReqResultInfo resInfo;
+ SMqDataRsp rsp;
} SMqRspObj;
typedef struct {
@@ -204,6 +214,17 @@ typedef struct {
SMqMetaRsp metaRsp;
} SMqMetaRspObj;
+typedef struct {
+ int8_t resType;
+ char topic[TSDB_TOPIC_FNAME_LEN];
+ char db[TSDB_DB_FNAME_LEN];
+ int32_t vgId;
+ SSchemaWrapper schema;
+ int32_t resIter;
+ SReqResultInfo resInfo;
+ STaosxRsp rsp;
+} SMqTaosxRspObj;
+
typedef struct SRequestObj {
int8_t resType; // query or tmq
uint64_t requestId;
@@ -252,7 +273,7 @@ SRequestObj* execQuery(uint64_t connId, const char* sql, int sqlLen, bool valida
TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly);
void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly);
-int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols);
+int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols);
static FORCE_INLINE SReqResultInfo* tmqGetCurResInfo(TAOS_RES* res) {
SMqRspObj* msg = (SMqRspObj*)res;
@@ -363,8 +384,9 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData*
int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest);
int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList);
void doAsyncQuery(SRequestObj* pRequest, bool forceUpdateMeta);
-int32_t removeMeta(STscObj* pTscObj, SArray* tbList); // todo move to clientImpl.c and become a static function
-int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog); // todo move to xxx
+int32_t removeMeta(STscObj* pTscObj, SArray* tbList);
+int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog);
+int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog);
bool qnodeRequired(SRequestObj* pRequest);
#ifdef __cplusplus
diff --git a/source/client/inc/clientLog.h b/source/client/inc/clientLog.h
index d47edcd79535a3c8fc5d94aabd3bd8b08d0448f7..ec0a41a68f9515bc7ea2c54e96b0235c0a9683eb 100644
--- a/source/client/inc/clientLog.h
+++ b/source/client/inc/clientLog.h
@@ -29,6 +29,7 @@ extern "C" {
#define tscDebug(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
#define tscTrace(...) do { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC ", DEBUG_TRACE, cDebugFlag, __VA_ARGS__); }} while(0)
#define tscDebugL(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
+#define tscPerf(...) do { taosPrintLog("TSC ", 0, cDebugFlag, __VA_ARGS__); } while(0)
#ifdef __cplusplus
}
diff --git a/source/client/src/TMQConnector.c b/source/client/src/TMQConnector.c
index 17d3a212c482c3462e542721d7d57f516250ff13..fcf6957df92e92b990c60cd3b41342dbbf90ae9e 100644
--- a/source/client/src/TMQConnector.c
+++ b/source/client/src/TMQConnector.c
@@ -42,6 +42,7 @@ void commit_cb(tmq_t *tmq, int32_t code, void *param) {
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqConfNewImp(JNIEnv *env, jobject jobj) {
tmq_conf_t *conf = tmq_conf_new();
+ jniGetGlobalMethod(env);
return (jlong)conf;
}
diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c
index 99ecab9642b6fd164572957c50f409172c5dc654..bf92a9ba6af41d8c3048684ed36eec84cc0a6235 100644
--- a/source/client/src/clientEnv.c
+++ b/source/client/src/clientEnv.c
@@ -69,14 +69,26 @@ static void deregisterRequest(SRequestObj *pRequest) {
int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1);
int32_t num = atomic_sub_fetch_32(&pTscObj->numOfReqs, 1);
- int64_t duration = taosGetTimestampUs() - pRequest->metric.start;
+ int64_t nowUs = taosGetTimestampUs();
+ int64_t duration = nowUs - pRequest->metric.start;
tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64 " elapsed:%" PRIu64
" ms, current:%d, app current:%d",
pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst);
if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) {
+ tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
+ "us, exec:%" PRId64 "us",
+ duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
+ pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
+ pRequest->metric.execEnd - pRequest->metric.semanticEnd);
atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
+ tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
+ "us, planner:%" PRId64 "us, exec:%" PRId64 "us",
+ duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
+ pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
+ pRequest->metric.planEnd - pRequest->metric.semanticEnd,
+ pRequest->metric.resultReady - pRequest->metric.planEnd);
atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
}
@@ -330,7 +342,6 @@ void doDestroyRequest(void *p) {
schedulerFreeJob(&pRequest->body.queryJob, 0);
taosMemoryFreeClear(pRequest->msgBuf);
- taosMemoryFreeClear(pRequest->sqlstr);
taosMemoryFreeClear(pRequest->pDb);
doFreeReqResultInfo(&pRequest->body.resInfo);
@@ -349,6 +360,7 @@ void doDestroyRequest(void *p) {
taosMemoryFree(pRequest->body.param);
}
+ taosMemoryFreeClear(pRequest->sqlstr);
taosMemoryFree(pRequest);
tscTrace("end to destroy request %" PRIx64 " p:%p", reqId, pRequest);
}
diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c
index 9475d1b51e51d093bcf7335d1668908e0c039a80..56e3527f9684e17f58c7b0fca5bb7a3fa6210d1c 100644
--- a/source/client/src/clientHb.c
+++ b/source/client/src/clientHb.c
@@ -145,7 +145,7 @@ static int32_t hbProcessStbInfoRsp(void *value, int32_t valueLen, struct SCatalo
}
static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
- SClientHbReq *pReq = taosHashGet(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey));
+ SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey));
if (NULL == pReq) {
tscWarn("pReq to get activeInfo, may be dropped, refId:%" PRIx64 ", type:%d", pRsp->connKey.tscRid,
pRsp->connKey.connType);
@@ -260,6 +260,8 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
}
}
+ taosHashRelease(pAppHbMgr->activeInfo, pReq);
+
return TSDB_CODE_SUCCESS;
}
@@ -914,10 +916,11 @@ int hbRegisterConn(SAppHbMgr *pAppHbMgr, int64_t tscRefId, int64_t clusterId, in
}
void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey) {
- SClientHbReq *pReq = taosHashGet(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
+ SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
if (pReq) {
tFreeClientHbReq(pReq);
taosHashRemove(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
+ taosHashRelease(pAppHbMgr->activeInfo, pReq);
}
if (NULL == pReq) {
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index 5f0af55d13c3e3c79f796f5f34f31dff121f1281..f91ceb31840bbf8dccd9144d5a12a41e7f2f358a 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -215,6 +215,7 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
.pUser = pTscObj->user,
.schemalessType = pTscObj->schemalessType,
.isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)),
+ .enableSysInfo = pTscObj->sysInfo,
.svrVer = pTscObj->sVer,
.nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes)};
@@ -246,7 +247,7 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
int32_t execLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
SRetrieveTableRsp* pRsp = NULL;
- int32_t code = qExecCommand(pQuery->pRoot, &pRsp);
+ int32_t code = qExecCommand(pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true);
}
@@ -284,7 +285,7 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
return;
}
- int32_t code = qExecCommand(pQuery->pRoot, &pRsp);
+ int32_t code = qExecCommand(pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true);
}
@@ -419,7 +420,8 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra
.showRewrite = pQuery->showRewrite,
.pMsg = pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
- .pUser = pRequest->pTscObj->user};
+ .pUser = pRequest->pTscObj->user,
+ .sysInfo = pRequest->pTscObj->sysInfo};
return qCreateQueryPlan(&cxt, pPlan, pNodeList);
}
@@ -721,6 +723,12 @@ int32_t handleSubmitExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog
for (int32_t i = 0; i < pRsp->nBlocks; ++i) {
SSubmitBlkRsp* blk = pRsp->pBlocks + i;
+ if (blk->pMeta) {
+ handleCreateTbExecRes(blk->pMeta, pCatalog);
+ tFreeSTableMetaRsp(blk->pMeta);
+ taosMemoryFreeClear(blk->pMeta);
+ }
+
if (NULL == blk->tblFName || 0 == blk->tblFName[0]) {
continue;
}
@@ -780,6 +788,10 @@ int32_t handleAlterTbExecRes(void* res, SCatalog* pCatalog) {
return catalogUpdateTableMeta(pCatalog, (STableMetaRsp*)res);
}
+int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog) {
+ return catalogUpdateTableMeta(pCatalog, (STableMetaRsp*)res);
+}
+
int32_t handleQueryExecRsp(SRequestObj* pRequest) {
if (NULL == pRequest->body.resInfo.execRes.res) {
return TSDB_CODE_SUCCESS;
@@ -802,6 +814,19 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) {
code = handleAlterTbExecRes(pRes->res, pCatalog);
break;
}
+ case TDMT_VND_CREATE_TABLE: {
+ SArray* pList = (SArray*)pRes->res;
+ int32_t num = taosArrayGetSize(pList);
+ for (int32_t i = 0; i < num; ++i) {
+ void* res = taosArrayGetP(pList, i);
+ code = handleCreateTbExecRes(res, pCatalog);
+ }
+ break;
+ }
+ case TDMT_MND_CREATE_STB: {
+ code = handleCreateTbExecRes(pRes->res, pCatalog);
+ break;
+ }
case TDMT_VND_SUBMIT: {
atomic_add_fetch_64((int64_t*)&pAppInfo->summary.insertBytes, pRes->numOfBytes);
@@ -826,6 +851,8 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
SRequestObj* pRequest = (SRequestObj*)param;
pRequest->code = code;
+ pRequest->metric.resultReady = taosGetTimestampUs();
+
if (pResult) {
memcpy(&pRequest->body.resInfo.execRes, pResult, sizeof(*pResult));
}
@@ -842,6 +869,8 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
}
schedulerFreeJob(&pRequest->body.queryJob, 0);
+
+ pRequest->metric.execEnd = taosGetTimestampUs();
}
taosMemoryFree(pResult);
@@ -859,17 +888,13 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
return;
}
- if (code == TSDB_CODE_SUCCESS) {
- code = handleQueryExecRsp(pRequest);
- ASSERT(pRequest->code == TSDB_CODE_SUCCESS);
- pRequest->code = code;
- }
-
tscDebug("schedulerExecCb request type %s", TMSG_INFO(pRequest->type));
- if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) {
+ if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type) && NULL == pRequest->body.resInfo.execRes.res) {
removeMeta(pTscObj, pRequest->targetTableList);
}
+ handleQueryExecRsp(pRequest);
+
// return to client
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
}
@@ -930,6 +955,10 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue
qDestroyQuery(pQuery);
}
+ if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type) && NULL == pRequest->body.resInfo.execRes.res) {
+ removeMeta(pRequest->pTscObj, pRequest->targetTableList);
+ }
+
handleQueryExecRsp(pRequest);
if (NULL != pRequest && TSDB_CODE_SUCCESS != code) {
@@ -990,7 +1019,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
.showRewrite = pQuery->showRewrite,
.pMsg = pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
- .pUser = pRequest->pTscObj->user};
+ .pUser = pRequest->pTscObj->user,
+ .sysInfo = pRequest->pTscObj->sysInfo};
SAppInstInfo* pAppInfo = getAppInfo(pRequest);
SQueryPlan* pDag = NULL;
@@ -1002,6 +1032,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
pRequest->body.subplanNum = pDag->numOfSubplans;
}
+ pRequest->metric.planEnd = taosGetTimestampUs();
+
if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {
SArray* pNodeList = NULL;
buildAsyncExecNodeList(pRequest, &pNodeList, pMnodeList, pResultMeta);
@@ -1127,10 +1159,6 @@ SRequestObj* execQuery(uint64_t connId, const char* sql, int sqlLen, bool valida
inRetry = true;
} while (retryNum++ < REQUEST_TOTAL_EXEC_TIMES);
- if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) {
- removeMeta(pRequest->pTscObj, pRequest->targetTableList);
- }
-
return pRequest;
}
@@ -1575,10 +1603,11 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
}
int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) {
- int32_t cols = *(int32_t*) (p + sizeof(int32_t) * 3);
+ int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
ASSERT(numOfCols == cols);
- return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t)*3 + sizeof(uint64_t) + numOfCols * (sizeof(int8_t) + sizeof(int32_t));
+ return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) * 3 + sizeof(uint64_t) +
+ numOfCols * (sizeof(int8_t) + sizeof(int32_t));
}
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
@@ -1950,7 +1979,7 @@ _OVER:
int32_t appendTbToReq(SHashObj* pHash, int32_t pos1, int32_t len1, int32_t pos2, int32_t len2, const char* str,
int32_t acctId, char* db) {
- SName name;
+ SName name = {0};
if (len1 <= 0) {
return -1;
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index f449641f1008e79a58e02786a855711dbaeb6b9c..30860780807a820b041e27729f8e351fb46c99b3 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -184,6 +184,19 @@ void taos_free_result(TAOS_RES *res) {
SRequestObj *pRequest = (SRequestObj *)res;
tscDebug("0x%" PRIx64 " taos_free_result start to free query", pRequest->requestId);
destroyRequest(pRequest);
+ } else if (TD_RES_TMQ_TAOSX(res)) {
+ SMqTaosxRspObj *pRsp = (SMqTaosxRspObj *)res;
+ if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
+ if (pRsp->rsp.blockDataLen) taosArrayDestroy(pRsp->rsp.blockDataLen);
+ if (pRsp->rsp.withTbName) taosArrayDestroyP(pRsp->rsp.blockTbName, taosMemoryFree);
+ if (pRsp->rsp.withSchema) taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
+ // taosx
+ taosArrayDestroy(pRsp->rsp.createTableLen);
+ taosArrayDestroyP(pRsp->rsp.createTableReq, taosMemoryFree);
+
+ pRsp->resInfo.pRspMsg = NULL;
+ doFreeReqResultInfo(&pRsp->resInfo);
+ taosMemoryFree(pRsp);
} else if (TD_RES_TMQ(res)) {
SMqRspObj *pRsp = (SMqRspObj *)res;
if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
@@ -685,6 +698,8 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
SQuery *pQuery = pWrapper->pQuery;
SRequestObj *pRequest = pWrapper->pRequest;
+ pRequest->metric.ctgEnd = taosGetTimestampUs();
+
if (code == TSDB_CODE_SUCCESS) {
code = qAnalyseSqlSemantic(pWrapper->pCtx, &pWrapper->catalogReq, pResultMeta, pQuery);
pRequest->stableQuery = pQuery->stableQuery;
@@ -693,6 +708,8 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
}
}
+ pRequest->metric.semanticEnd = taosGetTimestampUs();
+
if (code == TSDB_CODE_SUCCESS) {
if (pQuery->haveResultSet) {
setResSchemaInfo(&pRequest->body.resInfo, pQuery->pResSchema, pQuery->numOfResCols);
@@ -755,6 +772,7 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) {
.pUser = pTscObj->user,
.schemalessType = pTscObj->schemalessType,
.isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)),
+ .enableSysInfo = pTscObj->sysInfo,
.async = true,
.svrVer = pTscObj->sVer,
.nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes)};
@@ -784,12 +802,16 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
SQuery *pQuery = NULL;
+ pRequest->metric.syntaxStart = taosGetTimestampUs();
+
SCatalogReq catalogReq = {.forceUpdate = updateMetaForce, .qNodeRequired = qnodeRequired(pRequest)};
code = qParseSqlSyntax(pCxt, &pQuery, &catalogReq);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
+ pRequest->metric.syntaxEnd = taosGetTimestampUs();
+
if (!updateMetaForce) {
STscObj *pTscObj = pRequest->pTscObj;
SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
@@ -816,6 +838,8 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
.requestObjRefId = pCxt->requestRid,
.mgmtEps = pCxt->mgmtEpSet};
+ pRequest->metric.ctgStart = taosGetTimestampUs();
+
code = catalogAsyncGetAllMeta(pCxt->pCatalog, &conn, &catalogReq, retrieveMetaCallback, pWrapper,
&pRequest->body.queryJob);
pCxt = NULL;
diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c
index 0c4cf23c4e1708f4479a1b744dea37752513670d..a7a16d484ca10a8baa65419105f42e46dc3814f3 100644
--- a/source/client/src/clientMsgHandler.c
+++ b/source/client/src/clientMsgHandler.c
@@ -96,6 +96,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
connectRsp.epSet.eps[i].fqdn, connectRsp.epSet.eps[i].port, pTscObj->id);
}
+ pTscObj->sysInfo = connectRsp.sysInfo;
pTscObj->connId = connectRsp.connId;
pTscObj->acctId = connectRsp.acctId;
tstrncpy(pTscObj->sVer, connectRsp.sVer, tListLen(pTscObj->sVer));
@@ -232,13 +233,36 @@ int32_t processCreateSTableRsp(void* param, SDataBuf* pMsg, int32_t code) {
assert(pMsg != NULL && param != NULL);
SRequestObj* pRequest = param;
- taosMemoryFree(pMsg->pData);
if (code != TSDB_CODE_SUCCESS) {
setErrno(pRequest, code);
+ } else {
+ SMCreateStbRsp createRsp = {0};
+ SDecoder coder = {0};
+ tDecoderInit(&coder, pMsg->pData, pMsg->len);
+ tDecodeSMCreateStbRsp(&coder, &createRsp);
+ tDecoderClear(&coder);
+
+ pRequest->body.resInfo.execRes.msgType = TDMT_MND_CREATE_STB;
+ pRequest->body.resInfo.execRes.res = createRsp.pMeta;
}
+ taosMemoryFree(pMsg->pData);
+
if (pRequest->body.queryFp != NULL) {
- removeMeta(pRequest->pTscObj, pRequest->tableList);
+ SExecResult* pRes = &pRequest->body.resInfo.execRes;
+
+ if (code == TSDB_CODE_SUCCESS) {
+ SCatalog* pCatalog = NULL;
+ int32_t ret = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (pRes->res != NULL) {
+ ret = handleCreateTbExecRes(pRes->res, pCatalog);
+ }
+
+ if (ret != TSDB_CODE_SUCCESS) {
+ code = ret;
+ }
+ }
+
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
} else {
tsem_post(&pRequest->body.rspSem);
diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c
index 9f905a835241d54722cf3e15056d1d1019123dcf..c6ba2d68921a3751bf5b0ab61447dcac6a9a820e 100644
--- a/source/client/src/clientSml.c
+++ b/source/client/src/clientSml.c
@@ -85,8 +85,11 @@ typedef TSDB_SML_PROTOCOL_TYPE SMLProtocolType;
typedef enum {
SCHEMA_ACTION_NULL,
- SCHEMA_ACTION_COLUMN,
- SCHEMA_ACTION_TAG
+ SCHEMA_ACTION_CREATE_STABLE,
+ SCHEMA_ACTION_ADD_COLUMN,
+ SCHEMA_ACTION_ADD_TAG,
+ SCHEMA_ACTION_CHANGE_COLUMN_SIZE,
+ SCHEMA_ACTION_CHANGE_TAG_SIZE,
} ESchemaAction;
typedef struct {
@@ -219,7 +222,7 @@ static int32_t smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const
static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSmlKv *kv, bool isTag,
ESchemaAction *action, SSmlHandle *info) {
- uint16_t *index = (uint16_t *)taosHashGet(colHash, kv->key, kv->keyLen);
+ uint16_t *index = colHash ? (uint16_t *)taosHashGet(colHash, kv->key, kv->keyLen) : NULL;
if (index) {
if (colField[*index].type != kv->type) {
uError("SML:0x%" PRIx64 " point type and db type mismatch. key: %s. point type: %d, db type: %d", info->id,
@@ -232,16 +235,16 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm
(colField[*index].type == TSDB_DATA_TYPE_NCHAR &&
((colField[*index].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE < kv->length))) {
if (isTag) {
- *action = SCHEMA_ACTION_TAG;
+ *action = SCHEMA_ACTION_CHANGE_TAG_SIZE;
} else {
- *action = SCHEMA_ACTION_COLUMN;
+ *action = SCHEMA_ACTION_CHANGE_COLUMN_SIZE;
}
}
} else {
if (isTag) {
- *action = SCHEMA_ACTION_TAG;
+ *action = SCHEMA_ACTION_ADD_TAG;
} else {
- *action = SCHEMA_ACTION_COLUMN;
+ *action = SCHEMA_ACTION_ADD_COLUMN;
}
}
return 0;
@@ -310,9 +313,31 @@ static int32_t getBytes(uint8_t type, int32_t length){
}
}
+static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashObj *schemaHash, SArray *cols, SArray* results, int32_t numOfCols, bool isTag) {
+ for (int j = 0; j < taosArrayGetSize(cols); ++j) {
+ SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, j);
+ ESchemaAction action = SCHEMA_ACTION_NULL;
+ smlGenerateSchemaAction(schemaField, schemaHash, kv, isTag, &action, info);
+ if(action == SCHEMA_ACTION_ADD_COLUMN || action == SCHEMA_ACTION_ADD_TAG){
+ SField field = {0};
+ field.type = kv->type;
+ field.bytes = getBytes(kv->type, kv->length);
+ memcpy(field.name, kv->key, kv->keyLen);
+ taosArrayPush(results, &field);
+ }else if(action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE || action == SCHEMA_ACTION_CHANGE_TAG_SIZE){
+ uint16_t *index = (uint16_t *)taosHashGet(schemaHash, kv->key, kv->keyLen);
+ uint16_t newIndex = *index;
+ if(isTag) newIndex -= numOfCols;
+ SField *field = (SField *)taosArrayGet(results, newIndex);
+ field->bytes = getBytes(kv->type, kv->length);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
//static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *sTableData,
// int32_t colVer, int32_t tagVer, int8_t source, uint64_t suid){
-static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *sTableData,
+static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray* pColumns, SArray* pTags,
STableMeta *pTableMeta, ESchemaAction action){
SRequestObj* pRequest = NULL;
@@ -320,101 +345,58 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *s
int32_t code = TSDB_CODE_SUCCESS;
SCmdMsgInfo pCmdMsg = {0};
+ // put front for free
+ pReq.numOfColumns = taosArrayGetSize(pColumns);
+ pReq.pColumns = pColumns;
+ pReq.numOfTags = taosArrayGetSize(pTags);
+ pReq.pTags = pTags;
+
code = buildRequest(info->taos->id, "", 0, NULL, false, &pRequest);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
+ pRequest->syncQuery = true;
if (!pRequest->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
goto end;
}
- if (action == SCHEMA_ACTION_NULL){
+ if (action == SCHEMA_ACTION_CREATE_STABLE){
pReq.colVer = 1;
pReq.tagVer = 1;
pReq.suid = 0;
pReq.source = TD_REQ_FROM_APP;
- } else if (action == SCHEMA_ACTION_TAG){
+ } else if (action == SCHEMA_ACTION_ADD_TAG || action == SCHEMA_ACTION_CHANGE_TAG_SIZE){
pReq.colVer = pTableMeta->sversion;
pReq.tagVer = pTableMeta->tversion + 1;
pReq.suid = pTableMeta->uid;
pReq.source = TD_REQ_FROM_TAOX;
- } else if (action == SCHEMA_ACTION_COLUMN){
+ } else if (action == SCHEMA_ACTION_ADD_COLUMN || action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE){
pReq.colVer = pTableMeta->sversion + 1;
pReq.tagVer = pTableMeta->tversion;
pReq.suid = pTableMeta->uid;
pReq.source = TD_REQ_FROM_TAOX;
}
+ if (pReq.numOfTags == 0){
+ pReq.numOfTags = 1;
+ SField field = {0};
+ field.type = TSDB_DATA_TYPE_NCHAR;
+ field.bytes = 1;
+ strcpy(field.name, tsSmlTagName);
+ taosArrayPush(pReq.pTags, &field);
+ }
+
pReq.commentLen = -1;
pReq.igExists = true;
tNameExtractFullName(pName, pReq.name);
- if(action == SCHEMA_ACTION_NULL || action == SCHEMA_ACTION_COLUMN){
- pReq.numOfColumns = taosArrayGetSize(sTableData->cols);
- pReq.pColumns = taosArrayInit(pReq.numOfColumns, sizeof(SField));
- for (int i = 0; i < pReq.numOfColumns; i++) {
- SSmlKv *kv = (SSmlKv *)taosArrayGetP(sTableData->cols, i);
- SField field = {0};
- field.type = kv->type;
- field.bytes = getBytes(kv->type, kv->length);
- memcpy(field.name, kv->key, kv->keyLen);
- taosArrayPush(pReq.pColumns, &field);
- }
- }else if (action == SCHEMA_ACTION_TAG){
- pReq.numOfColumns = pTableMeta->tableInfo.numOfColumns;
- pReq.pColumns = taosArrayInit(pReq.numOfColumns, sizeof(SField));
- for (int i = 0; i < pReq.numOfColumns; i++) {
- SSchema *s = &pTableMeta->schema[i];
- SField field = {0};
- field.type = s->type;
- field.bytes = s->bytes;
- strcpy(field.name, s->name);
- taosArrayPush(pReq.pColumns, &field);
- }
- }
-
- if(action == SCHEMA_ACTION_NULL || action == SCHEMA_ACTION_TAG){
- pReq.numOfTags = taosArrayGetSize(sTableData->tags);
- if (pReq.numOfTags == 0){
- pReq.numOfTags = 1;
- pReq.pTags = taosArrayInit(pReq.numOfTags, sizeof(SField));
- SField field = {0};
- field.type = TSDB_DATA_TYPE_NCHAR;
- field.bytes = 1;
- strcpy(field.name, tsSmlTagName);
- taosArrayPush(pReq.pTags, &field);
- }else{
- pReq.pTags = taosArrayInit(pReq.numOfTags, sizeof(SField));
- for (int i = 0; i < pReq.numOfTags; i++) {
- SSmlKv *kv = (SSmlKv *)taosArrayGetP(sTableData->tags, i);
- SField field = {0};
- field.type = kv->type;
- field.bytes = getBytes(kv->type, kv->length);
- memcpy(field.name, kv->key, kv->keyLen);
- taosArrayPush(pReq.pTags, &field);
- }
- }
- }else if (action == SCHEMA_ACTION_COLUMN){
- pReq.numOfTags = pTableMeta->tableInfo.numOfTags;
- pReq.pTags = taosArrayInit(pReq.numOfTags, sizeof(SField));
- for (int i = 0; i < pReq.numOfTags; i++) {
- SSchema *s = &pTableMeta->schema[i + pTableMeta->tableInfo.numOfColumns];
- SField field = {0};
- field.type = s->type;
- field.bytes = s->bytes;
- strcpy(field.name, s->name);
- taosArrayPush(pReq.pTags, &field);
- }
- }
-
pCmdMsg.epSet = getEpSet_s(&info->taos->pAppInfo->mgmtEp);
pCmdMsg.msgType = TDMT_MND_CREATE_STB;
pCmdMsg.msgLen = tSerializeSMCreateStbReq(NULL, 0, &pReq);
pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
if (NULL == pCmdMsg.pMsg) {
- tFreeSMCreateStbReq(&pReq);
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
@@ -442,7 +424,10 @@ end:
}
static int32_t smlModifyDBSchemas(SSmlHandle *info) {
- int32_t code = 0;
+ int32_t code = 0;
+ SHashObj *hashTmp = NULL;
+ STableMeta *pTableMeta = NULL;
+
SName pName = {TSDB_TABLE_NAME_T, info->taos->acctId, {0}, {0}};
strcpy(pName.dbname, info->pRequest->pDb);
@@ -455,7 +440,6 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
SSmlSTableMeta **tableMetaSml = (SSmlSTableMeta **)taosHashIterate(info->superTables, NULL);
while (tableMetaSml) {
SSmlSTableMeta *sTableData = *tableMetaSml;
- STableMeta *pTableMeta = NULL;
bool needCheckMeta = false; // for multi thread
size_t superTableLen = 0;
@@ -466,14 +450,19 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST || code == TSDB_CODE_MND_STB_NOT_EXIST) {
- code = smlSendMetaMsg(info, &pName, sTableData, NULL, SCHEMA_ACTION_NULL);
+ SArray* pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols), sizeof(SField));
+ SArray* pTags = taosArrayInit(taosArrayGetSize(sTableData->tags), sizeof(SField));
+ smlBuildFieldsList(info, NULL, NULL, sTableData->tags, pTags, 0, true);
+ smlBuildFieldsList(info, NULL, NULL, sTableData->cols, pColumns, 0, false);
+
+ code = smlSendMetaMsg(info, &pName, pColumns, pTags, NULL, SCHEMA_ACTION_CREATE_STABLE);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable);
+ uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname);
goto end;
}
info->cost.numOfCreateSTables++;
} else if (code == TSDB_CODE_SUCCESS) {
- SHashObj *hashTmp = taosHashInit(pTableMeta->tableInfo.numOfTags,
+ hashTmp = taosHashInit(pTableMeta->tableInfo.numOfTags,
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
for (uint16_t i = pTableMeta->tableInfo.numOfColumns;
i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) {
@@ -483,36 +472,72 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
ESchemaAction action = SCHEMA_ACTION_NULL;
code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->tags, &action, true);
if (code != TSDB_CODE_SUCCESS) {
- taosHashCleanup(hashTmp);
goto end;
}
- if (action == SCHEMA_ACTION_TAG){
- code = smlSendMetaMsg(info, &pName, sTableData, pTableMeta, action);
+ if (action != SCHEMA_ACTION_NULL){
+ SArray* pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols) + pTableMeta->tableInfo.numOfColumns, sizeof(SField));
+ SArray* pTags = taosArrayInit(taosArrayGetSize(sTableData->tags) + pTableMeta->tableInfo.numOfTags, sizeof(SField));
+
+ for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) {
+ SField field = {0};
+ field.type = pTableMeta->schema[i].type;
+ field.bytes = pTableMeta->schema[i].bytes;
+ strcpy(field.name, pTableMeta->schema[i].name);
+ if(i < pTableMeta->tableInfo.numOfColumns){
+ taosArrayPush(pColumns, &field);
+ }else{
+ taosArrayPush(pTags, &field);
+ }
+ }
+ smlBuildFieldsList(info, pTableMeta->schema, hashTmp, sTableData->tags, pTags, pTableMeta->tableInfo.numOfColumns, true);
+
+ code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable);
+ uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname);
goto end;
}
}
+ taosMemoryFreeClear(pTableMeta);
code = catalogRefreshTableMeta(info->pCatalog, &conn, &pName, -1);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
+ code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
taosHashClear(hashTmp);
- for (uint16_t i = 1; i < pTableMeta->tableInfo.numOfColumns; i++) {
+ for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
taosHashPut(hashTmp, pTableMeta->schema[i].name, strlen(pTableMeta->schema[i].name), &i, SHORT_BYTES);
}
action = SCHEMA_ACTION_NULL;
code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->cols, &action, false);
- taosHashCleanup(hashTmp);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
- if (action == SCHEMA_ACTION_COLUMN){
- code = smlSendMetaMsg(info, &pName, sTableData, pTableMeta, action);
+ if (action != SCHEMA_ACTION_NULL){
+ SArray* pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols) + pTableMeta->tableInfo.numOfColumns, sizeof(SField));
+ SArray* pTags = taosArrayInit(taosArrayGetSize(sTableData->tags) + pTableMeta->tableInfo.numOfTags, sizeof(SField));
+
+ for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) {
+ SField field = {0};
+ field.type = pTableMeta->schema[i].type;
+ field.bytes = pTableMeta->schema[i].bytes;
+ strcpy(field.name, pTableMeta->schema[i].name);
+ if(i < pTableMeta->tableInfo.numOfColumns){
+ taosArrayPush(pColumns, &field);
+ }else{
+ taosArrayPush(pTags, &field);
+ }
+ }
+
+ smlBuildFieldsList(info, pTableMeta->schema, hashTmp, sTableData->cols, pColumns, pTableMeta->tableInfo.numOfColumns, false);
+
+ code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable);
+ uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname);
goto end;
}
}
@@ -526,11 +551,11 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
uError("SML:0x%" PRIx64 " load table meta error: %s", info->id, tstrerror(code));
goto end;
}
- if (pTableMeta) taosMemoryFree(pTableMeta);
+ taosMemoryFreeClear(pTableMeta);
code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " catalogGetSTableMeta failed. super table name %s", info->id, (char *)superTable);
+ uError("SML:0x%" PRIx64 " catalogGetSTableMeta failed. super table name %s", info->id, pName.tname);
goto end;
}
@@ -538,12 +563,12 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
code = smlCheckMeta(&(pTableMeta->schema[pTableMeta->tableInfo.numOfColumns]), pTableMeta->tableInfo.numOfTags,
sTableData->tags, true);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " check tag failed. super table name %s", info->id, (char *)superTable);
+ uError("SML:0x%" PRIx64 " check tag failed. super table name %s", info->id, pName.tname);
goto end;
}
code = smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols, false);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " check cols failed. super table name %s", info->id, (char *)superTable);
+ uError("SML:0x%" PRIx64 " check cols failed. super table name %s", info->id, pName.tname);
goto end;
}
}
@@ -551,10 +576,13 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
sTableData->tableMeta = pTableMeta;
tableMetaSml = (SSmlSTableMeta **)taosHashIterate(info->superTables, tableMetaSml);
+ taosHashCleanup(hashTmp);
}
return 0;
end:
+ taosHashCleanup(hashTmp);
+ taosMemoryFreeClear(pTableMeta);
catalogRefreshTableMeta(info->pCatalog, &conn, &pName, 1);
return code;
}
@@ -1531,7 +1559,7 @@ cleanup:
/************* TSDB_SML_JSON_PROTOCOL function start **************/
static int32_t smlJsonCreateSring(const char **output, char *input, int32_t inputLen) {
- *output = (const char *)taosMemoryMalloc(inputLen);
+ *output = (const char *)taosMemoryCalloc(1, inputLen);
if (*output == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -2057,10 +2085,6 @@ static int32_t smlParseInfluxLine(SSmlHandle *info, const char *sql) {
if (info->dataFormat) taosArrayDestroy(cols);
return ret;
}
- if (taosArrayGetSize(cols) > TSDB_MAX_COLUMNS) {
- smlBuildInvalidDataMsg(&info->msgBuf, "too many columns than 4096", NULL);
- return TSDB_CODE_PAR_TOO_MANY_COLUMNS;
- }
bool hasTable = true;
SSmlTableInfo *tinfo = NULL;
@@ -2094,6 +2118,11 @@ static int32_t smlParseInfluxLine(SSmlHandle *info, const char *sql) {
return TSDB_CODE_PAR_INVALID_TAGS_NUM;
}
+ if (taosArrayGetSize(cols) + taosArrayGetSize((*oneTable)->tags) > TSDB_MAX_COLUMNS) {
+ smlBuildInvalidDataMsg(&info->msgBuf, "too many columns than 4096", NULL);
+ return TSDB_CODE_PAR_TOO_MANY_COLUMNS;
+ }
+
(*oneTable)->sTableName = elements.measure;
(*oneTable)->sTableNameLen = elements.measureLen;
if (strlen((*oneTable)->childTableName) == 0) {
@@ -2421,9 +2450,11 @@ static void smlInsertCallback(void *param, void *res, int32_t code) {
uDebug("SML:0x%" PRIx64 " result. code:%d, msg:%s", info->id, pRequest->code, pRequest->msgBuf);
// lock
taosThreadSpinLock(&info->params->lock);
- info->params->request->body.resInfo.numOfRows += rows;
if (code != TSDB_CODE_SUCCESS) {
info->params->request->code = code;
+ info->params->request->body.resInfo.numOfRows += rows;
+ }else{
+ info->params->request->body.resInfo.numOfRows += info->affectedRows;
}
taosThreadSpinUnlock(&info->params->lock);
// unlock
diff --git a/source/client/src/taosx.c b/source/client/src/taosx.c
index 677567e38ffcecefaa72373ac02a976cb2078676..f016120a1f5cbe1b4baf4e42f76a0fea7c80d353 100644
--- a/source/client/src/taosx.c
+++ b/source/client/src/taosx.c
@@ -765,6 +765,29 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
}
taosArrayPush(pRequest->tableList, &pName);
+ // change tag cid to new cid
+ if(pCreateReq->type == TSDB_CHILD_TABLE){
+ STableMeta* pTableMeta = NULL;
+ SName sName = {0};
+ toName(pTscObj->acctId, pRequest->pDb, pCreateReq->ctb.name, &sName);
+ code = catalogGetTableMeta(pCatalog, &conn, &sName, &pTableMeta);
+ if(code != TSDB_CODE_SUCCESS){
+ uError("taosCreateTable:catalogGetTableMeta failed. table name: %s", pCreateReq->ctb.name);
+ goto end;
+ }
+
+ for(int32_t i = 0; i < taosArrayGetSize(pCreateReq->ctb.tagName); i++){
+ char* tName = taosArrayGet(pCreateReq->ctb.tagName, i);
+ for(int32_t j = pTableMeta->tableInfo.numOfColumns; j < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; j++){
+ SSchema *tag = &pTableMeta->schema[j];
+ if(strcmp(tag->name, tName) == 0 && tag->type != TSDB_DATA_TYPE_JSON){
+ tTagSetCid((STag *)pCreateReq->ctb.pTag, i, tag->colId);
+ }
+ }
+ }
+ taosMemoryFreeClear(pTableMeta);
+ }
+
SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
if (pTableBatch == NULL) {
SVgroupCreateTableBatch tBatch = {0};
@@ -1305,6 +1328,7 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
SQuery* pQuery = NULL;
SMqRspObj rspObj = {0};
SDecoder decoder = {0};
+ STableMeta* pTableMeta = NULL;
terrno = TSDB_CODE_SUCCESS;
SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
@@ -1361,24 +1385,6 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
goto end;
}
- uint16_t fLen = 0;
- int32_t rowSize = 0;
- int16_t nVar = 0;
- for (int i = 0; i < pSW->nCols; i++) {
- SSchema* schema = pSW->pSchema + i;
- fLen += TYPE_BYTES[schema->type];
- rowSize += schema->bytes;
- if (IS_VAR_DATA_TYPE(schema->type)) {
- nVar++;
- }
- }
-
- int32_t rows = rspObj.resInfo.numOfRows;
- int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
- (int32_t)TD_BITMAP_BYTES(pSW->nCols - 1);
- int32_t schemaLen = 0;
- int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
-
const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter);
if (!tbName) {
uError("WriteRaw: tbname is null");
@@ -1398,6 +1404,35 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
goto end;
}
+ code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
+ if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST){
+ uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
+ code = TSDB_CODE_SUCCESS;
+ continue;
+ }
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbName);
+ goto end;
+ }
+
+ uint16_t fLen = 0;
+ int32_t rowSize = 0;
+ int16_t nVar = 0;
+ for (int i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
+ SSchema* schema = &pTableMeta->schema[i];
+ fLen += TYPE_BYTES[schema->type];
+ rowSize += schema->bytes;
+ if (IS_VAR_DATA_TYPE(schema->type)) {
+ nVar++;
+ }
+ }
+
+ int32_t rows = rspObj.resInfo.numOfRows;
+ int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
+ (int32_t)TD_BITMAP_BYTES(pTableMeta->tableInfo.numOfColumns - 1);
+ int32_t schemaLen = 0;
+ int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
+
SSubmitReq* subReq = NULL;
SSubmitBlk* blk = NULL;
void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId));
@@ -1430,23 +1465,25 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq));
}
- STableMeta* pTableMeta = NULL;
- code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbName);
- goto end;
- }
+ // pSW->pSchema should be same as pTableMeta->schema
+// ASSERT(pSW->nCols == pTableMeta->tableInfo.numOfColumns);
uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
uint64_t uid = pTableMeta->uid;
- taosMemoryFreeClear(pTableMeta);
+ int16_t sver = pTableMeta->sversion;
void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
SRowBuilder rb = {0};
- tdSRowInit(&rb, pSW->version);
- tdSRowSetTpInfo(&rb, pSW->nCols, fLen);
- int32_t dataLen = 0;
+ tdSRowInit(&rb, sver);
+ tdSRowSetTpInfo(&rb, pTableMeta->tableInfo.numOfColumns, fLen);
+ int32_t totalLen = 0;
+
+ SHashObj* schemaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+ for (int i = 0; i < pSW->nCols; i++) {
+ SSchema* schema = &pSW->pSchema[i];
+ taosHashPut(schemaHash, schema->name, strlen(schema->name), &i, sizeof(int32_t));
+ }
for (int32_t j = 0; j < rows; j++) {
tdSRowResetBuf(&rb, rowData);
@@ -1455,33 +1492,41 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
rspObj.resInfo.current += 1;
int32_t offset = 0;
- for (int32_t k = 0; k < pSW->nCols; k++) {
- const SSchema* pColumn = &pSW->pSchema[k];
- char* data = rspObj.resInfo.row[k];
- if (!data) {
+ for (int32_t k = 0; k < pTableMeta->tableInfo.numOfColumns; k++) {
+ const SSchema* pColumn = &pTableMeta->schema[k];
+ int32_t* index = taosHashGet(schemaHash, pColumn->name, strlen(pColumn->name));
+ if(!index){
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
- } else {
- if (IS_VAR_DATA_TYPE(pColumn->type)) {
- data -= VARSTR_HEADER_SIZE;
+ }else{
+ char* colData = rspObj.resInfo.row[*index];
+ if (!colData) {
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
+ } else {
+ if (IS_VAR_DATA_TYPE(pColumn->type)) {
+ colData -= VARSTR_HEADER_SIZE;
+ }
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, colData, true, offset, k);
}
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
}
+
offset += TYPE_BYTES[pColumn->type];
}
tdSRowEnd(&rb);
int32_t rowLen = TD_ROW_LEN(rowData);
rowData = POINTER_SHIFT(rowData, rowLen);
- dataLen += rowLen;
+ totalLen += rowLen;
}
+ taosHashCleanup(schemaHash);
blk->uid = htobe64(uid);
blk->suid = htobe64(suid);
- blk->sversion = htonl(pSW->version);
+ blk->sversion = htonl(sver);
blk->schemaLen = htonl(schemaLen);
blk->numOfRows = htonl(rows);
- blk->dataLen = htonl(dataLen);
- subReq->length += sizeof(SSubmitBlk) + schemaLen + dataLen;
+ blk->dataLen = htonl(totalLen);
+ subReq->length += sizeof(SSubmitBlk) + schemaLen + totalLen;
subReq->numOfBlocks++;
+ taosMemoryFreeClear(pTableMeta);
}
pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
@@ -1535,6 +1580,7 @@ end:
qDestroyQuery(pQuery);
destroyRequest(pRequest);
taosHashCleanup(pVgHash);
+ taosMemoryFreeClear(pTableMeta);
return code;
}
diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c
index 7637ffbc80baa3f4e67b4a4fc27bc57adb8b7d3a..9f9a14952e75bdac29564c39dd4ef60da0d07ef0 100644
--- a/source/client/src/tmq.c
+++ b/source/client/src/tmq.c
@@ -164,6 +164,7 @@ typedef struct {
union {
SMqDataRsp dataRsp;
SMqMetaRsp metaRsp;
+ STaosxRsp taosxRsp;
};
} SMqPollRspWrapper;
@@ -810,8 +811,19 @@ int32_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) {
}
int32_t tmq_unsubscribe(tmq_t* tmq) {
+ int32_t rsp;
+ int32_t retryCnt = 0;
tmq_list_t* lst = tmq_list_new();
- int32_t rsp = tmq_subscribe(tmq, lst);
+ while (1) {
+ rsp = tmq_subscribe(tmq, lst);
+ if (rsp != TSDB_CODE_MND_CONSUMER_NOT_READY || retryCnt > 5) {
+ break;
+ } else {
+ retryCnt++;
+ taosMsleep(500);
+ }
+ }
+
tmq_list_destroy(lst);
return rsp;
}
@@ -1130,18 +1142,29 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tDecodeSMqDataRsp(&decoder, &pRspWrapper->dataRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead));
- } else {
- ASSERT(rspType == TMQ_MSG_TYPE__POLL_META_RSP);
- tDecodeSMqMetaRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &pRspWrapper->metaRsp);
+
+ tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
+ tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version,
+ rspType);
+
+ } else if (rspType == TMQ_MSG_TYPE__POLL_META_RSP) {
+ SDecoder decoder;
+ tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
+ tDecodeSMqMetaRsp(&decoder, &pRspWrapper->metaRsp);
+ tDecoderClear(&decoder);
memcpy(&pRspWrapper->metaRsp, pMsg->pData, sizeof(SMqRspHead));
+ } else if (rspType == TMQ_MSG_TYPE__TAOSX_RSP) {
+ SDecoder decoder;
+ tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
+ tDecodeSTaosxRsp(&decoder, &pRspWrapper->taosxRsp);
+ tDecoderClear(&decoder);
+ memcpy(&pRspWrapper->taosxRsp, pMsg->pData, sizeof(SMqRspHead));
+ } else {
+ ASSERT(0);
}
taosMemoryFree(pMsg->pData);
- tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
- tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version,
- rspType);
-
taosWriteQitem(tmq->mqueue, pRspWrapper);
tsem_post(&tmq->rspSem);
@@ -1440,6 +1463,24 @@ SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper) {
return pRspObj;
}
+SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) {
+ SMqTaosxRspObj* pRspObj = taosMemoryCalloc(1, sizeof(SMqTaosxRspObj));
+ pRspObj->resType = RES_TYPE__TAOSX;
+ tstrncpy(pRspObj->topic, pWrapper->topicHandle->topicName, TSDB_TOPIC_FNAME_LEN);
+ tstrncpy(pRspObj->db, pWrapper->topicHandle->db, TSDB_DB_FNAME_LEN);
+ pRspObj->vgId = pWrapper->vgHandle->vgId;
+ pRspObj->resIter = -1;
+ memcpy(&pRspObj->rsp, &pWrapper->dataRsp, sizeof(SMqTaosxRspObj));
+
+ pRspObj->resInfo.totalRows = 0;
+ pRspObj->resInfo.precision = TSDB_TIME_PRECISION_MILLI;
+ if (!pWrapper->dataRsp.withSchema) {
+ setResSchemaInfo(&pRspObj->resInfo, pWrapper->topicHandle->schema.pSchema, pWrapper->topicHandle->schema.nCols);
+ }
+
+ return pRspObj;
+}
+
int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
/*tscDebug("call poll");*/
for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
@@ -1581,8 +1622,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
SMqClientVg* pVg = pollRspWrapper->vgHandle;
/*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset,
* rspMsg->msg.rspOffset);*/
- pVg->currentOffset.version = pollRspWrapper->metaRsp.rspOffset;
- pVg->currentOffset.type = TMQ_OFFSET__LOG;
+ pVg->currentOffset = pollRspWrapper->metaRsp.rspOffset;
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
// build rsp
SMqMetaRspObj* pRsp = tmqBuildMetaRspFromWrapper(pollRspWrapper);
@@ -1593,6 +1633,30 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
pollRspWrapper->metaRsp.head.epoch, consumerEpoch);
taosFreeQitem(pollRspWrapper);
}
+ } else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__TAOSX_RSP) {
+ SMqPollRspWrapper* pollRspWrapper = (SMqPollRspWrapper*)rspWrapper;
+ /*atomic_sub_fetch_32(&tmq->readyRequest, 1);*/
+ int32_t consumerEpoch = atomic_load_32(&tmq->epoch);
+ if (pollRspWrapper->taosxRsp.head.epoch == consumerEpoch) {
+ SMqClientVg* pVg = pollRspWrapper->vgHandle;
+ /*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset,
+ * rspMsg->msg.rspOffset);*/
+ pVg->currentOffset = pollRspWrapper->taosxRsp.rspOffset;
+ atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
+ if (pollRspWrapper->taosxRsp.blockNum == 0) {
+ taosFreeQitem(pollRspWrapper);
+ rspWrapper = NULL;
+ continue;
+ }
+ // build rsp
+ SMqRspObj* pRsp = tmqBuildRspFromWrapper(pollRspWrapper);
+ taosFreeQitem(pollRspWrapper);
+ return pRsp;
+ } else {
+ tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d\n",
+ pollRspWrapper->taosxRsp.head.epoch, consumerEpoch);
+ taosFreeQitem(pollRspWrapper);
+ }
} else {
/*printf("handle ep rsp %d\n", rspMsg->head.mqMsgType);*/
bool reset = false;
@@ -1705,9 +1769,11 @@ tmq_res_t tmq_get_res_type(TAOS_RES* res) {
} else if (TD_RES_TMQ_META(res)) {
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DELETE) {
- return TMQ_RES_DATA;
+ return TMQ_RES_TAOSX;
}
return TMQ_RES_TABLE_META;
+ } else if (TD_RES_TMQ_TAOSX(res)) {
+ return TMQ_RES_DATA;
} else {
return TMQ_RES_INVALID;
}
diff --git a/source/client/test/smlTest.cpp b/source/client/test/smlTest.cpp
index 68a8b9d336ae49e34c3dab28d3fdad6d3f27e9d4..b62238ccf26c991a516313270889a05a5b87d6ee 100644
--- a/source/client/test/smlTest.cpp
+++ b/source/client/test/smlTest.cpp
@@ -692,3 +692,52 @@ TEST(testCase, smlParseTelnetLine_diff_json_type2_Test) {
ASSERT_NE(ret, 0);
smlDestroyInfo(info);
}
+
+TEST(testCase, sml_col_4096_Test) {
+ SSmlHandle *info = smlBuildSmlInfo(NULL, NULL, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
+ ASSERT_NE(info, nullptr);
+
+ const char *sql[] = {
+ "spgwgvldxv,id=spgwgvldxv_1,t0=f c0=t,c1=t,c2=t,c3=t,c4=t,c5=t,c6=t,c7=t,c8=t,c9=t,c10=t,c11=t,c12=t,c13=t,c14=t,c15=t,c16=t,c17=t,c18=t,c19=t,c20=t,c21=t,c22=t,c23=t,c24=t,c25=t,c26=t,c27=t,c28=t,c29=t,c30=t,c31=t,c32=t,c33=t,c34=t,c35=t,c36=t,c37=t,c38=t,c39=t,c40=t,c41=t,c42=t,c43=t,c44=t,c45=t,c46=t,c47=t,c48=t,c49=t,c50=t,c51=t,c52=t,c53=t,c54=t,c55=t,c56=t,c57=t,c58=t,c59=t,c60=t,c61=t,c62=t,c63=t,c64=t,c65=t,c66=t,c67=t,c68=t,c69=t,c70=t,c71=t,c72=t,c73=t,c74=t,c75=t,c76=t,c77=t,c78=t,c79=t,c80=t,c81=t,c82=t,c83=t,c84=t,c85=t,c86=t,c87=t,c88=t,c89=t,c90=t,c91=t,c92=t,c93=t,c94=t,c95=t,c96=t,c97=t,c98=t,c99=t,c100=t,"
+ "c101=t,c102=t,c103=t,c104=t,c105=t,c106=t,c107=t,c108=t,c109=t,c110=t,c111=t,c112=t,c113=t,c114=t,c115=t,c116=t,c117=t,c118=t,c119=t,c120=t,c121=t,c122=t,c123=t,c124=t,c125=t,c126=t,c127=t,c128=t,c129=t,c130=t,c131=t,c132=t,c133=t,c134=t,c135=t,c136=t,c137=t,c138=t,c139=t,c140=t,c141=t,c142=t,c143=t,c144=t,c145=t,c146=t,c147=t,c148=t,c149=t,c150=t,c151=t,c152=t,c153=t,c154=t,c155=t,c156=t,c157=t,c158=t,c159=t,c160=t,c161=t,c162=t,c163=t,c164=t,c165=t,c166=t,c167=t,c168=t,c169=t,c170=t,c171=t,c172=t,c173=t,c174=t,c175=t,c176=t,c177=t,c178=t,c179=t,c180=t,c181=t,c182=t,c183=t,c184=t,c185=t,c186=t,c187=t,c188=t,c189=t,"
+ "c190=t,c191=t,c192=t,c193=t,c194=t,c195=t,c196=t,c197=t,c198=t,c199=t,c200=t,c201=t,c202=t,c203=t,c204=t,c205=t,c206=t,c207=t,c208=t,c209=t,c210=t,c211=t,c212=t,c213=t,c214=t,c215=t,c216=t,c217=t,c218=t,c219=t,c220=t,c221=t,c222=t,c223=t,c224=t,c225=t,c226=t,c227=t,c228=t,c229=t,c230=t,c231=t,c232=t,c233=t,c234=t,c235=t,c236=t,c237=t,c238=t,c239=t,c240=t,c241=t,c242=t,c243=t,c244=t,c245=t,c246=t,c247=t,c248=t,c249=t,c250=t,c251=t,c252=t,c253=t,c254=t,c255=t,c256=t,c257=t,c258=t,c259=t,c260=t,c261=t,c262=t,c263=t,c264=t,c265=t,c266=t,c267=t,c268=t,c269=t,c270=t,c271=t,c272=t,c273=t,c274=t,c275=t,c276=t,c277=t,c278=t,"
+ "c279=t,c280=t,c281=t,c282=t,c283=t,c284=t,c285=t,c286=t,c287=t,c288=t,c289=t,c290=t,c291=t,c292=t,c293=t,c294=t,c295=t,c296=t,c297=t,c298=t,c299=t,c300=t,c301=t,c302=t,c303=t,c304=t,c305=t,c306=t,c307=t,c308=t,c309=t,c310=t,c311=t,c312=t,c313=t,c314=t,c315=t,c316=t,c317=t,c318=t,c319=t,c320=t,c321=t,c322=t,c323=t,c324=t,c325=t,c326=t,c327=t,c328=t,c329=t,c330=t,c331=t,c332=t,c333=t,c334=t,c335=t,c336=t,c337=t,c338=t,c339=t,c340=t,c341=t,c342=t,c343=t,c344=t,c345=t,c346=t,c347=t,c348=t,c349=t,c350=t,c351=t,c352=t,c353=t,c354=t,c355=t,c356=t,c357=t,c358=t,c359=t,c360=t,c361=t,c362=t,c363=t,c364=t,c365=t,c366=t,c367=t,c368=t,c369=t,c370=t,c371=t,c372=t,c373=t,c374=t,c375=t,c376=t,c377=t,c378=t,c379=t,c380=t,c381=t,c382=t,c383=t,c384=t,c385=t,c386=t,c387=t,c388=t,c389=t,c390=t,c391=t,c392=t,c393=t,c394=t,c395=t,c396=t,c397=t,c398=t,c399=t,c400=t,c401=t,c402=t,c403=t,c404=t,c405=t,c406=t,c407=t,c408=t,c409=t,c410=t,c411=t,c412=t,c413=t,c414=t,c415=t,c416=t,c417=t,c418=t,c419=t,c420=t,c421=t,c422=t,c423=t,c424=t,c425=t,c426=t,c427=t,c428=t,c429=t,c430=t,c431=t,c432=t,c433=t,c434=t,c435=t,c436=t,c437=t,c438=t,c439=t,c440=t,c441=t,c442=t,c443=t,c444=t,c445=t,c446=t,"
+ "c447=t,c448=t,c449=t,c450=t,c451=t,c452=t,c453=t,c454=t,c455=t,c456=t,c457=t,c458=t,c459=t,c460=t,c461=t,c462=t,c463=t,c464=t,c465=t,c466=t,c467=t,c468=t,c469=t,c470=t,c471=t,c472=t,c473=t,c474=t,c475=t,c476=t,c477=t,c478=t,c479=t,c480=t,c481=t,c482=t,c483=t,c484=t,c485=t,c486=t,c487=t,c488=t,c489=t,c490=t,c491=t,c492=t,c493=t,c494=t,c495=t,c496=t,c497=t,c498=t,c499=t,c500=t,c501=t,c502=t,c503=t,c504=t,c505=t,c506=t,c507=t,c508=t,c509=t,c510=t,c511=t,c512=t,c513=t,c514=t,c515=t,c516=t,c517=t,c518=t,c519=t,c520=t,c521=t,c522=t,c523=t,c524=t,c525=t,c526=t,c527=t,c528=t,c529=t,c530=t,c531=t,c532=t,c533=t,c534=t,c535=t,c536=t,c537=t,c538=t,c539=t,c540=t,c541=t,c542=t,c543=t,c544=t,c545=t,c546=t,c547=t,c548=t,c549=t,c550=t,c551=t,c552=t,c553=t,c554=t,c555=t,c556=t,c557=t,c558=t,c559=t,c560=t,c561=t,c562=t,c563=t,c564=t,c565=t,c566=t,c567=t,c568=t,c569=t,c570=t,c571=t,c572=t,c573=t,c574=t,c575=t,c576=t,c577=t,c578=t,c579=t,c580=t,c581=t,c582=t,c583=t,c584=t,c585=t,c586=t,c587=t,c588=t,c589=t,c590=t,c591=t,c592=t,c593=t,c594=t,c595=t,c596=t,c597=t,c598=t,c599=t,c600=t,c601=t,c602=t,c603=t,c604=t,c605=t,c606=t,c607=t,c608=t,c609=t,c610=t,c611=t,c612=t,c613=t,c614=t,"
+ "c615=t,c616=t,c617=t,c618=t,c619=t,c620=t,c621=t,c622=t,c623=t,c624=t,c625=t,c626=t,c627=t,c628=t,c629=t,c630=t,c631=t,c632=t,c633=t,c634=t,c635=t,c636=t,c637=t,c638=t,c639=t,c640=t,c641=t,c642=t,c643=t,c644=t,c645=t,c646=t,c647=t,c648=t,c649=t,c650=t,c651=t,c652=t,c653=t,c654=t,c655=t,c656=t,c657=t,c658=t,c659=t,c660=t,c661=t,c662=t,c663=t,c664=t,c665=t,c666=t,c667=t,c668=t,c669=t,c670=t,c671=t,c672=t,c673=t,c674=t,c675=t,c676=t,c677=t,c678=t,c679=t,c680=t,c681=t,c682=t,c683=t,c684=t,c685=t,c686=t,c687=t,c688=t,c689=t,c690=t,c691=t,c692=t,c693=t,c694=t,c695=t,c696=t,c697=t,c698=t,c699=t,c700=t,c701=t,c702=t,c703=t,c704=t,c705=t,c706=t,c707=t,c708=t,c709=t,c710=t,c711=t,c712=t,c713=t,c714=t,c715=t,c716=t,c717=t,c718=t,c719=t,c720=t,c721=t,c722=t,c723=t,c724=t,c725=t,c726=t,c727=t,c728=t,c729=t,c730=t,c731=t,c732=t,c733=t,c734=t,c735=t,c736=t,c737=t,c738=t,c739=t,c740=t,c741=t,c742=t,c743=t,c744=t,c745=t,c746=t,c747=t,c748=t,c749=t,c750=t,c751=t,c752=t,c753=t,c754=t,c755=t,c756=t,c757=t,c758=t,c759=t,c760=t,c761=t,c762=t,c763=t,c764=t,c765=t,c766=t,c767=t,c768=t,c769=t,c770=t,c771=t,c772=t,c773=t,c774=t,c775=t,c776=t,c777=t,c778=t,c779=t,c780=t,c781=t,c782=t,"
+ "c783=t,c784=t,c785=t,c786=t,c787=t,c788=t,c789=t,c790=t,c791=t,c792=t,c793=t,c794=t,c795=t,c796=t,c797=t,c798=t,c799=t,c800=t,c801=t,c802=t,c803=t,c804=t,c805=t,c806=t,c807=t,c808=t,c809=t,c810=t,c811=t,c812=t,c813=t,"
+ "c814=t,c815=t,c816=t,c817=t,c818=t,c819=t,c820=t,c821=t,c822=t,c823=t,c824=t,c825=t,c826=t,c827=t,c828=t,c829=t,c830=t,c831=t,c832=t,c833=t,c834=t,c835=t,c836=t,c837=t,c838=t,c839=t,c840=t,c841=t,c842=t,c843=t,c844=t,c845=t,c846=t,c847=t,c848=t,c849=t,c850=t,c851=t,c852=t,c853=t,c854=t,c855=t,c856=t,c857=t,c858=t,c859=t,c860=t,c861=t,c862=t,"
+ "c863=t,c864=t,c865=t,c866=t,c867=t,c868=t,c869=t,c870=t,c871=t,c872=t,c873=t,c874=t,c875=t,c876=t,c877=t,c878=t,c879=t,c880=t,c881=t,c882=t,c883=t,c884=t,c885=t,c886=t,c887=t,c888=t,c889=t,c890=t,c891=t,c892=t,c893=t,c894=t,c895=t,c896=t,c897=t,c898=t,c899=t,c900=t,c901=t,c902=t,c903=t,c904=t,c905=t,c906=t,c907=t,c908=t,c909=t,c910=t,c911=t,c912=t,c913=t,c914=t,c915=t,c916=t,c917=t,c918=t,c919=t,c920=t,c921=t,c922=t,c923=t,c924=t,c925=t,c926=t,c927=t,c928=t,c929=t,c930=t,c931=t,c932=t,c933=t,c934=t,c935=t,c936=t,c937=t,c938=t,c939=t,c940=t,c941=t,c942=t,c943=t,c944=t,c945=t,c946=t,c947=t,c948=t,c949=t,c950=t,c951=t,c952=t,c953=t,c954=t,c955=t,c956=t,c957=t,c958=t,c959=t,c960=t,c961=t,c962=t,c963=t,c964=t,c965=t,c966=t,c967=t,c968=t,c969=t,c970=t,c971=t,c972=t,c973=t,c974=t,c975=t,c976=t,c977=t,c978=t,c979=t,c980=t,c981=t,c982=t,c983=t,c984=t,c985=t,c986=t,c987=t,c988=t,c989=t,c990=t,c991=t,c992=t,c993=t,c994=t,c995=t,c996=t,c997=t,c998=t,c999=t,c1000=t,c1001=t,c1002=t,c1003=t,c1004=t,c1005=t,c1006=t,c1007=t,c1008=t,c1009=t,c1010=t,c1011=t,c1012=t,c1013=t,c1014=t,c1015=t,c1016=t,c1017=t,c1018=t,c1019=t,c1020=t,c1021=t,c1022=t,c1023=t,c1024=t,c1025=t,c1026=t,"
+ "c1027=t,c1028=t,c1029=t,c1030=t,c1031=t,c1032=t,c1033=t,c1034=t,c1035=t,c1036=t,c1037=t,c1038=t,c1039=t,c1040=t,c1041=t,c1042=t,c1043=t,c1044=t,c1045=t,c1046=t,c1047=t,c1048=t,c1049=t,c1050=t,c1051=t,c1052=t,c1053=t,c1054=t,c1055=t,c1056=t,c1057=t,c1058=t,c1059=t,c1060=t,c1061=t,c1062=t,c1063=t,c1064=t,c1065=t,c1066=t,c1067=t,c1068=t,c1069=t,c1070=t,c1071=t,c1072=t,c1073=t,c1074=t,c1075=t,c1076=t,c1077=t,c1078=t,c1079=t,c1080=t,c1081=t,c1082=t,c1083=t,c1084=t,c1085=t,c1086=t,c1087=t,c1088=t,c1089=t,c1090=t,c1091=t,c1092=t,c1093=t,c1094=t,c1095=t,c1096=t,c1097=t,c1098=t,c1099=t,c1100=t,c1101=t,c1102=t,c1103=t,c1104=t,c1105=t,c1106=t,c1107=t,c1108=t,c1109=t,c1110=t,c1111=t,c1112=t,c1113=t,c1114=t,c1115=t,c1116=t,c1117=t,c1118=t,c1119=t,c1120=t,c1121=t,c1122=t,c1123=t,c1124=t,c1125=t,c1126=t,c1127=t,c1128=t,c1129=t,c1130=t,c1131=t,c1132=t,c1133=t,c1134=t,c1135=t,c1136=t,c1137=t,c1138=t,c1139=t,c1140=t,c1141=t,c1142=t,c1143=t,c1144=t,c1145=t,c1146=t,c1147=t,c1148=t,c1149=t,c1150=t,c1151=t,c1152=t,c1153=t,c1154=t,c1155=t,c1156=t,c1157=t,c1158=t,c1159=t,c1160=t,c1161=t,c1162=t,c1163=t,c1164=t,c1165=t,c1166=t,c1167=t,c1168=t,c1169=t,c1170=t,c1171=t,c1172=t,c1173=t,"
+ "c1174=t,c1175=t,c1176=t,c1177=t,c1178=t,c1179=t,c1180=t,c1181=t,c1182=t,c1183=t,c1184=t,c1185=t,c1186=t,c1187=t,c1188=t,c1189=t,c1190=t,c1191=t,c1192=t,c1193=t,c1194=t,c1195=t,c1196=t,c1197=t,c1198=t,c1199=t,c1200=t,c1201=t,c1202=t,c1203=t,c1204=t,c1205=t,c1206=t,c1207=t,c1208=t,c1209=t,c1210=t,c1211=t,c1212=t,c1213=t,c1214=t,c1215=t,c1216=t,c1217=t,c1218=t,c1219=t,c1220=t,c1221=t,c1222=t,c1223=t,c1224=t,c1225=t,c1226=t,c1227=t,c1228=t,c1229=t,c1230=t,c1231=t,c1232=t,c1233=t,c1234=t,c1235=t,c1236=t,c1237=t,c1238=t,c1239=t,c1240=t,c1241=t,c1242=t,c1243=t,c1244=t,c1245=t,c1246=t,c1247=t,c1248=t,c1249=t,c1250=t,c1251=t,c1252=t,c1253=t,c1254=t,c1255=t,c1256=t,c1257=t,c1258=t,c1259=t,c1260=t,c1261=t,c1262=t,c1263=t,c1264=t,c1265=t,c1266=t,c1267=t,c1268=t,c1269=t,c1270=t,c1271=t,c1272=t,c1273=t,c1274=t,c1275=t,c1276=t,c1277=t,c1278=t,c1279=t,c1280=t,c1281=t,c1282=t,c1283=t,c1284=t,c1285=t,c1286=t,c1287=t,c1288=t,c1289=t,c1290=t,c1291=t,c1292=t,c1293=t,c1294=t,c1295=t,c1296=t,c1297=t,c1298=t,c1299=t,c1300=t,c1301=t,c1302=t,c1303=t,c1304=t,c1305=t,c1306=t,c1307=t,c1308=t,c1309=t,c1310=t,c1311=t,c1312=t,c1313=t,c1314=t,c1315=t,c1316=t,c1317=t,c1318=t,c1319=t,c1320=t,"
+ "c1321=t,c1322=t,c1323=t,c1324=t,c1325=t,c1326=t,c1327=t,c1328=t,c1329=t,c1330=t,c1331=t,c1332=t,c1333=t,c1334=t,c1335=t,c1336=t,c1337=t,c1338=t,c1339=t,c1340=t,c1341=t,c1342=t,c1343=t,c1344=t,c1345=t,c1346=t,c1347=t,"
+ "c1348=t,c1349=t,c1350=t,c1351=t,c1352=t,c1353=t,c1354=t,c1355=t,c1356=t,c1357=t,c1358=t,c1359=t,c1360=t,c1361=t,c1362=t,c1363=t,c1364=t,c1365=t,c1366=t,c1367=t,c1368=t,c1369=t,c1370=t,c1371=t,c1372=t,c1373=t,c1374=t,c1375=t,c1376=t,c1377=t,c1378=t,c1379=t,c1380=t,c1381=t,c1382=t,c1383=t,c1384=t,c1385=t,c1386=t,c1387=t,c1388=t,c1389=t,c1390=t,c1391=t,c1392=t,c1393=t,c1394=t,c1395=t,c1396=t,c1397=t,c1398=t,c1399=t,c1400=t,c1401=t,c1402=t,c1403=t,c1404=t,c1405=t,c1406=t,c1407=t,c1408=t,c1409=t,c1410=t,c1411=t,c1412=t,c1413=t,c1414=t,c1415=t,c1416=t,c1417=t,c1418=t,c1419=t,c1420=t,c1421=t,c1422=t,c1423=t,c1424=t,c1425=t,c1426=t,c1427=t,c1428=t,c1429=t,c1430=t,c1431=t,c1432=t,c1433=t,c1434=t,c1435=t,c1436=t,c1437=t,c1438=t,c1439=t,c1440=t,c1441=t,c1442=t,c1443=t,c1444=t,c1445=t,c1446=t,c1447=t,c1448=t,c1449=t,c1450=t,c1451=t,c1452=t,c1453=t,c1454=t,c1455=t,c1456=t,c1457=t,c1458=t,c1459=t,c1460=t,c1461=t,c1462=t,c1463=t,c1464=t,c1465=t,c1466=t,c1467=t,c1468=t,c1469=t,c1470=t,c1471=t,c1472=t,c1473=t,c1474=t,c1475=t,c1476=t,c1477=t,c1478=t,c1479=t,c1480=t,c1481=t,c1482=t,c1483=t,c1484=t,c1485=t,c1486=t,c1487=t,c1488=t,c1489=t,c1490=t,c1491=t,c1492=t,c1493=t,c1494=t,"
+ "c1495=t,c1496=t,c1497=t,c1498=t,c1499=t,c1500=t,c1501=t,c1502=t,c1503=t,c1504=t,c1505=t,c1506=t,c1507=t,c1508=t,c1509=t,c1510=t,c1511=t,c1512=t,c1513=t,c1514=t,c1515=t,c1516=t,c1517=t,c1518=t,c1519=t,c1520=t,c1521=t,c1522=t,c1523=t,c1524=t,c1525=t,c1526=t,c1527=t,c1528=t,c1529=t,c1530=t,c1531=t,c1532=t,c1533=t,c1534=t,c1535=t,c1536=t,c1537=t,c1538=t,c1539=t,c1540=t,c1541=t,c1542=t,c1543=t,c1544=t,c1545=t,c1546=t,c1547=t,c1548=t,c1549=t,c1550=t,c1551=t,c1552=t,c1553=t,c1554=t,c1555=t,c1556=t,c1557=t,c1558=t,c1559=t,c1560=t,c1561=t,c1562=t,c1563=t,c1564=t,c1565=t,c1566=t,c1567=t,c1568=t,c1569=t,c1570=t,c1571=t,c1572=t,c1573=t,c1574=t,c1575=t,c1576=t,c1577=t,c1578=t,c1579=t,c1580=t,c1581=t,c1582=t,c1583=t,c1584=t,c1585=t,c1586=t,c1587=t,c1588=t,c1589=t,c1590=t,c1591=t,c1592=t,c1593=t,c1594=t,c1595=t,c1596=t,c1597=t,c1598=t,c1599=t,c1600=t,c1601=t,c1602=t,c1603=t,c1604=t,c1605=t,c1606=t,c1607=t,c1608=t,c1609=t,c1610=t,c1611=t,c1612=t,c1613=t,c1614=t,c1615=t,c1616=t,c1617=t,c1618=t,c1619=t,c1620=t,c1621=t,c1622=t,c1623=t,c1624=t,c1625=t,c1626=t,c1627=t,c1628=t,c1629=t,c1630=t,c1631=t,c1632=t,c1633=t,c1634=t,c1635=t,c1636=t,c1637=t,c1638=t,c1639=t,c1640=t,c1641=t,"
+ "c1642=t,c1643=t,c1644=t,c1645=t,c1646=t,c1647=t,c1648=t,c1649=t,c1650=t,c1651=t,c1652=t,c1653=t,c1654=t,c1655=t,c1656=t,c1657=t,c1658=t,c1659=t,c1660=t,c1661=t,c1662=t,c1663=t,c1664=t,c1665=t,c1666=t,c1667=t,c1668=t,c1669=t,c1670=t,c1671=t,c1672=t,c1673=t,c1674=t,c1675=t,c1676=t,c1677=t,c1678=t,c1679=t,c1680=t,c1681=t,c1682=t,c1683=t,c1684=t,c1685=t,c1686=t,c1687=t,c1688=t,c1689=t,c1690=t,c1691=t,c1692=t,c1693=t,c1694=t,c1695=t,c1696=t,c1697=t,c1698=t,c1699=t,c1700=t,c1701=t,c1702=t,c1703=t,c1704=t,c1705=t,c1706=t,c1707=t,c1708=t,c1709=t,c1710=t,c1711=t,c1712=t,c1713=t,c1714=t,c1715=t,c1716=t,c1717=t,c1718=t,c1719=t,c1720=t,c1721=t,c1722=t,c1723=t,c1724=t,c1725=t,c1726=t,c1727=t,c1728=t,c1729=t,c1730=t,c1731=t,c1732=t,c1733=t,c1734=t,c1735=t,c1736=t,c1737=t,c1738=t,c1739=t,c1740=t,c1741=t,c1742=t,c1743=t,c1744=t,c1745=t,c1746=t,c1747=t,c1748=t,c1749=t,c1750=t,c1751=t,c1752=t,c1753=t,c1754=t,c1755=t,c1756=t,c1757=t,c1758=t,c1759=t,c1760=t,c1761=t,c1762=t,c1763=t,c1764=t,c1765=t,c1766=t,c1767=t,c1768=t,c1769=t,c1770=t,c1771=t,c1772=t,c1773=t,c1774=t,c1775=t,c1776=t,c1777=t,c1778=t,c1779=t,c1780=t,c1781=t,c1782=t,c1783=t,c1784=t,c1785=t,c1786=t,c1787=t,c1788=t,"
+ "c1789=t,c1790=t,c1791=t,c1792=t,c1793=t,c1794=t,c1795=t,c1796=t,c1797=t,c1798=t,c1799=t,c1800=t,c1801=t,c1802=t,c1803=t,c1804=t,c1805=t,c1806=t,c1807=t,c1808=t,c1809=t,c1810=t,c1811=t,c1812=t,c1813=t,c1814=t,c1815=t,"
+ "c1816=t,c1817=t,c1818=t,c1819=t,c1820=t,c1821=t,c1822=t,c1823=t,c1824=t,c1825=t,c1826=t,c1827=t,c1828=t,c1829=t,c1830=t,c1831=t,c1832=t,c1833=t,c1834=t,c1835=t,c1836=t,c1837=t,c1838=t,c1839=t,c1840=t,c1841=t,c1842=t,c1843=t,c1844=t,c1845=t,c1846=t,c1847=t,c1848=t,c1849=t,c1850=t,c1851=t,c1852=t,c1853=t,c1854=t,c1855=t,c1856=t,c1857=t,c1858=t,c1859=t,c1860=t,c1861=t,c1862=t,c1863=t,c1864=t,c1865=t,c1866=t,c1867=t,c1868=t,c1869=t,c1870=t,c1871=t,c1872=t,c1873=t,c1874=t,c1875=t,c1876=t,c1877=t,c1878=t,c1879=t,c1880=t,c1881=t,c1882=t,c1883=t,c1884=t,c1885=t,c1886=t,c1887=t,c1888=t,c1889=t,c1890=t,c1891=t,c1892=t,c1893=t,c1894=t,c1895=t,c1896=t,c1897=t,c1898=t,c1899=t,c1900=t,c1901=t,c1902=t,c1903=t,c1904=t,c1905=t,c1906=t,c1907=t,c1908=t,c1909=t,c1910=t,c1911=t,c1912=t,c1913=t,c1914=t,c1915=t,c1916=t,c1917=t,c1918=t,c1919=t,c1920=t,c1921=t,c1922=t,c1923=t,c1924=t,c1925=t,c1926=t,c1927=t,c1928=t,c1929=t,c1930=t,c1931=t,c1932=t,c1933=t,c1934=t,c1935=t,c1936=t,c1937=t,c1938=t,c1939=t,c1940=t,c1941=t,c1942=t,c1943=t,c1944=t,c1945=t,c1946=t,c1947=t,c1948=t,c1949=t,c1950=t,c1951=t,c1952=t,c1953=t,c1954=t,c1955=t,c1956=t,c1957=t,c1958=t,c1959=t,c1960=t,c1961=t,c1962=t,"
+ "c1963=t,c1964=t,c1965=t,c1966=t,c1967=t,c1968=t,c1969=t,c1970=t,c1971=t,c1972=t,c1973=t,c1974=t,c1975=t,c1976=t,c1977=t,c1978=t,c1979=t,c1980=t,c1981=t,c1982=t,c1983=t,c1984=t,c1985=t,c1986=t,c1987=t,c1988=t,c1989=t,c1990=t,c1991=t,c1992=t,c1993=t,c1994=t,c1995=t,c1996=t,c1997=t,c1998=t,c1999=t,c2000=t,c2001=t,c2002=t,c2003=t,c2004=t,c2005=t,c2006=t,c2007=t,c2008=t,c2009=t,c2010=t,c2011=t,c2012=t,c2013=t,c2014=t,c2015=t,c2016=t,c2017=t,c2018=t,c2019=t,c2020=t,c2021=t,c2022=t,c2023=t,c2024=t,c2025=t,c2026=t,c2027=t,c2028=t,c2029=t,c2030=t,c2031=t,c2032=t,c2033=t,c2034=t,c2035=t,c2036=t,c2037=t,c2038=t,c2039=t,c2040=t,c2041=t,c2042=t,c2043=t,c2044=t,c2045=t,c2046=t,c2047=t,c2048=t,c2049=t,c2050=t,c2051=t,c2052=t,c2053=t,c2054=t,c2055=t,c2056=t,c2057=t,c2058=t,c2059=t,c2060=t,c2061=t,c2062=t,c2063=t,c2064=t,c2065=t,c2066=t,c2067=t,c2068=t,c2069=t,c2070=t,c2071=t,c2072=t,c2073=t,c2074=t,c2075=t,c2076=t,c2077=t,c2078=t,c2079=t,c2080=t,c2081=t,c2082=t,c2083=t,c2084=t,c2085=t,c2086=t,c2087=t,c2088=t,c2089=t,c2090=t,c2091=t,c2092=t,c2093=t,c2094=t,c2095=t,c2096=t,c2097=t,c2098=t,c2099=t,c2100=t,c2101=t,c2102=t,c2103=t,c2104=t,c2105=t,c2106=t,c2107=t,c2108=t,c2109=t,"
+ "c2110=t,c2111=t,c2112=t,c2113=t,c2114=t,c2115=t,c2116=t,c2117=t,c2118=t,c2119=t,c2120=t,c2121=t,c2122=t,c2123=t,c2124=t,c2125=t,c2126=t,c2127=t,c2128=t,c2129=t,c2130=t,c2131=t,c2132=t,c2133=t,c2134=t,c2135=t,c2136=t,c2137=t,c2138=t,c2139=t,c2140=t,c2141=t,c2142=t,c2143=t,c2144=t,c2145=t,c2146=t,c2147=t,c2148=t,c2149=t,c2150=t,c2151=t,c2152=t,c2153=t,c2154=t,c2155=t,c2156=t,c2157=t,c2158=t,c2159=t,c2160=t,c2161=t,c2162=t,c2163=t,c2164=t,c2165=t,c2166=t,c2167=t,c2168=t,c2169=t,c2170=t,c2171=t,c2172=t,c2173=t,c2174=t,c2175=t,c2176=t,c2177=t,c2178=t,c2179=t,c2180=t,c2181=t,c2182=t,c2183=t,c2184=t,c2185=t,c2186=t,c2187=t,c2188=t,c2189=t,c2190=t,c2191=t,c2192=t,c2193=t,c2194=t,c2195=t,c2196=t,c2197=t,c2198=t,c2199=t,c2200=t,c2201=t,c2202=t,c2203=t,c2204=t,c2205=t,c2206=t,c2207=t,c2208=t,c2209=t,c2210=t,c2211=t,c2212=t,c2213=t,c2214=t,c2215=t,c2216=t,c2217=t,c2218=t,c2219=t,c2220=t,c2221=t,c2222=t,c2223=t,c2224=t,c2225=t,c2226=t,c2227=t,c2228=t,c2229=t,c2230=t,c2231=t,c2232=t,c2233=t,c2234=t,c2235=t,c2236=t,c2237=t,c2238=t,c2239=t,c2240=t,c2241=t,c2242=t,c2243=t,c2244=t,c2245=t,c2246=t,c2247=t,c2248=t,c2249=t,c2250=t,c2251=t,c2252=t,c2253=t,c2254=t,c2255=t,c2256=t,"
+ "c2257=t,c2258=t,c2259=t,c2260=t,c2261=t,c2262=t,c2263=t,c2264=t,c2265=t,c2266=t,c2267=t,c2268=t,c2269=t,c2270=t,c2271=t,c2272=t,c2273=t,c2274=t,c2275=t,c2276=t,c2277=t,c2278=t,c2279=t,c2280=t,c2281=t,c2282=t,c2283=t,"
+ "c2284=t,c2285=t,c2286=t,c2287=t,c2288=t,c2289=t,c2290=t,c2291=t,c2292=t,c2293=t,c2294=t,c2295=t,c2296=t,c2297=t,c2298=t,c2299=t,c2300=t,c2301=t,c2302=t,c2303=t,c2304=t,c2305=t,c2306=t,c2307=t,c2308=t,c2309=t,c2310=t,c2311=t,c2312=t,c2313=t,c2314=t,c2315=t,c2316=t,c2317=t,c2318=t,c2319=t,c2320=t,c2321=t,c2322=t,c2323=t,c2324=t,c2325=t,c2326=t,c2327=t,c2328=t,c2329=t,c2330=t,c2331=t,c2332=t,c2333=t,c2334=t,c2335=t,c2336=t,c2337=t,c2338=t,c2339=t,c2340=t,c2341=t,c2342=t,c2343=t,c2344=t,c2345=t,c2346=t,c2347=t,c2348=t,c2349=t,c2350=t,c2351=t,c2352=t,c2353=t,c2354=t,c2355=t,c2356=t,c2357=t,c2358=t,c2359=t,c2360=t,c2361=t,c2362=t,c2363=t,c2364=t,c2365=t,c2366=t,c2367=t,c2368=t,c2369=t,c2370=t,c2371=t,c2372=t,c2373=t,c2374=t,c2375=t,c2376=t,c2377=t,c2378=t,c2379=t,c2380=t,c2381=t,c2382=t,c2383=t,c2384=t,c2385=t,c2386=t,c2387=t,c2388=t,c2389=t,c2390=t,c2391=t,c2392=t,c2393=t,c2394=t,c2395=t,c2396=t,c2397=t,c2398=t,c2399=t,c2400=t,c2401=t,c2402=t,c2403=t,c2404=t,c2405=t,c2406=t,c2407=t,c2408=t,c2409=t,c2410=t,c2411=t,c2412=t,c2413=t,c2414=t,c2415=t,c2416=t,c2417=t,c2418=t,c2419=t,c2420=t,c2421=t,c2422=t,c2423=t,c2424=t,c2425=t,c2426=t,c2427=t,c2428=t,c2429=t,c2430=t,"
+ "c2431=t,c2432=t,c2433=t,c2434=t,c2435=t,c2436=t,c2437=t,c2438=t,c2439=t,c2440=t,c2441=t,c2442=t,c2443=t,c2444=t,c2445=t,c2446=t,c2447=t,c2448=t,c2449=t,c2450=t,c2451=t,c2452=t,c2453=t,c2454=t,c2455=t,c2456=t,c2457=t,c2458=t,c2459=t,c2460=t,c2461=t,c2462=t,c2463=t,c2464=t,c2465=t,c2466=t,c2467=t,c2468=t,c2469=t,c2470=t,c2471=t,c2472=t,c2473=t,c2474=t,c2475=t,c2476=t,c2477=t,c2478=t,c2479=t,c2480=t,c2481=t,c2482=t,c2483=t,c2484=t,c2485=t,c2486=t,c2487=t,c2488=t,c2489=t,c2490=t,c2491=t,c2492=t,c2493=t,c2494=t,c2495=t,c2496=t,c2497=t,c2498=t,c2499=t,c2500=t,c2501=t,c2502=t,c2503=t,c2504=t,c2505=t,c2506=t,c2507=t,c2508=t,c2509=t,c2510=t,c2511=t,c2512=t,c2513=t,c2514=t,c2515=t,c2516=t,c2517=t,c2518=t,c2519=t,c2520=t,c2521=t,c2522=t,c2523=t,c2524=t,c2525=t,c2526=t,c2527=t,c2528=t,c2529=t,c2530=t,c2531=t,c2532=t,c2533=t,c2534=t,c2535=t,c2536=t,c2537=t,c2538=t,c2539=t,c2540=t,c2541=t,c2542=t,c2543=t,c2544=t,c2545=t,c2546=t,c2547=t,c2548=t,c2549=t,c2550=t,c2551=t,c2552=t,c2553=t,c2554=t,c2555=t,c2556=t,c2557=t,c2558=t,c2559=t,c2560=t,c2561=t,c2562=t,c2563=t,c2564=t,c2565=t,c2566=t,c2567=t,c2568=t,c2569=t,c2570=t,c2571=t,c2572=t,c2573=t,c2574=t,c2575=t,c2576=t,c2577=t,"
+ "c2578=t,c2579=t,c2580=t,c2581=t,c2582=t,c2583=t,c2584=t,c2585=t,c2586=t,c2587=t,c2588=t,c2589=t,c2590=t,c2591=t,c2592=t,c2593=t,c2594=t,c2595=t,c2596=t,c2597=t,c2598=t,c2599=t,c2600=t,c2601=t,c2602=t,c2603=t,c2604=t,c2605=t,c2606=t,c2607=t,c2608=t,c2609=t,c2610=t,c2611=t,c2612=t,c2613=t,c2614=t,c2615=t,c2616=t,c2617=t,c2618=t,c2619=t,c2620=t,c2621=t,c2622=t,c2623=t,c2624=t,c2625=t,c2626=t,c2627=t,c2628=t,c2629=t,c2630=t,c2631=t,c2632=t,c2633=t,c2634=t,c2635=t,c2636=t,c2637=t,c2638=t,c2639=t,c2640=t,c2641=t,c2642=t,c2643=t,c2644=t,c2645=t,c2646=t,c2647=t,c2648=t,c2649=t,c2650=t,c2651=t,c2652=t,c2653=t,c2654=t,c2655=t,c2656=t,c2657=t,c2658=t,c2659=t,c2660=t,c2661=t,c2662=t,c2663=t,c2664=t,c2665=t,c2666=t,c2667=t,c2668=t,c2669=t,c2670=t,c2671=t,c2672=t,c2673=t,c2674=t,c2675=t,c2676=t,c2677=t,c2678=t,c2679=t,c2680=t,c2681=t,c2682=t,c2683=t,c2684=t,c2685=t,c2686=t,c2687=t,c2688=t,c2689=t,c2690=t,c2691=t,c2692=t,c2693=t,c2694=t,c2695=t,c2696=t,c2697=t,c2698=t,c2699=t,c2700=t,c2701=t,c2702=t,c2703=t,c2704=t,c2705=t,c2706=t,c2707=t,c2708=t,c2709=t,c2710=t,c2711=t,c2712=t,c2713=t,c2714=t,c2715=t,c2716=t,c2717=t,c2718=t,c2719=t,c2720=t,c2721=t,c2722=t,c2723=t,c2724=t,"
+ "c2725=t,c2726=t,c2727=t,c2728=t,c2729=t,c2730=t,c2731=t,c2732=t,c2733=t,c2734=t,c2735=t,c2736=t,c2737=t,c2738=t,c2739=t,c2740=t,c2741=t,c2742=t,c2743=t,c2744=t,c2745=t,c2746=t,c2747=t,c2748=t,c2749=t,c2750=t,c2751=t,c2752=t,c2753=t,c2754=t,c2755=t,c2756=t,c2757=t,c2758=t,c2759=t,c2760=t,c2761=t,c2762=t,c2763=t,c2764=t,c2765=t,c2766=t,c2767=t,c2768=t,c2769=t,c2770=t,c2771=t,c2772=t,c2773=t,c2774=t,c2775=t,c2776=t,c2777=t,c2778=t,c2779=t,c2780=t,c2781=t,c2782=t,c2783=t,c2784=t,c2785=t,c2786=t,c2787=t,c2788=t,c2789=t,c2790=t,c2791=t,c2792=t,c2793=t,c2794=t,c2795=t,c2796=t,c2797=t,c2798=t,c2799=t,c2800=t,c2801=t,c2802=t,c2803=t,c2804=t,c2805=t,c2806=t,c2807=t,c2808=t,c2809=t,c2810=t,c2811=t,c2812=t,c2813=t,c2814=t,c2815=t,c2816=t,c2817=t,c2818=t,c2819=t,c2820=t,c2821=t,c2822=t,c2823=t,c2824=t,c2825=t,c2826=t,c2827=t,c2828=t,c2829=t,c2830=t,c2831=t,c2832=t,c2833=t,c2834=t,c2835=t,c2836=t,c2837=t,c2838=t,c2839=t,c2840=t,c2841=t,c2842=t,c2843=t,c2844=t,c2845=t,c2846=t,c2847=t,c2848=t,c2849=t,c2850=t,c2851=t,c2852=t,c2853=t,c2854=t,c2855=t,c2856=t,c2857=t,c2858=t,c2859=t,c2860=t,c2861=t,c2862=t,c2863=t,c2864=t,c2865=t,c2866=t,c2867=t,c2868=t,c2869=t,c2870=t,c2871=t,"
+ "c2872=t,c2873=t,c2874=t,c2875=t,c2876=t,c2877=t,c2878=t,c2879=t,c2880=t,c2881=t,c2882=t,c2883=t,c2884=t,c2885=t,c2886=t,c2887=t,c2888=t,c2889=t,c2890=t,c2891=t,c2892=t,c2893=t,c2894=t,c2895=t,c2896=t,c2897=t,c2898=t,c2899=t,c2900=t,c2901=t,c2902=t,c2903=t,c2904=t,c2905=t,c2906=t,c2907=t,c2908=t,c2909=t,c2910=t,c2911=t,c2912=t,c2913=t,c2914=t,c2915=t,c2916=t,c2917=t,c2918=t,c2919=t,c2920=t,c2921=t,c2922=t,c2923=t,c2924=t,c2925=t,c2926=t,c2927=t,c2928=t,c2929=t,c2930=t,c2931=t,c2932=t,c2933=t,c2934=t,c2935=t,c2936=t,c2937=t,c2938=t,c2939=t,c2940=t,c2941=t,c2942=t,c2943=t,c2944=t,c2945=t,c2946=t,c2947=t,c2948=t,c2949=t,c2950=t,c2951=t,c2952=t,c2953=t,c2954=t,c2955=t,c2956=t,c2957=t,c2958=t,c2959=t,c2960=t,c2961=t,c2962=t,c2963=t,c2964=t,c2965=t,c2966=t,c2967=t,c2968=t,c2969=t,c2970=t,c2971=t,c2972=t,c2973=t,c2974=t,c2975=t,c2976=t,c2977=t,c2978=t,c2979=t,c2980=t,c2981=t,c2982=t,c2983=t,c2984=t,c2985=t,c2986=t,c2987=t,c2988=t,c2989=t,c2990=t,c2991=t,c2992=t,c2993=t,c2994=t,c2995=t,c2996=t,c2997=t,c2998=t,c2999=t,c3000=t,c3001=t,c3002=t,c3003=t,c3004=t,c3005=t,c3006=t,c3007=t,c3008=t,c3009=t,c3010=t,c3011=t,c3012=t,c3013=t,c3014=t,c3015=t,c3016=t,c3017=t,c3018=t,"
+ "c3019=t,c3020=t,c3021=t,c3022=t,c3023=t,c3024=t,c3025=t,c3026=t,c3027=t,c3028=t,c3029=t,c3030=t,c3031=t,c3032=t,c3033=t,c3034=t,c3035=t,c3036=t,c3037=t,c3038=t,c3039=t,c3040=t,c3041=t,c3042=t,c3043=t,c3044=t,c3045=t,c3046=t,c3047=t,c3048=t,c3049=t,c3050=t,c3051=t,c3052=t,c3053=t,c3054=t,c3055=t,c3056=t,c3057=t,c3058=t,c3059=t,c3060=t,c3061=t,c3062=t,c3063=t,c3064=t,c3065=t,c3066=t,c3067=t,c3068=t,c3069=t,c3070=t,c3071=t,c3072=t,c3073=t,c3074=t,c3075=t,c3076=t,c3077=t,c3078=t,c3079=t,c3080=t,c3081=t,c3082=t,c3083=t,c3084=t,c3085=t,c3086=t,c3087=t,c3088=t,c3089=t,c3090=t,c3091=t,c3092=t,c3093=t,c3094=t,c3095=t,c3096=t,c3097=t,c3098=t,c3099=t,c3100=t,c3101=t,c3102=t,c3103=t,c3104=t,c3105=t,c3106=t,c3107=t,c3108=t,c3109=t,c3110=t,c3111=t,c3112=t,c3113=t,c3114=t,c3115=t,c3116=t,c3117=t,c3118=t,c3119=t,c3120=t,c3121=t,c3122=t,c3123=t,c3124=t,c3125=t,c3126=t,c3127=t,c3128=t,c3129=t,c3130=t,c3131=t,c3132=t,c3133=t,c3134=t,c3135=t,c3136=t,c3137=t,c3138=t,c3139=t,c3140=t,c3141=t,c3142=t,c3143=t,c3144=t,c3145=t,c3146=t,c3147=t,c3148=t,c3149=t,c3150=t,c3151=t,c3152=t,c3153=t,c3154=t,c3155=t,c3156=t,c3157=t,c3158=t,c3159=t,c3160=t,c3161=t,c3162=t,c3163=t,c3164=t,c3165=t,"
+ "c3166=t,c3167=t,c3168=t,c3169=t,c3170=t,c3171=t,c3172=t,c3173=t,c3174=t,c3175=t,c3176=t,c3177=t,c3178=t,c3179=t,c3180=t,c3181=t,c3182=t,c3183=t,c3184=t,c3185=t,c3186=t,c3187=t,c3188=t,c3189=t,c3190=t,c3191=t,c3192=t,c3193=t,c3194=t,c3195=t,c3196=t,c3197=t,c3198=t,c3199=t,c3200=t,c3201=t,c3202=t,c3203=t,c3204=t,c3205=t,c3206=t,c3207=t,c3208=t,c3209=t,c3210=t,c3211=t,c3212=t,c3213=t,c3214=t,c3215=t,c3216=t,c3217=t,c3218=t,c3219=t,c3220=t,c3221=t,c3222=t,c3223=t,c3224=t,c3225=t,c3226=t,c3227=t,c3228=t,c3229=t,c3230=t,c3231=t,c3232=t,c3233=t,c3234=t,c3235=t,c3236=t,c3237=t,c3238=t,c3239=t,c3240=t,c3241=t,c3242=t,c3243=t,c3244=t,c3245=t,c3246=t,c3247=t,c3248=t,c3249=t,c3250=t,c3251=t,c3252=t,c3253=t,c3254=t,c3255=t,c3256=t,c3257=t,c3258=t,c3259=t,c3260=t,c3261=t,c3262=t,c3263=t,c3264=t,c3265=t,c3266=t,c3267=t,c3268=t,c3269=t,c3270=t,c3271=t,c3272=t,c3273=t,c3274=t,c3275=t,c3276=t,c3277=t,c3278=t,c3279=t,c3280=t,c3281=t,c3282=t,c3283=t,c3284=t,c3285=t,c3286=t,c3287=t,c3288=t,c3289=t,c3290=t,c3291=t,c3292=t,c3293=t,c3294=t,c3295=t,c3296=t,c3297=t,c3298=t,c3299=t,c3300=t,c3301=t,c3302=t,c3303=t,c3304=t,c3305=t,c3306=t,c3307=t,c3308=t,c3309=t,c3310=t,c3311=t,c3312=t,"
+ "c3313=t,c3314=t,c3315=t,c3316=t,c3317=t,c3318=t,c3319=t,c3320=t,c3321=t,c3322=t,c3323=t,c3324=t,c3325=t,c3326=t,c3327=t,c3328=t,c3329=t,c3330=t,c3331=t,c3332=t,c3333=t,c3334=t,c3335=t,c3336=t,c3337=t,c3338=t,c3339=t,c3340=t,c3341=t,c3342=t,c3343=t,c3344=t,c3345=t,c3346=t,c3347=t,c3348=t,c3349=t,c3350=t,c3351=t,c3352=t,c3353=t,c3354=t,c3355=t,c3356=t,c3357=t,c3358=t,c3359=t,c3360=t,c3361=t,c3362=t,c3363=t,c3364=t,c3365=t,c3366=t,c3367=t,c3368=t,c3369=t,c3370=t,c3371=t,c3372=t,c3373=t,c3374=t,c3375=t,c3376=t,c3377=t,c3378=t,c3379=t,c3380=t,c3381=t,c3382=t,c3383=t,c3384=t,c3385=t,c3386=t,c3387=t,c3388=t,c3389=t,c3390=t,c3391=t,c3392=t,c3393=t,c3394=t,c3395=t,c3396=t,c3397=t,c3398=t,c3399=t,c3400=t,c3401=t,c3402=t,c3403=t,c3404=t,c3405=t,c3406=t,c3407=t,c3408=t,c3409=t,c3410=t,c3411=t,c3412=t,c3413=t,c3414=t,c3415=t,c3416=t,c3417=t,c3418=t,c3419=t,c3420=t,c3421=t,c3422=t,c3423=t,c3424=t,c3425=t,c3426=t,c3427=t,c3428=t,c3429=t,c3430=t,c3431=t,c3432=t,c3433=t,c3434=t,c3435=t,c3436=t,c3437=t,c3438=t,c3439=t,c3440=t,c3441=t,c3442=t,c3443=t,c3444=t,c3445=t,c3446=t,c3447=t,c3448=t,c3449=t,c3450=t,c3451=t,c3452=t,c3453=t,c3454=t,c3455=t,c3456=t,c3457=t,c3458=t,c3459=t,"
+ "c3460=t,c3461=t,c3462=t,c3463=t,c3464=t,c3465=t,c3466=t,c3467=t,c3468=t,c3469=t,c3470=t,c3471=t,c3472=t,c3473=t,c3474=t,c3475=t,c3476=t,c3477=t,c3478=t,c3479=t,c3480=t,c3481=t,c3482=t,c3483=t,c3484=t,c3485=t,c3486=t,c3487=t,c3488=t,c3489=t,c3490=t,c3491=t,c3492=t,c3493=t,c3494=t,c3495=t,c3496=t,c3497=t,c3498=t,c3499=t,c3500=t,c3501=t,c3502=t,c3503=t,c3504=t,c3505=t,c3506=t,c3507=t,c3508=t,c3509=t,c3510=t,c3511=t,c3512=t,c3513=t,"
+ "c3514=t,c3515=t,c3516=t,c3517=t,c3518=t,c3519=t,c3520=t,c3521=t,c3522=t,c3523=t,c3524=t,c3525=t,c3526=t,c3527=t,c3528=t,c3529=t,c3530=t,c3531=t,c3532=t,c3533=t,c3534=t,c3535=t,c3536=t,c3537=t,c3538=t,c3539=t,c3540=t,c3541=t,c3542=t,c3543=t,c3544=t,c3545=t,c3546=t,c3547=t,c3548=t,c3549=t,c3550=t,c3551=t,c3552=t,c3553=t,c3554=t,c3555=t,c3556=t,c3557=t,c3558=t,c3559=t,c3560=t,c3561=t,c3562=t,c3563=t,c3564=t,c3565=t,c3566=t,c3567=t,c3568=t,c3569=t,c3570=t,c3571=t,c3572=t,c3573=t,c3574=t,c3575=t,c3576=t,c3577=t,c3578=t,c3579=t,c3580=t,c3581=t,c3582=t,c3583=t,c3584=t,c3585=t,c3586=t,c3587=t,c3588=t,c3589=t,c3590=t,c3591=t,c3592=t,c3593=t,c3594=t,c3595=t,c3596=t,c3597=t,c3598=t,c3599=t,c3600=t,c3601=t,c3602=t,c3603=t,c3604=t,c3605=t,c3606=t,c3607=t,c3608=t,c3609=t,c3610=t,c3611=t,c3612=t,c3613=t,c3614=t,c3615=t,c3616=t,c3617=t,c3618=t,c3619=t,c3620=t,c3621=t,c3622=t,c3623=t,c3624=t,c3625=t,c3626=t,c3627=t,c3628=t,c3629=t,c3630=t,c3631=t,c3632=t,c3633=t,c3634=t,c3635=t,c3636=t,c3637=t,c3638=t,c3639=t,c3640=t,c3641=t,c3642=t,c3643=t,c3644=t,c3645=t,c3646=t,c3647=t,c3648=t,c3649=t,c3650=t,c3651=t,c3652=t,c3653=t,c3654=t,c3655=t,c3656=t,c3657=t,c3658=t,c3659=t,c3660=t,"
+ "c3661=t,c3662=t,c3663=t,c3664=t,c3665=t,c3666=t,c3667=t,c3668=t,c3669=t,c3670=t,c3671=t,c3672=t,c3673=t,c3674=t,c3675=t,c3676=t,c3677=t,c3678=t,c3679=t,c3680=t,c3681=t,c3682=t,c3683=t,c3684=t,c3685=t,c3686=t,c3687=t,c3688=t,c3689=t,c3690=t,c3691=t,c3692=t,c3693=t,c3694=t,c3695=t,c3696=t,c3697=t,c3698=t,c3699=t,c3700=t,c3701=t,c3702=t,c3703=t,c3704=t,c3705=t,c3706=t,c3707=t,c3708=t,c3709=t,c3710=t,c3711=t,c3712=t,c3713=t,c3714=t,c3715=t,c3716=t,c3717=t,c3718=t,c3719=t,c3720=t,c3721=t,c3722=t,c3723=t,c3724=t,c3725=t,c3726=t,c3727=t,c3728=t,c3729=t,c3730=t,c3731=t,c3732=t,c3733=t,c3734=t,c3735=t,c3736=t,c3737=t,c3738=t,c3739=t,c3740=t,c3741=t,c3742=t,c3743=t,c3744=t,c3745=t,c3746=t,c3747=t,c3748=t,c3749=t,c3750=t,c3751=t,c3752=t,c3753=t,c3754=t,c3755=t,c3756=t,c3757=t,c3758=t,c3759=t,c3760=t,c3761=t,c3762=t,c3763=t,c3764=t,c3765=t,c3766=t,c3767=t,c3768=t,c3769=t,c3770=t,c3771=t,c3772=t,c3773=t,c3774=t,c3775=t,c3776=t,c3777=t,c3778=t,c3779=t,c3780=t,c3781=t,c3782=t,c3783=t,c3784=t,c3785=t,c3786=t,c3787=t,c3788=t,c3789=t,c3790=t,c3791=t,c3792=t,c3793=t,c3794=t,c3795=t,c3796=t,c3797=t,c3798=t,c3799=t,c3800=t,c3801=t,c3802=t,c3803=t,c3804=t,c3805=t,c3806=t,c3807=t,"
+ "c3808=t,c3809=t,c3810=t,c3811=t,c3812=t,c3813=t,c3814=t,c3815=t,c3816=t,c3817=t,c3818=t,c3819=t,c3820=t,c3821=t,c3822=t,c3823=t,c3824=t,c3825=t,c3826=t,c3827=t,c3828=t,c3829=t,c3830=t,c3831=t,c3832=t,c3833=t,c3834=t,c3835=t,c3836=t,c3837=t,c3838=t,c3839=t,c3840=t,c3841=t,c3842=t,c3843=t,c3844=t,c3845=t,c3846=t,c3847=t,c3848=t,c3849=t,c3850=t,c3851=t,c3852=t,c3853=t,c3854=t,c3855=t,c3856=t,c3857=t,c3858=t,c3859=t,c3860=t,c3861=t,c3862=t,c3863=t,c3864=t,c3865=t,c3866=t,c3867=t,c3868=t,c3869=t,c3870=t,c3871=t,c3872=t,c3873=t,c3874=t,c3875=t,c3876=t,c3877=t,c3878=t,c3879=t,c3880=t,c3881=t,c3882=t,c3883=t,c3884=t,c3885=t,c3886=t,c3887=t,c3888=t,c3889=t,c3890=t,c3891=t,c3892=t,c3893=t,c3894=t,c3895=t,c3896=t,c3897=t,c3898=t,c3899=t,c3900=t,c3901=t,c3902=t,c3903=t,c3904=t,c3905=t,c3906=t,c3907=t,c3908=t,c3909=t,c3910=t,c3911=t,c3912=t,c3913=t,c3914=t,c3915=t,c3916=t,c3917=t,c3918=t,c3919=t,c3920=t,c3921=t,c3922=t,c3923=t,c3924=t,c3925=t,c3926=t,c3927=t,c3928=t,c3929=t,c3930=t,c3931=t,c3932=t,c3933=t,c3934=t,c3935=t,c3936=t,c3937=t,c3938=t,c3939=t,c3940=t,c3941=t,c3942=t,c3943=t,c3944=t,c3945=t,c3946=t,c3947=t,c3948=t,c3949=t,c3950=t,c3951=t,c3952=t,c3953=t,c3954=t,"
+ "c3955=t,c3956=t,c3957=t,c3958=t,c3959=t,c3960=t,c3961=t,c3962=t,c3963=t,c3964=t,c3965=t,c3966=t,c3967=t,c3968=t,c3969=t,c3970=t,c3971=t,c3972=t,c3973=t,c3974=t,c3975=t,c3976=t,c3977=t,c3978=t,c3979=t,c3980=t,c3981=t,c3982=t,c3983=t,c3984=t,c3985=t,c3986=t,c3987=t,c3988=t,c3989=t,c3990=t,c3991=t,c3992=t,c3993=t,c3994=t,c3995=t,c3996=t,c3997=t,c3998=t,c3999=t,c4000=t,c4001=t,c4002=t,c4003=t,c4004=t,c4005=t,c4006=t,c4007=t,c4008=t,c4009=t,c4010=t,c4011=t,c4012=t,c4013=t,c4014=t,c4015=t,c4016=t,c4017=t,c4018=t,c4019=t,c4020=t,c4021=t,c4022=t,c4023=t,c4024=t,c4025=t,c4026=t,c4027=t,c4028=t,c4029=t,c4030=t,c4031=t,c4032=t,c4033=t,c4034=t,c4035=t,c4036=t,c4037=t,c4038=t,c4039=t,c4040=t,c4041=t,c4042=t,c4043=t,c4044=t,c4045=t,c4046=t,c4047=t,c4048=t,c4049=t,c4050=t,c4051=t,c4052=t,c4053=t,c4054=t,c4055=t,c4056=t,c4057=t,c4058=t,c4059=t,c4060=t,c4061=t,c4062=t,c4063=t,c4064=t,c4065=t,c4066=t,c4067=t,c4068=t,c4069=t,c4070=t,c4071=t,c4072=t,c4073=t,c4074=t,c4075=t,c4076=t,c4077=t,c4078=t,c4079=t,c4080=t,c4081=t,c4082=t,c4083=t,c4084=t,c4085=t,c4086=t,c4087=t,c4088=t,c4089=t,c4090=t,c4091=t,c4092=t,c4093=t 1626006833640000000"
+ };
+
+ int ret = TSDB_CODE_SUCCESS;
+ for(int i = 0; i < sizeof(sql)/sizeof(sql[0]); i++){
+ ret = smlParseInfluxLine(info, sql[i]);
+ if(ret != TSDB_CODE_SUCCESS) break;
+ }
+ ASSERT_NE(ret, 0);
+ smlDestroyInfo(info);
+}
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index 65041e1f129d6d73f9a5a13678bd1dcd5efe733b..dffef21ac49502534e0b42deaf9359c8562d0997 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -15,342 +15,347 @@
#include "systable.h"
#include "taos.h"
+#include "taosdef.h"
#include "tdef.h"
#include "tgrant.h"
+#include "tmsg.h"
#include "types.h"
#define SYSTABLE_SCH_TABLE_NAME_LEN ((TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
#define SYSTABLE_SCH_DB_NAME_LEN ((TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
#define SYSTABLE_SCH_COL_NAME_LEN ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
+// clang-format off
static const SSysDbTableSchema dnodesSchema[] = {
- {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
- {.name = "support_vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
- {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "note", .bytes = 256 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
+ {.name = "support_vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
+ {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
+ {.name = "note", .bytes = 256 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
};
static const SSysDbTableSchema mnodesSchema[] = {
- {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
};
static const SSysDbTableSchema modulesSchema[] = {
- {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "endpoint", .bytes = 134 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "module", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "endpoint", .bytes = 134 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "module", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
};
static const SSysDbTableSchema qnodesSchema[] = {
- {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
};
static const SSysDbTableSchema snodesSchema[] = {
- {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
};
static const SSysDbTableSchema bnodesSchema[] = {
- {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
};
static const SSysDbTableSchema clusterSchema[] = {
- {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
+ {.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "uptime", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
};
static const SSysDbTableSchema userDBSchema[] = {
- {.name = "name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "vgroups", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
- {.name = "strict", .bytes = TSDB_DB_STRICT_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "duration", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "keep", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "buffer", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "pages", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "minrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "maxrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
- {.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "retentions", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
- {.name = "cachemodel", .bytes = TSDB_CACHE_MODEL_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "cachesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "wal_level", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
- {.name = "wal_fsync_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "wal_retention_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
+ {.name = "name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "vgroups", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
+ {.name = "strict", .bytes = TSDB_DB_STRICT_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "duration", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "keep", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "buffer", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "pages", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "minrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "maxrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
+ {.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "retentions", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = true},
+ {.name = "cachemodel", .bytes = TSDB_CACHE_MODEL_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "cachesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "wal_level", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
+ {.name = "wal_fsync_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "wal_retention_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
+ {.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
};
static const SSysDbTableSchema userFuncSchema[] = {
- {.name = "name", .bytes = TSDB_FUNC_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "comment", .bytes = PATH_MAX - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "aggregate", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "output_type", .bytes = TSDB_TYPE_STR_MAX_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "code_len", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "bufsize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
+ {.name = "name", .bytes = TSDB_FUNC_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "comment", .bytes = PATH_MAX - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "aggregate", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "output_type", .bytes = TSDB_TYPE_STR_MAX_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "code_len", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "bufsize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
};
static const SSysDbTableSchema userIdxSchema[] = {
- {.name = "index_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "index_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
static const SSysDbTableSchema userStbsSchema[] = {
- {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "tags", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "last_update", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "watermark", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "max_delay", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "rollup", .bytes = 128 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "tags", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "last_update", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "watermark", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "max_delay", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "rollup", .bytes = 128 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysDbTableSchema streamSchema[] = {
- {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "trigger", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "trigger", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysDbTableSchema userTblsSchema[] = {
- {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "uid", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "ttl", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "type", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "uid", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "ttl", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "type", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysDbTableSchema userTagsSchema[] = {
- {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "tag_name", .bytes = TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "tag_type", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "tag_value", .bytes = TSDB_MAX_TAGS_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "tag_name", .bytes = TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "tag_type", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "tag_value", .bytes = TSDB_MAX_TAGS_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysDbTableSchema userTblDistSchema[] = {
- {.name = "db_name", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "table_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "distributed_histogram", .bytes = 500 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "min_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "max_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "avg_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "stddev_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "blocks", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "storage_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "compression_ratio", .bytes = 8, .type = TSDB_DATA_TYPE_DOUBLE},
- {.name = "rows_in_mem", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "seek_header_time", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
+ {.name = "db_name", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "table_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "distributed_histogram", .bytes = 500 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "min_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "max_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "avg_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "stddev_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
+ {.name = "blocks", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "storage_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
+ {.name = "compression_ratio", .bytes = 8, .type = TSDB_DATA_TYPE_DOUBLE, .sysInfo = true},
+ {.name = "rows_in_mem", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "seek_header_time", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
};
static const SSysDbTableSchema userUsersSchema[] = {
- {.name = "name", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "super", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
- {.name = "enable", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
- {.name = "sysinfo", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "name", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "super", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = false},
+ {.name = "enable", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = false},
+ {.name = "sysinfo", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
GRANTS_SCHEMA;
static const SSysDbTableSchema vgroupsSchema[] = {
- {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "tables", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "v1_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "v1_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "v2_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "v2_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "v3_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "v3_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "tsma", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "tables", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "v1_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "v1_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "v2_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "v2_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "v3_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "v3_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "tsma", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
};
static const SSysDbTableSchema smaSchema[] = {
- {.name = "sma_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
+ {.name = "sma_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
};
static const SSysDbTableSchema transSchema[] = {
- {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "db1", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "db2", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "last_action_info",
- .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE,
- .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "db1", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "db2", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+    {.name = "last_action_info", .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysDbTableSchema configSchema[] = {
- {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
};
static const SSysDbTableSchema variablesSchema[] = {
{.name = "dnode_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+};
+
+static const SSysDbTableSchema topicSchema[] = {
+ {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ // TODO config
+};
+
+
+static const SSysDbTableSchema subscriptionSchema[] = {
+ {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
};
static const SSysTableMeta infosMeta[] = {
- {TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema)},
- {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema)},
- {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema)},
- {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema)},
+ {TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true},
+ {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true},
+ {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true},
+ {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema), true},
// {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)},
// {TSDB_INS_TABLE_BNODES, bnodesSchema, tListLen(bnodesSchema)},
- {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema)},
- {TSDB_INS_TABLE_DATABASES, userDBSchema, tListLen(userDBSchema)},
- {TSDB_INS_TABLE_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema)},
- {TSDB_INS_TABLE_INDEXES, userIdxSchema, tListLen(userIdxSchema)},
- {TSDB_INS_TABLE_STABLES, userStbsSchema, tListLen(userStbsSchema)},
- {TSDB_INS_TABLE_TABLES, userTblsSchema, tListLen(userTblsSchema)},
- {TSDB_INS_TABLE_TAGS, userTagsSchema, tListLen(userTagsSchema)},
+ {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema), true},
+ {TSDB_INS_TABLE_DATABASES, userDBSchema, tListLen(userDBSchema), false},
+ {TSDB_INS_TABLE_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema), false},
+ {TSDB_INS_TABLE_INDEXES, userIdxSchema, tListLen(userIdxSchema), false},
+ {TSDB_INS_TABLE_STABLES, userStbsSchema, tListLen(userStbsSchema), false},
+ {TSDB_INS_TABLE_TABLES, userTblsSchema, tListLen(userTblsSchema), false},
+ {TSDB_INS_TABLE_TAGS, userTagsSchema, tListLen(userTagsSchema), false},
// {TSDB_INS_TABLE_TABLE_DISTRIBUTED, userTblDistSchema, tListLen(userTblDistSchema)},
- {TSDB_INS_TABLE_USERS, userUsersSchema, tListLen(userUsersSchema)},
- {TSDB_INS_TABLE_LICENCES, grantsSchema, tListLen(grantsSchema)},
- {TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema)},
- {TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema)},
- {TSDB_INS_TABLE_DNODE_VARIABLES, variablesSchema, tListLen(variablesSchema)},
+ {TSDB_INS_TABLE_USERS, userUsersSchema, tListLen(userUsersSchema), false},
+ {TSDB_INS_TABLE_LICENCES, grantsSchema, tListLen(grantsSchema), true},
+ {TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema), true},
+ {TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema), true},
+ {TSDB_INS_TABLE_DNODE_VARIABLES, variablesSchema, tListLen(variablesSchema), true},
+ {TSDB_INS_TABLE_TOPICS, topicSchema, tListLen(topicSchema), false},
+ {TSDB_INS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema), false},
+ {TSDB_INS_TABLE_STREAMS, streamSchema, tListLen(streamSchema), false},
};
static const SSysDbTableSchema connectionsSchema[] = {
- {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT},
- {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_UINT},
- {.name = "end_point", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "login_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT, .sysInfo = false},
+ {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_UINT, .sysInfo = false},
+ {.name = "end_point", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "login_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
-static const SSysDbTableSchema topicSchema[] = {
- {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- // TODO config
-};
static const SSysDbTableSchema consumerSchema[] = {
- {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "consumer_group", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "topics", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- /*{.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},*/
- {.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
-};
-
-static const SSysDbTableSchema subscriptionSchema[] = {
- {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
+ {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "consumer_group", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "topics", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ /*{.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},*/
+ {.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
static const SSysDbTableSchema offsetSchema[] = {
- {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "committed_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "current_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "skip_log_cnt", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
+ {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "committed_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "current_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "skip_log_cnt", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
};
static const SSysDbTableSchema querySchema[] = {
- {.name = "kill_id", .bytes = TSDB_QUERY_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "query_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT},
- {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "exec_usec", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "stable_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
- {.name = "sub_num", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "sub_status", .bytes = TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "kill_id", .bytes = TSDB_QUERY_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "query_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT, .sysInfo = false},
+ {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "exec_usec", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "stable_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = false},
+ {.name = "sub_num", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "sub_status", .bytes = TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysDbTableSchema appSchema[] = {
- {.name = "app_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "ip", .bytes = TSDB_IPv4ADDR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "name", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "start_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "insert_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "insert_row", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "insert_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "insert_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "fetch_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "query_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "slow_query", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "total_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "current_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "app_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "ip", .bytes = TSDB_IPv4ADDR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "name", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "start_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "insert_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "insert_row", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "insert_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "insert_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "fetch_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "query_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "slow_query", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "total_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "current_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
static const SSysTableMeta perfsMeta[] = {
- {TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema)},
- {TSDB_PERFS_TABLE_QUERIES, querySchema, tListLen(querySchema)},
- {TSDB_PERFS_TABLE_TOPICS, topicSchema, tListLen(topicSchema)},
- {TSDB_PERFS_TABLE_CONSUMERS, consumerSchema, tListLen(consumerSchema)},
- {TSDB_PERFS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema)},
+ {TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema), false},
+ {TSDB_PERFS_TABLE_QUERIES, querySchema, tListLen(querySchema), false},
+ {TSDB_PERFS_TABLE_CONSUMERS, consumerSchema, tListLen(consumerSchema), false},
// {TSDB_PERFS_TABLE_OFFSETS, offsetSchema, tListLen(offsetSchema)},
- {TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema)},
- {TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema)},
- {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)},
- {TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema)}};
+ {TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema), false},
+ // {TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema), false},
+ {TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema), false}};
+// clang-format on
void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size) {
if (pInfosTableMeta) {
@@ -369,3 +374,26 @@ void getPerfDbMeta(const SSysTableMeta** pPerfsTableMeta, size_t* size) {
*size = tListLen(perfsMeta);
}
}
+
+void getVisibleInfosTablesNum(bool sysInfo, size_t* size) {
+ if (sysInfo) {
+ getInfosDbMeta(NULL, size);
+ return;
+ }
+ *size = 0;
+ const SSysTableMeta* pMeta = NULL;
+ size_t totalNum = 0;
+ getInfosDbMeta(&pMeta, &totalNum);
+ for (size_t i = 0; i < totalNum; ++i) {
+ if (!pMeta[i].sysInfo) {
+ ++(*size);
+ }
+ }
+}
+
+bool invisibleColumn(bool sysInfo, int8_t tableType, int8_t flags) {
+ if (sysInfo || TSDB_SYSTEM_TABLE != tableType) {
+ return false;
+ }
+ return 0 != (flags & COL_IS_SYSINFO);
+}
diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index c65e966046912edb6f8c0ca77db3f55d24710785..c7f372f17b3c174290396c0e0ca49229ff8df73b 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -1228,6 +1228,7 @@ void blockDataFreeRes(SSDataBlock* pBlock) {
}
taosArrayDestroy(pBlock->pDataBlock);
+ pBlock->pDataBlock = NULL;
taosMemoryFreeClear(pBlock->pBlockAgg);
memset(&pBlock->info, 0, sizeof(SDataBlockInfo));
}
@@ -1706,8 +1707,8 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
}
void blockDebugShowDataBlock(SSDataBlock* pBlock, const char* flag) {
- SArray* dataBlocks = taosArrayInit(1, sizeof(SSDataBlock));
- taosArrayPush(dataBlocks, pBlock);
+ SArray* dataBlocks = taosArrayInit(1, sizeof(SSDataBlock*));
+ taosArrayPush(dataBlocks, &pBlock);
blockDebugShowDataBlocks(dataBlocks, flag);
taosArrayDestroy(dataBlocks);
}
diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c
index 8eeab77a157993bd8d89479b221982d3b1e5c336..b40f449a0550140784250b9c2250d191552e4652 100644
--- a/source/common/src/tdataformat.c
+++ b/source/common/src/tdataformat.c
@@ -1064,6 +1064,26 @@ _err:
return code;
}
+void tTagSetCid(const STag *pTag, int16_t iTag, int16_t cid) {
+ uint8_t *p = NULL;
+ int8_t isLarge = pTag->flags & TD_TAG_LARGE;
+ int16_t offset = 0;
+
+ if (isLarge) {
+ p = (uint8_t *)&((int16_t *)pTag->idx)[pTag->nTag];
+ } else {
+ p = (uint8_t *)&pTag->idx[pTag->nTag];
+ }
+
+ if (isLarge) {
+ offset = ((int16_t *)pTag->idx)[iTag];
+ } else {
+ offset = pTag->idx[iTag];
+ }
+
+ tPutI16v(p + offset, cid);
+}
+
#if 1 // ===================================================================================================================
int tdInitTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version) {
if (pBuilder == NULL) return -1;
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index adc5af1a17cf7a66c673c61e031a35ea3a968d38..908f7c014e9e1088a8fa3f7243aa545f56acc0bc 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -61,6 +61,7 @@ int32_t tsNumOfVnodeStreamThreads = 2;
int32_t tsNumOfVnodeFetchThreads = 4;
int32_t tsNumOfVnodeWriteThreads = 2;
int32_t tsNumOfVnodeSyncThreads = 2;
+int32_t tsNumOfVnodeRsmaThreads = 2;
int32_t tsNumOfQnodeQueryThreads = 4;
int32_t tsNumOfQnodeFetchThreads = 4;
int32_t tsNumOfSnodeSharedThreads = 2;
@@ -76,7 +77,7 @@ bool tsMonitorComp = false;
// telem
bool tsEnableTelem = true;
-int32_t tsTelemInterval = 86400;
+int32_t tsTelemInterval = 43200;
char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.taosdata.com";
uint16_t tsTelemPort = 80;
@@ -90,6 +91,7 @@ bool tsSmlDataFormat =
// query
int32_t tsQueryPolicy = 1;
int32_t tsQuerySmaOptimize = 0;
+bool tsQueryPlannerTrace = false;
/*
* denote if the server needs to compress response message at the application layer to client, including query rsp,
@@ -164,6 +166,7 @@ int32_t tsMqRebalanceInterval = 2;
int32_t tsTtlUnit = 86400;
int32_t tsTtlPushInterval = 86400;
int32_t tsGrantHBInterval = 60;
+int32_t tsUptimeInterval = 300; // seconds
#ifndef _STORAGE
int32_t taosSetTfsCfg(SConfig *pCfg) {
@@ -284,6 +287,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "compressColData", tsCompressColData, -1, 100000000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "queryPolicy", tsQueryPolicy, 1, 3, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "querySmaOptimize", tsQuerySmaOptimize, 0, 1, 1) != 0) return -1;
+ if (cfgAddBool(pCfg, "queryPlannerTrace", tsQueryPlannerTrace, true) != 0) return -1;
if (cfgAddString(pCfg, "smlChildTableName", "", 1) != 0) return -1;
if (cfgAddString(pCfg, "smlTagName", tsSmlTagName, 1) != 0) return -1;
if (cfgAddBool(pCfg, "smlDataFormat", tsSmlDataFormat, 1) != 0) return -1;
@@ -377,6 +381,10 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16);
if (cfgAddInt32(pCfg, "numOfVnodeSyncThreads", tsNumOfVnodeSyncThreads, 1, 1024, 0) != 0) return -1;
+ tsNumOfVnodeRsmaThreads = tsNumOfCores;
+ tsNumOfVnodeRsmaThreads = TMAX(tsNumOfVnodeRsmaThreads, 4);
+ if (cfgAddInt32(pCfg, "numOfVnodeRsmaThreads", tsNumOfVnodeRsmaThreads, 1, 1024, 0) != 0) return -1;
+
tsNumOfQnodeQueryThreads = tsNumOfCores * 2;
tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4);
if (cfgAddInt32(pCfg, "numOfQnodeQueryThreads", tsNumOfQnodeQueryThreads, 1, 1024, 0) != 0) return -1;
@@ -414,12 +422,158 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushInterval, 1, 100000, 1) != 0) return -1;
+ if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, 1) != 0) return -1;
if (cfgAddBool(pCfg, "udf", tsStartUdfd, 0) != 0) return -1;
GRANT_CFG_ADD;
return 0;
}
+static int32_t taosUpdateServerCfg(SConfig *pCfg) {
+ SConfigItem *pItem;
+ ECfgSrcType stype;
+ int32_t numOfCores;
+ int64_t totalMemoryKB;
+
+ pItem = cfgGetItem(tsCfg, "numOfCores");
+ if (pItem == NULL) {
+ return -1;
+ } else {
+ stype = pItem->stype;
+ numOfCores = pItem->fval;
+ }
+
+ pItem = cfgGetItem(tsCfg, "supportVnodes");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfSupportVnodes = numOfCores * 2;
+ tsNumOfSupportVnodes = TMAX(tsNumOfSupportVnodes, 2);
+ pItem->i32 = tsNumOfSupportVnodes;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfRpcThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfRpcThreads = numOfCores / 2;
+ tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4);
+ pItem->i32 = tsNumOfRpcThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfCommitThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfCommitThreads = numOfCores / 2;
+ tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4);
+ pItem->i32 = tsNumOfCommitThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfMnodeReadThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfMnodeReadThreads = numOfCores / 8;
+ tsNumOfMnodeReadThreads = TRANGE(tsNumOfMnodeReadThreads, 1, 4);
+ pItem->i32 = tsNumOfMnodeReadThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeQueryThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeQueryThreads = numOfCores * 2;
+ tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4);
+ pItem->i32 = tsNumOfVnodeQueryThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeStreamThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeStreamThreads = numOfCores / 4;
+ tsNumOfVnodeStreamThreads = TMAX(tsNumOfVnodeStreamThreads, 4);
+ pItem->i32 = tsNumOfVnodeStreamThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeFetchThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeFetchThreads = numOfCores / 4;
+ tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4);
+ pItem->i32 = tsNumOfVnodeFetchThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeWriteThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeWriteThreads = numOfCores;
+ tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
+ pItem->i32 = tsNumOfVnodeWriteThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeSyncThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeSyncThreads = numOfCores * 2;
+ tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16);
+ pItem->i32 = tsNumOfVnodeSyncThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeRsmaThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeRsmaThreads = numOfCores;
+ tsNumOfVnodeRsmaThreads = TMAX(tsNumOfVnodeRsmaThreads, 4);
+ pItem->i32 = tsNumOfVnodeRsmaThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfQnodeQueryThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfQnodeQueryThreads = numOfCores * 2;
+ tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4);
+ pItem->i32 = tsNumOfQnodeQueryThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfQnodeFetchThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfQnodeFetchThreads = numOfCores / 2;
+ tsNumOfQnodeFetchThreads = TMAX(tsNumOfQnodeFetchThreads, 4);
+ pItem->i32 = tsNumOfQnodeFetchThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfSnodeSharedThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfSnodeSharedThreads = numOfCores / 4;
+ tsNumOfSnodeSharedThreads = TRANGE(tsNumOfSnodeSharedThreads, 2, 4);
+ pItem->i32 = tsNumOfSnodeSharedThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfSnodeUniqueThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfSnodeUniqueThreads = numOfCores / 4;
+ tsNumOfSnodeUniqueThreads = TRANGE(tsNumOfSnodeUniqueThreads, 2, 4);
+ pItem->i32 = tsNumOfSnodeUniqueThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "totalMemoryKB");
+ if (pItem == NULL) {
+ return -1;
+ } else {
+ stype = pItem->stype;
+ totalMemoryKB = pItem->i64;
+ }
+
+ pItem = cfgGetItem(tsCfg, "rpcQueueMemoryAllowed");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsRpcQueueMemoryAllowed = totalMemoryKB * 1024 * 0.1;
+ tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL);
+ pItem->i64 = tsRpcQueueMemoryAllowed;
+ pItem->stype = stype;
+ }
+
+ return 0;
+}
+
static void taosSetClientLogCfg(SConfig *pCfg) {
SConfigItem *pItem = cfgGetItem(pCfg, "logDir");
tstrncpy(tsLogDir, cfgGetItem(pCfg, "logDir")->str, PATH_MAX);
@@ -490,6 +644,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
tsNumOfTaskQueueThreads = cfgGetItem(pCfg, "numOfTaskQueueThreads")->i32;
tsQueryPolicy = cfgGetItem(pCfg, "queryPolicy")->i32;
tsQuerySmaOptimize = cfgGetItem(pCfg, "querySmaOptimize")->i32;
+ tsQueryPlannerTrace = cfgGetItem(pCfg, "queryPlannerTrace")->bval;
return 0;
}
@@ -538,6 +693,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsNumOfVnodeFetchThreads = cfgGetItem(pCfg, "numOfVnodeFetchThreads")->i32;
tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32;
tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32;
+ tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32;
tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32;
tsNumOfSnodeSharedThreads = cfgGetItem(pCfg, "numOfSnodeSharedThreads")->i32;
@@ -560,6 +716,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32;
tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32;
tsTtlPushInterval = cfgGetItem(pCfg, "ttlPushInterval")->i32;
+ tsUptimeInterval = cfgGetItem(pCfg, "uptimeInterval")->i32;
tsStartUdfd = cfgGetItem(pCfg, "udf")->bval;
@@ -782,6 +939,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32;
} else if (strcasecmp("numOfVnodeSyncThreads", name) == 0) {
tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32;
+ } else if (strcasecmp("numOfVnodeRsmaThreads", name) == 0) {
+ tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
} else if (strcasecmp("numOfQnodeQueryThreads", name) == 0) {
tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32;
} else if (strcasecmp("numOfQnodeFetchThreads", name) == 0) {
@@ -815,6 +974,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
tsQnodeShmSize = cfgGetItem(pCfg, "qnodeShmSize")->i32;
} else if (strcasecmp("qDebugFlag", name) == 0) {
qDebugFlag = cfgGetItem(pCfg, "qDebugFlag")->i32;
+ } else if (strcasecmp("queryPlannerTrace", name) == 0) {
+ tsQueryPlannerTrace = cfgGetItem(pCfg, "queryPlannerTrace")->bval;
}
break;
}
@@ -970,9 +1131,9 @@ int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDi
taosSetServerLogCfg(pCfg);
}
- taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32);
+ taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32, false);
- if (taosMulMkDir(tsLogDir) != 0) {
+ if (taosMulModeMkDir(tsLogDir, 0777) != 0) {
uError("failed to create dir:%s since %s", tsLogDir, terrstr());
cfgCleanup(pCfg);
return -1;
@@ -1037,6 +1198,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile
if (taosSetClientCfg(tsCfg)) return -1;
} else {
if (taosSetClientCfg(tsCfg)) return -1;
+ if (taosUpdateServerCfg(tsCfg)) return -1;
if (taosSetServerCfg(tsCfg)) return -1;
if (taosSetTfsCfg(tsCfg) != 0) return -1;
}
@@ -1061,7 +1223,7 @@ void taosCleanupCfg() {
void taosCfgDynamicOptions(const char *option, const char *value) {
if (strncasecmp(option, "debugFlag", 9) == 0) {
int32_t flag = atoi(value);
- taosSetAllDebugFlag(flag);
+ taosSetAllDebugFlag(flag, true);
return;
}
@@ -1083,14 +1245,14 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
}
const char *options[] = {
- "dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag",
- "tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag",
- "tmrDebugFlag", "uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag",
+ "dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag", "tqDebugFlag",
+ "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag", "uDebugFlag",
+ "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag", "jniDebugFlag",
};
int32_t *optionVars[] = {
- &dDebugFlag, &vDebugFlag, &mDebugFlag, &wDebugFlag, &sDebugFlag, &tsdbDebugFlag,
- &tqDebugFlag, &fsDebugFlag, &udfDebugFlag, &smaDebugFlag, &idxDebugFlag, &tdbDebugFlag,
- &tmrDebugFlag, &uDebugFlag, &smaDebugFlag, &rpcDebugFlag, &qDebugFlag, &metaDebugFlag,
+ &dDebugFlag, &vDebugFlag, &mDebugFlag, &wDebugFlag, &sDebugFlag, &tsdbDebugFlag, &tqDebugFlag,
+ &fsDebugFlag, &udfDebugFlag, &smaDebugFlag, &idxDebugFlag, &tdbDebugFlag, &tmrDebugFlag, &uDebugFlag,
+ &smaDebugFlag, &rpcDebugFlag, &qDebugFlag, &metaDebugFlag, &jniDebugFlag,
};
int32_t optionSize = tListLen(options);
@@ -1102,41 +1264,42 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
int32_t flag = atoi(value);
uInfo("%s set from %d to %d", optName, *optionVars[d], flag);
*optionVars[d] = flag;
- taosSetDebugFlag(optionVars[d], optName, flag);
+ taosSetDebugFlag(optionVars[d], optName, flag, true);
return;
}
uError("failed to cfg dynamic option:%s value:%s", option, value);
}
-void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal) {
+void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite) {
SConfigItem *pItem = cfgGetItem(tsCfg, flagName);
- if (pItem != NULL) {
+ if (pItem != NULL && (rewrite || pItem->i32 == 0)) {
pItem->i32 = flagVal;
}
*pFlagPtr = flagVal;
}
-void taosSetAllDebugFlag(int32_t flag) {
+void taosSetAllDebugFlag(int32_t flag, bool rewrite) {
if (flag <= 0) return;
- taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag);
- taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag);
- taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag);
- taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag);
- taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag);
- taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag);
- taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag);
- taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag);
- taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag);
- taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag);
- taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag);
- taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag);
- taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag);
- taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag);
- taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag);
- taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag);
- taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag);
- taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag);
+ taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, rewrite);
+  taosSetDebugFlag(&tmrDebugFlag, "tmrDebugFlag", flag, rewrite);
uInfo("all debug flag are set to %d", flag);
}
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index 533d924546eb553045fea97e6b50cdb42489d714..b056677a0389f178a9d438d4bc452752d5d59d08 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -3196,12 +3196,16 @@ static int32_t tDecodeSTableMetaRsp(SDecoder *pDecoder, STableMetaRsp *pRsp) {
if (tDecodeI32(pDecoder, &pRsp->vgId) < 0) return -1;
int32_t totalCols = pRsp->numOfTags + pRsp->numOfColumns;
- pRsp->pSchemas = taosMemoryMalloc(sizeof(SSchema) * totalCols);
- if (pRsp->pSchemas == NULL) return -1;
+ if (totalCols > 0) {
+ pRsp->pSchemas = taosMemoryMalloc(sizeof(SSchema) * totalCols);
+ if (pRsp->pSchemas == NULL) return -1;
- for (int32_t i = 0; i < totalCols; ++i) {
- SSchema *pSchema = &pRsp->pSchemas[i];
- if (tDecodeSSchema(pDecoder, pSchema) < 0) return -1;
+ for (int32_t i = 0; i < totalCols; ++i) {
+ SSchema *pSchema = &pRsp->pSchemas[i];
+ if (tDecodeSSchema(pDecoder, pSchema) < 0) return -1;
+ }
+ } else {
+ pRsp->pSchemas = NULL;
}
return 0;
@@ -3326,7 +3330,7 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) {
return 0;
}
-void tFreeSTableMetaRsp(STableMetaRsp *pRsp) { taosMemoryFreeClear(pRsp->pSchemas); }
+void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas); }
void tFreeSTableIndexRsp(void *info) {
if (NULL == info) {
@@ -3630,6 +3634,7 @@ int32_t tSerializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
if (tEncodeU32(&encoder, pRsp->connId) < 0) return -1;
if (tEncodeI32(&encoder, pRsp->dnodeNum) < 0) return -1;
if (tEncodeI8(&encoder, pRsp->superUser) < 0) return -1;
+ if (tEncodeI8(&encoder, pRsp->sysInfo) < 0) return -1;
if (tEncodeI8(&encoder, pRsp->connType) < 0) return -1;
if (tEncodeSEpSet(&encoder, &pRsp->epSet) < 0) return -1;
if (tEncodeI32(&encoder, pRsp->svrTimestamp) < 0) return -1;
@@ -3652,6 +3657,7 @@ int32_t tDeserializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
if (tDecodeU32(&decoder, &pRsp->connId) < 0) return -1;
if (tDecodeI32(&decoder, &pRsp->dnodeNum) < 0) return -1;
if (tDecodeI8(&decoder, &pRsp->superUser) < 0) return -1;
+ if (tDecodeI8(&decoder, &pRsp->sysInfo) < 0) return -1;
if (tDecodeI8(&decoder, &pRsp->connType) < 0) return -1;
if (tDecodeSEpSet(&decoder, &pRsp->epSet) < 0) return -1;
if (tDecodeI32(&decoder, &pRsp->svrTimestamp) < 0) return -1;
@@ -5090,6 +5096,10 @@ int tEncodeSVCreateTbRsp(SEncoder *pCoder, const SVCreateTbRsp *pRsp) {
if (tStartEncode(pCoder) < 0) return -1;
if (tEncodeI32(pCoder, pRsp->code) < 0) return -1;
+ if (tEncodeI32(pCoder, pRsp->pMeta ? 1 : 0) < 0) return -1;
+ if (pRsp->pMeta) {
+ if (tEncodeSTableMetaRsp(pCoder, pRsp->pMeta) < 0) return -1;
+ }
tEndEncode(pCoder);
return 0;
@@ -5100,15 +5110,38 @@ int tDecodeSVCreateTbRsp(SDecoder *pCoder, SVCreateTbRsp *pRsp) {
if (tDecodeI32(pCoder, &pRsp->code) < 0) return -1;
+ int32_t meta = 0;
+ if (tDecodeI32(pCoder, &meta) < 0) return -1;
+ if (meta) {
+ pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+ if (NULL == pRsp->pMeta) return -1;
+ if (tDecodeSTableMetaRsp(pCoder, pRsp->pMeta) < 0) return -1;
+ } else {
+ pRsp->pMeta = NULL;
+ }
+
tEndDecode(pCoder);
return 0;
}
+void tFreeSVCreateTbRsp(void *param) {
+ if (NULL == param) {
+ return;
+ }
+
+ SVCreateTbRsp *pRsp = (SVCreateTbRsp *)param;
+ if (pRsp->pMeta) {
+ taosMemoryFree(pRsp->pMeta->pSchemas);
+ taosMemoryFree(pRsp->pMeta);
+ }
+}
+
// TDMT_VND_DROP_TABLE =================
static int32_t tEncodeSVDropTbReq(SEncoder *pCoder, const SVDropTbReq *pReq) {
if (tStartEncode(pCoder) < 0) return -1;
if (tEncodeCStr(pCoder, pReq->name) < 0) return -1;
+ if (tEncodeU64(pCoder, pReq->suid) < 0) return -1;
if (tEncodeI8(pCoder, pReq->igNotExists) < 0) return -1;
tEndEncode(pCoder);
@@ -5119,6 +5152,7 @@ static int32_t tDecodeSVDropTbReq(SDecoder *pCoder, SVDropTbReq *pReq) {
if (tStartDecode(pCoder) < 0) return -1;
if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1;
+ if (tDecodeU64(pCoder, &pReq->suid) < 0) return -1;
if (tDecodeI8(pCoder, &pReq->igNotExists) < 0) return -1;
tEndDecode(pCoder);
@@ -5292,6 +5326,10 @@ static int32_t tEncodeSSubmitBlkRsp(SEncoder *pEncoder, const SSubmitBlkRsp *pBl
if (tEncodeI32v(pEncoder, pBlock->numOfRows) < 0) return -1;
if (tEncodeI32v(pEncoder, pBlock->affectedRows) < 0) return -1;
if (tEncodeI64v(pEncoder, pBlock->sver) < 0) return -1;
+ if (tEncodeI32(pEncoder, pBlock->pMeta ? 1 : 0) < 0) return -1;
+ if (pBlock->pMeta) {
+ if (tEncodeSTableMetaRsp(pEncoder, pBlock->pMeta) < 0) return -1;
+ }
tEndEncode(pEncoder);
return 0;
@@ -5310,6 +5348,16 @@ static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) {
if (tDecodeI32v(pDecoder, &pBlock->affectedRows) < 0) return -1;
if (tDecodeI64v(pDecoder, &pBlock->sver) < 0) return -1;
+ int32_t meta = 0;
+ if (tDecodeI32(pDecoder, &meta) < 0) return -1;
+ if (meta) {
+ pBlock->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+ if (NULL == pBlock->pMeta) return -1;
+ if (tDecodeSTableMetaRsp(pDecoder, pBlock->pMeta) < 0) return -1;
+ } else {
+ pBlock->pMeta = NULL;
+ }
+
tEndDecode(pDecoder);
return 0;
}
@@ -5347,6 +5395,20 @@ int32_t tDecodeSSubmitRsp(SDecoder *pDecoder, SSubmitRsp *pRsp) {
return 0;
}
+void tFreeSSubmitBlkRsp(void *param) {
+ if (NULL == param) {
+ return;
+ }
+
+ SSubmitBlkRsp *pRsp = (SSubmitBlkRsp *)param;
+
+ taosMemoryFree(pRsp->tblFName);
+ if (pRsp->pMeta) {
+ taosMemoryFree(pRsp->pMeta->pSchemas);
+ taosMemoryFree(pRsp->pMeta);
+ }
+}
+
void tFreeSSubmitRsp(SSubmitRsp *pRsp) {
if (NULL == pRsp) return;
@@ -5558,9 +5620,60 @@ void tFreeSMAlterStbRsp(SMAlterStbRsp *pRsp) {
}
}
+int32_t tEncodeSMCreateStbRsp(SEncoder *pEncoder, const SMCreateStbRsp *pRsp) {
+ if (tStartEncode(pEncoder) < 0) return -1;
+ if (tEncodeI32(pEncoder, (pRsp->pMeta && pRsp->pMeta->pSchemas) ? 1 : 0) < 0) return -1;
+ if (pRsp->pMeta && pRsp->pMeta->pSchemas) {
+ if (tEncodeSTableMetaRsp(pEncoder, pRsp->pMeta) < 0) return -1;
+ }
+ tEndEncode(pEncoder);
+ return 0;
+}
+
+int32_t tDecodeSMCreateStbRsp(SDecoder *pDecoder, SMCreateStbRsp *pRsp) {
+ int32_t meta = 0;
+ if (tStartDecode(pDecoder) < 0) return -1;
+ if (tDecodeI32(pDecoder, &meta) < 0) return -1;
+ if (meta) {
+ pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+ if (NULL == pRsp->pMeta) return -1;
+ if (tDecodeSTableMetaRsp(pDecoder, pRsp->pMeta) < 0) return -1;
+ }
+ tEndDecode(pDecoder);
+ return 0;
+}
+
+int32_t tDeserializeSMCreateStbRsp(void *buf, int32_t bufLen, SMCreateStbRsp *pRsp) {
+ int32_t meta = 0;
+ SDecoder decoder = {0};
+ tDecoderInit(&decoder, buf, bufLen);
+
+ if (tStartDecode(&decoder) < 0) return -1;
+ if (tDecodeI32(&decoder, &meta) < 0) return -1;
+ if (meta) {
+ pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+ if (NULL == pRsp->pMeta) return -1;
+ if (tDecodeSTableMetaRsp(&decoder, pRsp->pMeta) < 0) return -1;
+ }
+ tEndDecode(&decoder);
+ tDecoderClear(&decoder);
+ return 0;
+}
+
+void tFreeSMCreateStbRsp(SMCreateStbRsp *pRsp) {
+ if (NULL == pRsp) {
+ return;
+ }
+
+ if (pRsp->pMeta) {
+ taosMemoryFree(pRsp->pMeta->pSchemas);
+ taosMemoryFree(pRsp->pMeta);
+ }
+}
+
int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal) {
if (tEncodeI8(pEncoder, pOffsetVal->type) < 0) return -1;
- if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
if (tEncodeI64(pEncoder, pOffsetVal->uid) < 0) return -1;
if (tEncodeI64(pEncoder, pOffsetVal->ts) < 0) return -1;
} else if (pOffsetVal->type == TMQ_OFFSET__LOG) {
@@ -5575,7 +5688,7 @@ int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal)
int32_t tDecodeSTqOffsetVal(SDecoder *pDecoder, STqOffsetVal *pOffsetVal) {
if (tDecodeI8(pDecoder, &pOffsetVal->type) < 0) return -1;
- if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
if (tDecodeI64(pDecoder, &pOffsetVal->uid) < 0) return -1;
if (tDecodeI64(pDecoder, &pOffsetVal->ts) < 0) return -1;
} else if (pOffsetVal->type == TMQ_OFFSET__LOG) {
@@ -5597,10 +5710,8 @@ int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) {
snprintf(buf, maxLen, "offset(reset to latest)");
} else if (pVal->type == TMQ_OFFSET__LOG) {
snprintf(buf, maxLen, "offset(log) ver:%" PRId64, pVal->version);
- } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
snprintf(buf, maxLen, "offset(ss data) uid:%" PRId64 ", ts:%" PRId64, pVal->uid, pVal->ts);
- } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
- snprintf(buf, maxLen, "offset(ss meta) uid:%" PRId64 ", ts:%" PRId64, pVal->uid, pVal->ts);
} else {
ASSERT(0);
}
@@ -5614,9 +5725,7 @@ bool tOffsetEqual(const STqOffsetVal *pLeft, const STqOffsetVal *pRight) {
} else if (pLeft->type == TMQ_OFFSET__SNAPSHOT_DATA) {
return pLeft->uid == pRight->uid && pLeft->ts == pRight->ts;
} else if (pLeft->type == TMQ_OFFSET__SNAPSHOT_META) {
- ASSERT(0);
- // TODO
- return pLeft->uid == pRight->uid && pLeft->ts == pRight->ts;
+ return pLeft->uid == pRight->uid;
} else {
ASSERT(0);
/*ASSERT(pLeft->type == TMQ_OFFSET__RESET_NONE || pLeft->type == TMQ_OFFSET__RESET_EARLIEAST ||*/
@@ -5701,6 +5810,21 @@ int32_t tDecodeDeleteRes(SDecoder *pCoder, SDeleteRes *pRes) {
if (tDecodeCStrTo(pCoder, pRes->tsColName) < 0) return -1;
return 0;
}
+
+int32_t tEncodeSMqMetaRsp(SEncoder *pEncoder, const SMqMetaRsp *pRsp) {
+ if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
+ if (tEncodeI16(pEncoder, pRsp->resMsgType) < 0) return -1;
+ if (tEncodeBinary(pEncoder, pRsp->metaRsp, pRsp->metaRspLen) < 0) return -1;
+ return 0;
+}
+
+int32_t tDecodeSMqMetaRsp(SDecoder *pDecoder, SMqMetaRsp *pRsp) {
+ if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1;
+ if (tDecodeI16(pDecoder, &pRsp->resMsgType) < 0) return -1;
+ if (tDecodeBinaryAlloc(pDecoder, &pRsp->metaRsp, (uint64_t *)&pRsp->metaRspLen) < 0) return -1;
+ return 0;
+}
+
int32_t tEncodeSMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) {
if (tEncodeSTqOffsetVal(pEncoder, &pRsp->reqOffset) < 0) return -1;
if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
@@ -5767,6 +5891,110 @@ int32_t tDecodeSMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) {
return 0;
}
+void tDeleteSMqDataRsp(SMqDataRsp *pRsp) {
+ taosArrayDestroy(pRsp->blockDataLen);
+ taosArrayDestroyP(pRsp->blockData, (FDelete)taosMemoryFree);
+ taosArrayDestroyP(pRsp->blockSchema, (FDelete)tDeleteSSchemaWrapper);
+ taosArrayDestroyP(pRsp->blockTbName, (FDelete)taosMemoryFree);
+}
+
+int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const STaosxRsp *pRsp) {
+ if (tEncodeSTqOffsetVal(pEncoder, &pRsp->reqOffset) < 0) return -1;
+ if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
+ if (tEncodeI32(pEncoder, pRsp->blockNum) < 0) return -1;
+ if (pRsp->blockNum != 0) {
+ if (tEncodeI8(pEncoder, pRsp->withTbName) < 0) return -1;
+ if (tEncodeI8(pEncoder, pRsp->withSchema) < 0) return -1;
+
+ for (int32_t i = 0; i < pRsp->blockNum; i++) {
+ int32_t bLen = *(int32_t *)taosArrayGet(pRsp->blockDataLen, i);
+ void *data = taosArrayGetP(pRsp->blockData, i);
+ if (tEncodeBinary(pEncoder, (const uint8_t *)data, bLen) < 0) return -1;
+ if (pRsp->withSchema) {
+ SSchemaWrapper *pSW = (SSchemaWrapper *)taosArrayGetP(pRsp->blockSchema, i);
+ if (tEncodeSSchemaWrapper(pEncoder, pSW) < 0) return -1;
+ }
+ if (pRsp->withTbName) {
+ char *tbName = (char *)taosArrayGetP(pRsp->blockTbName, i);
+ if (tEncodeCStr(pEncoder, tbName) < 0) return -1;
+ }
+ }
+ }
+ if (tEncodeI32(pEncoder, pRsp->createTableNum) < 0) return -1;
+ if (pRsp->createTableNum) {
+ for (int32_t i = 0; i < pRsp->createTableNum; i++) {
+ void *createTableReq = taosArrayGetP(pRsp->createTableReq, i);
+ int32_t createTableLen = *(int32_t *)taosArrayGet(pRsp->createTableLen, i);
+ if (tEncodeBinary(pEncoder, createTableReq, createTableLen) < 0) return -1;
+ }
+ }
+ return 0;
+}
+
+int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, STaosxRsp *pRsp) {
+ if (tDecodeSTqOffsetVal(pDecoder, &pRsp->reqOffset) < 0) return -1;
+ if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pRsp->blockNum) < 0) return -1;
+ if (pRsp->blockNum != 0) {
+ pRsp->blockData = taosArrayInit(pRsp->blockNum, sizeof(void *));
+ pRsp->blockDataLen = taosArrayInit(pRsp->blockNum, sizeof(int32_t));
+ if (tDecodeI8(pDecoder, &pRsp->withTbName) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pRsp->withSchema) < 0) return -1;
+ if (pRsp->withTbName) {
+ pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void *));
+ }
+ if (pRsp->withSchema) {
+ pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void *));
+ }
+
+ for (int32_t i = 0; i < pRsp->blockNum; i++) {
+ void *data;
+ uint64_t bLen;
+ if (tDecodeBinaryAlloc(pDecoder, &data, &bLen) < 0) return -1;
+ taosArrayPush(pRsp->blockData, &data);
+ int32_t len = bLen;
+ taosArrayPush(pRsp->blockDataLen, &len);
+
+ if (pRsp->withSchema) {
+ SSchemaWrapper *pSW = (SSchemaWrapper *)taosMemoryCalloc(1, sizeof(SSchemaWrapper));
+ if (pSW == NULL) return -1;
+ if (tDecodeSSchemaWrapper(pDecoder, pSW) < 0) return -1;
+ taosArrayPush(pRsp->blockSchema, &pSW);
+ }
+
+ if (pRsp->withTbName) {
+ char *tbName;
+ if (tDecodeCStrAlloc(pDecoder, &tbName) < 0) return -1;
+ taosArrayPush(pRsp->blockTbName, &tbName);
+ }
+ }
+ }
+ if (tDecodeI32(pDecoder, &pRsp->createTableNum) < 0) return -1;
+ if (pRsp->createTableNum) {
+ pRsp->createTableLen = taosArrayInit(pRsp->createTableNum, sizeof(int32_t));
+ pRsp->createTableReq = taosArrayInit(pRsp->createTableNum, sizeof(void *));
+ for (int32_t i = 0; i < pRsp->createTableNum; i++) {
+ void *pCreate = NULL;
+ uint64_t len;
+ if (tDecodeBinaryAlloc(pDecoder, &pCreate, &len) < 0) return -1;
+ int32_t l = (int32_t)len;
+ taosArrayPush(pRsp->createTableLen, &l);
+ taosArrayPush(pRsp->createTableReq, &pCreate);
+ }
+ }
+ return 0;
+}
+
+void tDeleteSTaosxRsp(STaosxRsp *pRsp) {
+ taosArrayDestroy(pRsp->blockDataLen);
+ taosArrayDestroyP(pRsp->blockData, (FDelete)taosMemoryFree);
+ taosArrayDestroyP(pRsp->blockSchema, (FDelete)tDeleteSSchemaWrapper);
+ taosArrayDestroyP(pRsp->blockTbName, (FDelete)taosMemoryFree);
+
+ taosArrayDestroy(pRsp->createTableLen);
+ taosArrayDestroyP(pRsp->createTableReq, (FDelete)taosMemoryFree);
+}
+
int32_t tEncodeSSingleDeleteReq(SEncoder *pEncoder, const SSingleDeleteReq *pReq) {
if (tEncodeI64(pEncoder, pReq->uid) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->ts) < 0) return -1;
diff --git a/source/common/src/tvariant.c b/source/common/src/tvariant.c
index 0810be149716e58fdac74b67db6946fde7db62e9..a01c393441c0a4b6945226ba2c77ffe1a23ced57 100644
--- a/source/common/src/tvariant.c
+++ b/source/common/src/tvariant.c
@@ -155,8 +155,8 @@ void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uin
void taosVariantDestroy(SVariant *pVar) {
if (pVar == NULL) return;
- if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR
- || pVar->nType == TSDB_DATA_TYPE_JSON) {
+ if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR ||
+ pVar->nType == TSDB_DATA_TYPE_JSON) {
taosMemoryFreeClear(pVar->pz);
pVar->nLen = 0;
}
@@ -185,8 +185,8 @@ void taosVariantAssign(SVariant *pDst, const SVariant *pSrc) {
if (pSrc == NULL || pDst == NULL) return;
pDst->nType = pSrc->nType;
- if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR
- || pSrc->nType == TSDB_DATA_TYPE_JSON) {
+ if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR ||
+ pSrc->nType == TSDB_DATA_TYPE_JSON) {
int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE;
char *p = taosMemoryRealloc(pDst->pz, len);
assert(p);
diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
index a059db6b00b7896289346eae0016dedfe95db400..b91b82b72e9cfd730bba4382136d427c215bf844 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
@@ -301,7 +301,7 @@ int32_t dmInitServer(SDnode *pDnode) {
SDnodeTrans *pTrans = &pDnode->trans;
SRpcInit rpcInit = {0};
- strncpy(rpcInit.localFqdn, tsLocalFqdn, strlen(tsLocalFqdn));
+ tstrncpy(rpcInit.localFqdn, tsLocalFqdn, TSDB_FQDN_LEN);
rpcInit.localPort = tsServerPort;
rpcInit.label = "DND-S";
rpcInit.numOfThreads = tsNumOfRpcThreads;
diff --git a/source/dnode/mnode/impl/inc/mndCluster.h b/source/dnode/mnode/impl/inc/mndCluster.h
index 0de253fb6adebf053eb1eb6afc68693f1fbc6747..2cb41edd7c1d37c8dab6f0e276259e9cc530fea8 100644
--- a/source/dnode/mnode/impl/inc/mndCluster.h
+++ b/source/dnode/mnode/impl/inc/mndCluster.h
@@ -27,6 +27,7 @@ void mndCleanupCluster(SMnode *pMnode);
int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len);
int64_t mndGetClusterId(SMnode *pMnode);
int64_t mndGetClusterCreateTime(SMnode *pMnode);
+float mndGetClusterUpTime(SMnode *pMnode);
#ifdef __cplusplus
}
diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h
index 8cff7fe48e0d9a2c25b0fda797a098d9e370d506..ea05215fe90d30708013fe4b1c8fc08d2be8d3d6 100644
--- a/source/dnode/mnode/impl/inc/mndDef.h
+++ b/source/dnode/mnode/impl/inc/mndDef.h
@@ -179,6 +179,7 @@ typedef struct {
char name[TSDB_CLUSTER_ID_LEN];
int64_t createdTime;
int64_t updateTime;
+ int32_t upTime;
} SClusterObj;
typedef struct {
diff --git a/source/dnode/mnode/impl/inc/mndInfoSchema.h b/source/dnode/mnode/impl/inc/mndInfoSchema.h
index b10d92ee3de1a0e06d801c9a8840751a9f52f37c..4f98465cd170280d8c9f5e9356c37cebf26f9bd0 100644
--- a/source/dnode/mnode/impl/inc/mndInfoSchema.h
+++ b/source/dnode/mnode/impl/inc/mndInfoSchema.h
@@ -24,7 +24,8 @@ extern "C" {
int32_t mndInitInfos(SMnode *pMnode);
void mndCleanupInfos(SMnode *pMnode);
-int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, STableMetaRsp *pRsp);
+int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, bool sysinfo,
+ STableMetaRsp *pRsp);
int32_t mndBuildInsTableCfg(SMnode *pMnode, const char *dbFName, const char *tbName, STableCfgRsp *pRsp);
#ifdef __cplusplus
diff --git a/source/dnode/mnode/impl/inc/mndStb.h b/source/dnode/mnode/impl/inc/mndStb.h
index 010199a89fcf28131371b589f344a5053e891620..8f0d55e10061ce4517c4305ae7450a7439b91cfd 100644
--- a/source/dnode/mnode/impl/inc/mndStb.h
+++ b/source/dnode/mnode/impl/inc/mndStb.h
@@ -35,6 +35,7 @@ SDbObj *mndAcquireDbByStb(SMnode *pMnode, const char *stbName);
int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreate, SDbObj *pDb);
int32_t mndAddStbToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb);
void mndFreeStb(SStbObj *pStb);
+int32_t mndBuildSMCreateStbRsp(SMnode *pMnode, char* dbFName, char* stbFName, void **pCont, int32_t *pLen);
void mndExtractDbNameFromStbFullName(const char *stbFullName, char *dst);
void mndExtractTbNameFromStbFullName(const char *stbFullName, char *dst, int32_t dstSize);
diff --git a/source/dnode/mnode/impl/src/mndCluster.c b/source/dnode/mnode/impl/src/mndCluster.c
index a82bf739f5feae48e444a7100f45aef19f7c6803..7d633f90bd937c24b82094bdc0fa6d30c30bc250 100644
--- a/source/dnode/mnode/impl/src/mndCluster.c
+++ b/source/dnode/mnode/impl/src/mndCluster.c
@@ -19,7 +19,7 @@
#include "mndTrans.h"
#define CLUSTER_VER_NUMBE 1
-#define CLUSTER_RESERVE_SIZE 64
+#define CLUSTER_RESERVE_SIZE 60
static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster);
static SSdbRow *mndClusterActionDecode(SSdbRaw *pRaw);
@@ -29,6 +29,7 @@ static int32_t mndClusterActionUpdate(SSdb *pSdb, SClusterObj *pOldCluster, SCl
static int32_t mndCreateDefaultCluster(SMnode *pMnode);
static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
static void mndCancelGetNextCluster(SMnode *pMnode, void *pIter);
+static int32_t mndProcessUptimeTimer(SRpcMsg *pReq);
int32_t mndInitCluster(SMnode *pMnode) {
SSdbTable table = {
@@ -42,8 +43,10 @@ int32_t mndInitCluster(SMnode *pMnode) {
.deleteFp = (SdbDeleteFp)mndClusterActionDelete,
};
+ mndSetMsgHandle(pMnode, TDMT_MND_UPTIME_TIMER, mndProcessUptimeTimer);
mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_CLUSTER, mndRetrieveClusters);
mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_CLUSTER, mndCancelGetNextCluster);
+
return sdbSetTable(pMnode->pSdb, table);
}
@@ -62,40 +65,69 @@ int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len) {
return 0;
}
-int64_t mndGetClusterId(SMnode *pMnode) {
- SSdb *pSdb = pMnode->pSdb;
- void *pIter = NULL;
- int64_t clusterId = -1;
+static SClusterObj *mndAcquireCluster(SMnode *pMnode) {
+ SSdb *pSdb = pMnode->pSdb;
+ void *pIter = NULL;
while (1) {
SClusterObj *pCluster = NULL;
pIter = sdbFetch(pSdb, SDB_CLUSTER, pIter, (void **)&pCluster);
if (pIter == NULL) break;
+ return pCluster;
+ }
+
+ return NULL;
+}
+
+static void mndReleaseCluster(SMnode *pMnode, SClusterObj *pCluster) {
+ SSdb *pSdb = pMnode->pSdb;
+ sdbRelease(pSdb, pCluster);
+}
+
+int64_t mndGetClusterId(SMnode *pMnode) {
+ int64_t clusterId = 0;
+ SClusterObj *pCluster = mndAcquireCluster(pMnode);
+ if (pCluster != NULL) {
clusterId = pCluster->id;
- sdbRelease(pSdb, pCluster);
+ mndReleaseCluster(pMnode, pCluster);
}
return clusterId;
}
int64_t mndGetClusterCreateTime(SMnode *pMnode) {
- SSdb *pSdb = pMnode->pSdb;
- void *pIter = NULL;
- int64_t createTime = INT64_MAX;
-
- while (1) {
- SClusterObj *pCluster = NULL;
- pIter = sdbFetch(pSdb, SDB_CLUSTER, pIter, (void **)&pCluster);
- if (pIter == NULL) break;
-
+ int64_t createTime = 0;
+ SClusterObj *pCluster = mndAcquireCluster(pMnode);
+ if (pCluster != NULL) {
createTime = pCluster->createdTime;
- sdbRelease(pSdb, pCluster);
+ mndReleaseCluster(pMnode, pCluster);
}
return createTime;
}
+static int32_t mndGetClusterUpTimeImp(SClusterObj *pCluster) {
+#if 0
+ int32_t upTime = taosGetTimestampSec() - pCluster->updateTime / 1000;
+ upTime = upTime + pCluster->upTime;
+ return upTime;
+#else
+ return pCluster->upTime;
+#endif
+}
+
+float mndGetClusterUpTime(SMnode *pMnode) {
+ int64_t upTime = 0;
+ SClusterObj *pCluster = mndAcquireCluster(pMnode);
+ if (pCluster != NULL) {
+ upTime = mndGetClusterUpTimeImp(pCluster);
+ mndReleaseCluster(pMnode, pCluster);
+ }
+
+ return upTime / 86400.0f;
+}
+
static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -107,6 +139,7 @@ static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster) {
SDB_SET_INT64(pRaw, dataPos, pCluster->createdTime, _OVER)
SDB_SET_INT64(pRaw, dataPos, pCluster->updateTime, _OVER)
SDB_SET_BINARY(pRaw, dataPos, pCluster->name, TSDB_CLUSTER_ID_LEN, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pCluster->upTime, _OVER)
SDB_SET_RESERVE(pRaw, dataPos, CLUSTER_RESERVE_SIZE, _OVER)
terrno = 0;
@@ -144,6 +177,7 @@ static SSdbRow *mndClusterActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT64(pRaw, dataPos, &pCluster->createdTime, _OVER)
SDB_GET_INT64(pRaw, dataPos, &pCluster->updateTime, _OVER)
SDB_GET_BINARY(pRaw, dataPos, pCluster->name, TSDB_CLUSTER_ID_LEN, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &pCluster->upTime, _OVER)
SDB_GET_RESERVE(pRaw, dataPos, CLUSTER_RESERVE_SIZE, _OVER)
terrno = 0;
@@ -162,6 +196,7 @@ _OVER:
static int32_t mndClusterActionInsert(SSdb *pSdb, SClusterObj *pCluster) {
mTrace("cluster:%" PRId64 ", perform insert action, row:%p", pCluster->id, pCluster);
pSdb->pMnode->clusterId = pCluster->id;
+ pCluster->updateTime = taosGetTimestampMs();
return 0;
}
@@ -171,7 +206,10 @@ static int32_t mndClusterActionDelete(SSdb *pSdb, SClusterObj *pCluster) {
}
static int32_t mndClusterActionUpdate(SSdb *pSdb, SClusterObj *pOld, SClusterObj *pNew) {
- mTrace("cluster:%" PRId64 ", perform update action, old row:%p new row:%p", pOld->id, pOld, pNew);
+ mTrace("cluster:%" PRId64 ", perform update action, old row:%p new row:%p, uptime from %d to %d", pOld->id, pOld,
+ pNew, pOld->upTime, pNew->upTime);
+ pOld->upTime = pNew->upTime;
+ pOld->updateTime = taosGetTimestampMs();
return 0;
}
@@ -242,6 +280,10 @@ static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock *
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, buf, false);
+ int32_t upTime = mndGetClusterUpTimeImp(pCluster);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, numOfRows, (const char *)&upTime, false);
+
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pCluster->createdTime, false);
@@ -257,3 +299,40 @@ static void mndCancelGetNextCluster(SMnode *pMnode, void *pIter) {
SSdb *pSdb = pMnode->pSdb;
sdbCancelFetch(pSdb, pIter);
}
+
+static int32_t mndProcessUptimeTimer(SRpcMsg *pReq) {
+ SMnode *pMnode = pReq->info.node;
+ SClusterObj clusterObj = {0};
+ SClusterObj *pCluster = mndAcquireCluster(pMnode);
+ if (pCluster != NULL) {
+ memcpy(&clusterObj, pCluster, sizeof(SClusterObj));
+ clusterObj.upTime += tsUptimeInterval;
+ mndReleaseCluster(pMnode, pCluster);
+ }
+
+ if (clusterObj.id <= 0) {
+ mError("can't get cluster info while update uptime");
+ return 0;
+ }
+
+ mTrace("update cluster uptime to %d", clusterObj.upTime);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
+ if (pTrans == NULL) return -1;
+
+ SSdbRaw *pCommitRaw = mndClusterActionEncode(&clusterObj);
+ if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
+ mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
+ mndTransDrop(pTrans);
+ return -1;
+ }
+ sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
+
+ if (mndTransPrepare(pMnode, pTrans) != 0) {
+ mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
+ mndTransDrop(pTrans);
+ return -1;
+ }
+
+ mndTransDrop(pTrans);
+ return 0;
+}
diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c
index 853ace79fd79bd2c30684446d0c12f5640eb881c..fee322ecad9f83aef16ccc13623575eb0aa0544c 100644
--- a/source/dnode/mnode/impl/src/mndDb.c
+++ b/source/dnode/mnode/impl/src/mndDb.c
@@ -358,10 +358,10 @@ static void mndSetDefaultDbCfg(SDbCfg *pCfg) {
if (pCfg->numOfRetensions < 0) pCfg->numOfRetensions = 0;
if (pCfg->schemaless < 0) pCfg->schemaless = TSDB_DB_SCHEMALESS_OFF;
if (pCfg->walRetentionPeriod < 0 && pCfg->walRetentionPeriod != -1)
- pCfg->walRetentionPeriod = TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD;
+ pCfg->walRetentionPeriod = TSDB_REPS_DEF_DB_WAL_RET_PERIOD;
if (pCfg->walRetentionSize < 0 && pCfg->walRetentionSize != -1)
- pCfg->walRetentionSize = TSDB_DEFAULT_DB_WAL_RETENTION_SIZE;
- if (pCfg->walRollPeriod < 0) pCfg->walRollPeriod = TSDB_DEFAULT_DB_WAL_ROLL_PERIOD;
+ pCfg->walRetentionSize = TSDB_REPS_DEF_DB_WAL_RET_SIZE;
+ if (pCfg->walRollPeriod < 0) pCfg->walRollPeriod = TSDB_REPS_DEF_DB_WAL_ROLL_PERIOD;
if (pCfg->walSegmentSize < 0) pCfg->walSegmentSize = TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE;
}
@@ -1731,7 +1731,7 @@ static int32_t mndRetrieveDbs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc
SDbObj infoschemaDb = {0};
setInformationSchemaDbCfg(&infoschemaDb);
size_t numOfTables = 0;
- getInfosDbMeta(NULL, &numOfTables);
+ getVisibleInfosTablesNum(sysinfo, &numOfTables);
mndDumpDbInfoData(pMnode, pBlock, &infoschemaDb, pShow, numOfRows, numOfTables, true, 0, 1);
numOfRows += 1;
diff --git a/source/dnode/mnode/impl/src/mndInfoSchema.c b/source/dnode/mnode/impl/src/mndInfoSchema.c
index bf33cf603f68ccedfdf69c972441021bdbcb0a53..09172115f8502e392c1d37ae1d256761afb02126 100644
--- a/source/dnode/mnode/impl/src/mndInfoSchema.c
+++ b/source/dnode/mnode/impl/src/mndInfoSchema.c
@@ -14,8 +14,8 @@
*/
#define _DEFAULT_SOURCE
-#include "systable.h"
#include "mndInt.h"
+#include "systable.h"
static int32_t mndInitInfosTableSchema(const SSysDbTableSchema *pSrc, int32_t colNum, SSchema **pDst) {
SSchema *schema = taosMemoryCalloc(colNum, sizeof(SSchema));
@@ -29,6 +29,9 @@ static int32_t mndInitInfosTableSchema(const SSysDbTableSchema *pSrc, int32_t co
schema[i].type = pSrc[i].type;
schema[i].colId = i + 1;
schema[i].bytes = pSrc[i].bytes;
+ if (pSrc[i].sysInfo) {
+ schema[i].flags |= COL_IS_SYSINFO;
+ }
}
*pDst = schema;
@@ -43,13 +46,14 @@ static int32_t mndInsInitMeta(SHashObj *hash) {
meta.sversion = 1;
meta.tversion = 1;
- size_t size = 0;
- const SSysTableMeta* pInfosTableMeta = NULL;
+ size_t size = 0;
+ const SSysTableMeta *pInfosTableMeta = NULL;
getInfosDbMeta(&pInfosTableMeta, &size);
for (int32_t i = 0; i < size; ++i) {
tstrncpy(meta.tbName, pInfosTableMeta[i].name, sizeof(meta.tbName));
meta.numOfColumns = pInfosTableMeta[i].colNum;
+ meta.sysInfo = pInfosTableMeta[i].sysInfo;
if (mndInitInfosTableSchema(pInfosTableMeta[i].schema, pInfosTableMeta[i].colNum, &meta.pSchemas)) {
return -1;
@@ -64,14 +68,15 @@ static int32_t mndInsInitMeta(SHashObj *hash) {
return 0;
}
-int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, STableMetaRsp *pRsp) {
+int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, bool sysinfo,
+ STableMetaRsp *pRsp) {
if (NULL == pMnode->infosMeta) {
terrno = TSDB_CODE_APP_NOT_READY;
return -1;
}
STableMetaRsp *pMeta = taosHashGet(pMnode->infosMeta, tbName, strlen(tbName));
- if (NULL == pMeta) {
+ if (NULL == pMeta || (!sysinfo && pMeta->sysInfo)) {
mError("invalid information schema table name:%s", tbName);
terrno = TSDB_CODE_MND_INVALID_SYS_TABLENAME;
return -1;
@@ -121,7 +126,6 @@ int32_t mndBuildInsTableCfg(SMnode *pMnode, const char *dbFName, const char *tbN
return 0;
}
-
int32_t mndInitInfos(SMnode *pMnode) {
pMnode->infosMeta = taosHashInit(20, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), false, HASH_NO_LOCK);
if (pMnode->infosMeta == NULL) {
diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c
index 1693ef7d6502d753595ad6af9596b0eb775829bc..2221718023c8d080059736fd811c946618fd948d 100644
--- a/source/dnode/mnode/impl/src/mndMain.c
+++ b/source/dnode/mnode/impl/src/mndMain.c
@@ -100,6 +100,16 @@ static void mndGrantHeartBeat(SMnode *pMnode) {
}
}
+static void mndIncreaseUpTime(SMnode *pMnode) {
+ int32_t contLen = 0;
+ void *pReq = mndBuildTimerMsg(&contLen);
+ if (pReq != NULL) {
+ SRpcMsg rpcMsg = {
+ .msgType = TDMT_MND_UPTIME_TIMER, .pCont = pReq, .contLen = contLen, .info.ahandle = (void *)0x9528};
+ tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg);
+ }
+}
+
static void *mndThreadFp(void *param) {
SMnode *pMnode = param;
int64_t lastTime = 0;
@@ -122,13 +132,17 @@ static void *mndThreadFp(void *param) {
mndCalMqRebalance(pMnode);
}
- if (lastTime % (tsTelemInterval * 10) == 1) {
+ if (lastTime % (tsTelemInterval * 10) == ((tsTelemInterval - 1) * 10)) {
mndPullupTelem(pMnode);
}
if (lastTime % (tsGrantHBInterval * 10) == 0) {
mndGrantHeartBeat(pMnode);
}
+
+ if ((lastTime % (tsUptimeInterval * 10)) == ((tsUptimeInterval - 1) * 10)) {
+ mndIncreaseUpTime(pMnode);
+ }
}
return NULL;
@@ -556,7 +570,8 @@ static int32_t mndCheckMnodeState(SRpcMsg *pMsg) {
}
if (mndAcquireRpcRef(pMsg->info.node) == 0) return 0;
if (pMsg->msgType == TDMT_MND_MQ_TIMER || pMsg->msgType == TDMT_MND_TELEM_TIMER ||
- pMsg->msgType == TDMT_MND_TRANS_TIMER || pMsg->msgType == TDMT_MND_TTL_TIMER) {
+ pMsg->msgType == TDMT_MND_TRANS_TIMER || pMsg->msgType == TDMT_MND_TTL_TIMER ||
+ pMsg->msgType == TDMT_MND_UPTIME_TIMER) {
return -1;
}
@@ -705,7 +720,8 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
if (pObj->id == pMnode->selfDnodeId) {
pClusterInfo->first_ep_dnode_id = pObj->id;
tstrncpy(pClusterInfo->first_ep, pObj->pDnode->ep, sizeof(pClusterInfo->first_ep));
- pClusterInfo->master_uptime = (ms - pObj->stateStartTime) / (86400000.0f);
+ pClusterInfo->master_uptime = mndGetClusterUpTime(pMnode);
+ // pClusterInfo->master_uptime = (ms - pObj->stateStartTime) / (86400000.0f);
tstrncpy(desc.role, syncStr(TAOS_SYNC_STATE_LEADER), sizeof(desc.role));
} else {
tstrncpy(desc.role, syncStr(pObj->state), sizeof(desc.role));
diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c
index 4f07d9e0143f52da057c40d2e655044da01a6b72..71bda4d4f34213a7b3240f6634b26579fb66c1ee 100644
--- a/source/dnode/mnode/impl/src/mndMnode.c
+++ b/source/dnode/mnode/impl/src/mndMnode.c
@@ -89,14 +89,14 @@ static int32_t mndCreateDefaultMnode(SMnode *pMnode) {
if (pRaw == NULL) return -1;
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- mDebug("mnode:%d, will be created when deploying, raw:%p", mnodeObj.id, pRaw);
+ mInfo("mnode:%d, will be created when deploying, raw:%p", mnodeObj.id, pRaw);
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, NULL);
if (pTrans == NULL) {
mError("mnode:%d, failed to create since %s", mnodeObj.id, terrstr());
return -1;
}
- mDebug("trans:%d, used to create mnode:%d", pTrans->id, mnodeObj.id);
+ mInfo("trans:%d, used to create mnode:%d", pTrans->id, mnodeObj.id);
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
@@ -365,7 +365,7 @@ static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq);
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
- mDebug("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId);
+ mInfo("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId);
if (mndSetCreateMnodeRedoLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;
if (mndSetCreateMnodeCommitLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;
@@ -392,7 +392,7 @@ static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("mnode:%d, start to create", createReq.dnodeId);
+ mInfo("mnode:%d, start to create", createReq.dnodeId);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_MNODE) != 0) {
goto _OVER;
}
@@ -574,7 +574,7 @@ static int32_t mndDropMnode(SMnode *pMnode, SRpcMsg *pReq, SMnodeObj *pObj) {
pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq);
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
- mDebug("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id);
+ mInfo("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id);
if (mndSetDropMnodeInfoToTrans(pMnode, pTrans, pObj) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
@@ -597,7 +597,7 @@ static int32_t mndProcessDropMnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("mnode:%d, start to drop", dropReq.dnodeId);
+ mInfo("mnode:%d, start to drop", dropReq.dnodeId);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_MNODE) != 0) {
goto _OVER;
}
@@ -732,7 +732,7 @@ static int32_t mndProcessAlterMnodeReq(SRpcMsg *pReq) {
}
}
- mTrace("trans:-1, sync reconfig will be proposed");
+ mInfo("trans:-1, sync reconfig will be proposed");
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
pMgmt->standby = 0;
diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c
index e55c562e38c207600956cd1eafbb88d744750f7d..e8737e30c9817bd71d1b3a47f245ef0004603dc3 100644
--- a/source/dnode/mnode/impl/src/mndProfile.c
+++ b/source/dnode/mnode/impl/src/mndProfile.c
@@ -270,6 +270,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
SConnectRsp connectRsp = {0};
connectRsp.acctId = pUser->acctId;
connectRsp.superUser = pUser->superUser;
+ connectRsp.sysInfo = pUser->sysInfo;
connectRsp.clusterId = pMnode->clusterId;
connectRsp.connId = pConn->id;
connectRsp.connType = connReq.connType;
diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c
index 9499c90c57c59e3600c701668dd17671f641d919..5a998dfe986d9f012e066f45810604b7ca9d728f 100644
--- a/source/dnode/mnode/impl/src/mndShow.c
+++ b/source/dnode/mnode/impl/src/mndShow.c
@@ -88,7 +88,7 @@ static int32_t convertToRetrieveType(char *name, int32_t len) {
type = TSDB_MGMT_TABLE_VGROUP;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_CONSUMERS, len) == 0) {
type = TSDB_MGMT_TABLE_CONSUMERS;
- } else if (strncasecmp(name, TSDB_PERFS_TABLE_SUBSCRIPTIONS, len) == 0) {
+ } else if (strncasecmp(name, TSDB_INS_TABLE_SUBSCRIPTIONS, len) == 0) {
type = TSDB_MGMT_TABLE_SUBSCRIPTIONS;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_TRANS, len) == 0) {
type = TSDB_MGMT_TABLE_TRANS;
@@ -102,9 +102,9 @@ static int32_t convertToRetrieveType(char *name, int32_t len) {
type = TSDB_MGMT_TABLE_QUERIES;
} else if (strncasecmp(name, TSDB_INS_TABLE_VNODES, len) == 0) {
type = TSDB_MGMT_TABLE_VNODES;
- } else if (strncasecmp(name, TSDB_PERFS_TABLE_TOPICS, len) == 0) {
+ } else if (strncasecmp(name, TSDB_INS_TABLE_TOPICS, len) == 0) {
type = TSDB_MGMT_TABLE_TOPICS;
- } else if (strncasecmp(name, TSDB_PERFS_TABLE_STREAMS, len) == 0) {
+ } else if (strncasecmp(name, TSDB_INS_TABLE_STREAMS, len) == 0) {
type = TSDB_MGMT_TABLE_STREAMS;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_APPS, len) == 0) {
type = TSDB_MGMT_TABLE_APPS;
diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c
index 2fb934aaad735240e1a249447b5d041853819d82..8638cc511890066f45367253313aec8f626ceb8e 100644
--- a/source/dnode/mnode/impl/src/mndSma.c
+++ b/source/dnode/mnode/impl/src/mndSma.c
@@ -38,7 +38,6 @@ static SSdbRow *mndSmaActionDecode(SSdbRaw *pRaw);
static int32_t mndSmaActionInsert(SSdb *pSdb, SSmaObj *pSma);
static int32_t mndSmaActionDelete(SSdb *pSdb, SSmaObj *pSpSmatb);
static int32_t mndSmaActionUpdate(SSdb *pSdb, SSmaObj *pOld, SSmaObj *pNew);
-static int32_t mndSmaGetVgEpSet(SMnode *pMnode, SDbObj *pDb, SVgEpSet **ppVgEpSet, int32_t *numOfVgroups);
static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq);
static int32_t mndProcessDropSmaReq(SRpcMsg *pReq);
static int32_t mndProcessGetSmaReq(SRpcMsg *pReq);
@@ -841,6 +840,7 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p
_OVER:
mndTransDrop(pTrans);
+ mndReleaseStream(pMnode, pStream);
mndReleaseVgroup(pMnode, pVgroup);
mndReleaseStb(pMnode, pStb);
return code;
@@ -961,6 +961,7 @@ _OVER:
mError("sma:%s, failed to drop since %s", dropReq.name, terrstr());
}
+ mndReleaseSma(pMnode, pSma);
mndReleaseDb(pMnode, pDb);
return code;
}
diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c
index ebec3d5ea686c3a976adf5d4890f2a7eb7d8be82..dc8285740a4bdf9e0bfb04c36e780aca0f32f758 100644
--- a/source/dnode/mnode/impl/src/mndStb.c
+++ b/source/dnode/mnode/impl/src/mndStb.c
@@ -536,7 +536,7 @@ int32_t mndCheckCreateStbReq(SMCreateStbReq *pCreate) {
return -1;
}
- if (pCreate->numOfColumns < TSDB_MIN_COLUMNS || pCreate->numOfColumns > TSDB_MAX_COLUMNS) {
+ if (pCreate->numOfColumns < TSDB_MIN_COLUMNS || pCreate->numOfTags + pCreate->numOfColumns > TSDB_MAX_COLUMNS) {
terrno = TSDB_CODE_PAR_INVALID_COLUMNS_NUM;
return -1;
}
@@ -1774,6 +1774,67 @@ static int32_t mndBuildSMAlterStbRsp(SDbObj *pDb, SStbObj *pObj, void **pCont, i
return 0;
}
+int32_t mndBuildSMCreateStbRsp(SMnode *pMnode, char* dbFName, char* stbFName, void **pCont, int32_t *pLen) {
+ int32_t ret = -1;
+ SDbObj *pDb = mndAcquireDb(pMnode, dbFName);
+ if (NULL == pDb) {
+ return -1;
+ }
+
+ SStbObj *pObj = mndAcquireStb(pMnode, stbFName);
+ if (NULL == pObj) {
+ goto _OVER;
+ }
+
+ SEncoder ec = {0};
+ uint32_t contLen = 0;
+ SMCreateStbRsp stbRsp = {0};
+ SName name = {0};
+ tNameFromString(&name, pObj->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
+
+ stbRsp.pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+ if (NULL == stbRsp.pMeta) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _OVER;
+ }
+
+ ret = mndBuildStbSchemaImp(pDb, pObj, name.tname, stbRsp.pMeta);
+ if (ret) {
+ tFreeSMCreateStbRsp(&stbRsp);
+ goto _OVER;
+ }
+
+ tEncodeSize(tEncodeSMCreateStbRsp, &stbRsp, contLen, ret);
+ if (ret) {
+ tFreeSMCreateStbRsp(&stbRsp);
+ goto _OVER;
+ }
+
+ void *cont = taosMemoryMalloc(contLen);
+ tEncoderInit(&ec, cont, contLen);
+ tEncodeSMCreateStbRsp(&ec, &stbRsp);
+ tEncoderClear(&ec);
+
+ tFreeSMCreateStbRsp(&stbRsp);
+
+ *pCont = cont;
+ *pLen = contLen;
+
+ ret = 0;
+
+_OVER:
+ if (pObj) {
+ mndReleaseStb(pMnode, pObj);
+ }
+
+ if (pDb) {
+ mndReleaseDb(pMnode, pDb);
+ }
+
+ return ret;
+}
+
+
static int32_t mndAlterStbImp(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb, bool needRsp,
void *alterOriData, int32_t alterOriDataLen) {
int32_t code = -1;
@@ -2157,6 +2218,10 @@ static int32_t mndProcessTableMetaReq(SRpcMsg *pReq) {
STableInfoReq infoReq = {0};
STableMetaRsp metaRsp = {0};
+ SUserObj *pUser = mndAcquireUser(pMnode, pReq->info.conn.user);
+ if (pUser == NULL) return 0;
+ bool sysinfo = pUser->sysInfo;
+
if (tDeserializeSTableInfoReq(pReq->pCont, pReq->contLen, &infoReq) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
goto _OVER;
@@ -2164,7 +2229,7 @@ static int32_t mndProcessTableMetaReq(SRpcMsg *pReq) {
if (0 == strcmp(infoReq.dbFName, TSDB_INFORMATION_SCHEMA_DB)) {
mDebug("information_schema table:%s.%s, start to retrieve meta", infoReq.dbFName, infoReq.tbName);
- if (mndBuildInsTableSchema(pMnode, infoReq.dbFName, infoReq.tbName, &metaRsp) != 0) {
+ if (mndBuildInsTableSchema(pMnode, infoReq.dbFName, infoReq.tbName, sysinfo, &metaRsp) != 0) {
goto _OVER;
}
} else if (0 == strcmp(infoReq.dbFName, TSDB_PERFORMANCE_SCHEMA_DB)) {
@@ -2203,6 +2268,7 @@ _OVER:
mError("stb:%s.%s, failed to retrieve meta since %s", infoReq.dbFName, infoReq.tbName, terrstr());
}
+ mndReleaseUser(pMnode, pUser);
tFreeSTableMetaRsp(&metaRsp);
return code;
}
diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c
index 6dc8e2072b71df78aa88aecdd924a98db658ab05..dd7a9e71eaa634a5bda506b318c6c4472a48726b 100644
--- a/source/dnode/mnode/impl/src/mndStream.c
+++ b/source/dnode/mnode/impl/src/mndStream.c
@@ -631,6 +631,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
SStreamObj *pStream = NULL;
SDbObj *pDb = NULL;
SCMCreateStreamReq createStreamReq = {0};
+ SStreamObj streamObj = {0};
if (tDeserializeSCMCreateStreamReq(pReq->pCont, pReq->contLen, &createStreamReq) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
@@ -659,7 +660,6 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
}
// build stream obj from request
- SStreamObj streamObj = {0};
if (mndBuildStreamObjFromCreateReq(pMnode, &streamObj, &createStreamReq) < 0) {
/*ASSERT(0);*/
mError("stream:%s, failed to create since %s", createStreamReq.name, terrstr());
diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c
index 10e520d9ec49a53e5fcedcf668a40732480aa75b..1452c5ae2fd3e9cde7cb9052d22e10bfd31afb0f 100644
--- a/source/dnode/mnode/impl/src/mndSubscribe.c
+++ b/source/dnode/mnode/impl/src/mndSubscribe.c
@@ -287,6 +287,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
if (consumerVgNum > minVgCnt) {
if (imbCnt < imbConsumerNum) {
if (consumerVgNum == minVgCnt + 1) {
+ imbCnt++;
continue;
} else {
// pop until equal minVg + 1
diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c
index 8e8cff853c7504ffeaced773db99d22fa44a3568..e8b75e6a94e1089b037be9ec42a4fdc9deef3b3c 100644
--- a/source/dnode/mnode/impl/src/mndSync.c
+++ b/source/dnode/mnode/impl/src/mndSync.c
@@ -50,7 +50,7 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw);
pMgmt->errCode = cbMeta.code;
- mDebug("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64
+ mInfo("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64
" role:%s raw:%p",
transId, pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term, cbMeta.lastConfigIndex, syncStr(cbMeta.state),
pRaw);
@@ -88,7 +88,7 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
}
int32_t mndSyncGetSnapshot(struct SSyncFSM *pFsm, SSnapshot *pSnapshot, void *pReaderParam, void **ppReader) {
- mDebug("start to read snapshot from sdb in atomic way");
+ mInfo("start to read snapshot from sdb in atomic way");
SMnode *pMnode = pFsm->data;
return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader, &pSnapshot->lastApplyIndex, &pSnapshot->lastApplyTerm,
&pSnapshot->lastConfigIndex);
@@ -136,13 +136,13 @@ void mndReConfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReConfigCbMeta cbM
}
int32_t mndSnapshotStartRead(struct SSyncFSM *pFsm, void *pParam, void **ppReader) {
- mDebug("start to read snapshot from sdb");
+ mInfo("start to read snapshot from sdb");
SMnode *pMnode = pFsm->data;
return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader, NULL, NULL, NULL);
}
int32_t mndSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) {
- mDebug("stop to read snapshot from sdb");
+ mInfo("stop to read snapshot from sdb");
SMnode *pMnode = pFsm->data;
return sdbStopRead(pMnode->pSdb, pReader);
}
@@ -174,12 +174,12 @@ int32_t mndSnapshotDoWrite(struct SSyncFSM *pFsm, void *pWriter, void *pBuf, int
void mndLeaderTransfer(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
SMnode *pMnode = pFsm->data;
atomic_store_8(&(pMnode->syncMgmt.leaderTransferFinish), 1);
- mDebug("vgId:1, mnode leader transfer finish");
+ mInfo("vgId:1, mnode leader transfer finish");
}
static void mndBecomeFollower(struct SSyncFSM *pFsm) {
SMnode *pMnode = pFsm->data;
- mDebug("vgId:1, become follower and post sem");
+ mInfo("vgId:1, become follower and post sem");
taosWLockLatch(&pMnode->syncMgmt.lock);
if (pMnode->syncMgmt.transId != 0) {
@@ -190,7 +190,7 @@ static void mndBecomeFollower(struct SSyncFSM *pFsm) {
}
static void mndBecomeLeader(struct SSyncFSM *pFsm) {
- mDebug("vgId:1, become leader");
+ mInfo("vgId:1, become leader");
SMnode *pMnode = pFsm->data;
}
@@ -284,7 +284,7 @@ int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) {
return -1;
} else {
pMgmt->transId = transId;
- mDebug("trans:%d, will be proposed", pMgmt->transId);
+ mInfo("trans:%d, will be proposed", pMgmt->transId);
taosWUnLockLatch(&pMgmt->lock);
}
@@ -314,7 +314,7 @@ void mndSyncStart(SMnode *pMnode) {
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
syncSetMsgCb(pMgmt->sync, &pMnode->msgCb);
syncStart(pMgmt->sync);
- mDebug("mnode sync started, id:%" PRId64 " standby:%d", pMgmt->sync, pMgmt->standby);
+ mInfo("mnode sync started, id:%" PRId64 " standby:%d", pMgmt->sync, pMgmt->standby);
}
void mndSyncStop(SMnode *pMnode) {
diff --git a/source/dnode/mnode/impl/src/mndTelem.c b/source/dnode/mnode/impl/src/mndTelem.c
index 27814fe5bea155c54fa32789efbaf2ae30cdb29b..93f7531a272860d63351ff1a008fa11f48b5a17c 100644
--- a/source/dnode/mnode/impl/src/mndTelem.c
+++ b/source/dnode/mnode/impl/src/mndTelem.c
@@ -131,7 +131,9 @@ static int32_t mndProcessTelemTimer(SRpcMsg* pReq) {
char* pCont = mndBuildTelemetryReport(pMnode);
if (pCont != NULL) {
if (taosSendHttpReport(tsTelemServer, tsTelemPort, pCont, strlen(pCont), HTTP_FLAT) != 0) {
- mError("failed to send telemetry msg");
+ mError("failed to send telemetry report");
+ } else {
+ mTrace("succeed to send telemetry report");
}
taosMemoryFree(pCont);
}
diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c
index ff208eae607ab0fa57be7431771f209e18e02ce5..eb072d013d0024e5b05a172c3c3d5d55ce41cd40 100644
--- a/source/dnode/mnode/impl/src/mndTopic.c
+++ b/source/dnode/mnode/impl/src/mndTopic.c
@@ -763,8 +763,9 @@ static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
int32_t cols = 0;
char topicName[TSDB_TOPIC_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
- tNameFromString(&n, pTopic->name, T_NAME_ACCT | T_NAME_DB);
- tNameGetDbName(&n, varDataVal(topicName));
+ strcpy(varDataVal(topicName), mndGetDbStr(pTopic->name));
+ /*tNameFromString(&n, pTopic->name, T_NAME_ACCT | T_NAME_DB);*/
+ /*tNameGetDbName(&n, varDataVal(topicName));*/
varDataSetLen(topicName, strlen(varDataVal(topicName)));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)topicName, false);
diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c
index 17b4336465c23c49e71adde85a8b5291124c4f43..9c4a5afb032e6677997b7a84e919451b238b2068 100644
--- a/source/dnode/mnode/impl/src/mndTrans.c
+++ b/source/dnode/mnode/impl/src/mndTrans.c
@@ -17,6 +17,7 @@
#include "mndTrans.h"
#include "mndConsumer.h"
#include "mndDb.h"
+#include "mndStb.h"
#include "mndPrivilege.h"
#include "mndShow.h"
#include "mndSync.h"
@@ -455,11 +456,11 @@ static const char *mndTransStr(ETrnStage stage) {
}
static void mndTransTestStartFunc(SMnode *pMnode, void *param, int32_t paramLen) {
- mDebug("test trans start, param:%s, len:%d", (char *)param, paramLen);
+ mInfo("test trans start, param:%s, len:%d", (char *)param, paramLen);
}
static void mndTransTestStopFunc(SMnode *pMnode, void *param, int32_t paramLen) {
- mDebug("test trans stop, param:%s, len:%d", (char *)param, paramLen);
+ mInfo("test trans stop, param:%s, len:%d", (char *)param, paramLen);
}
static TransCbFp mndTransGetCbFp(ETrnFunc ftype) {
@@ -706,7 +707,7 @@ int32_t mndSetRpcInfoForDbTrans(SMnode *pMnode, SRpcMsg *pMsg, EOperType oper, c
if (pTrans->oper == oper) {
if (strcasecmp(dbname, pTrans->dbname1) == 0) {
- mDebug("trans:%d, db:%s oper:%d matched with input", pTrans->id, dbname, oper);
+ mInfo("trans:%d, db:%s oper:%d matched with input", pTrans->id, dbname, oper);
if (pTrans->pRpcArray == NULL) {
pTrans->pRpcArray = taosArrayInit(1, sizeof(SRpcHandleInfo));
}
@@ -745,7 +746,7 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
}
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- mDebug("trans:%d, sync to other mnodes, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
+ mInfo("trans:%d, sync to other mnodes, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
int32_t code = mndSyncPropose(pMnode, pRaw, pTrans->id);
if (code != 0) {
mError("trans:%d, failed to sync since %s", pTrans->id, terrstr());
@@ -754,7 +755,7 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
}
sdbFreeRaw(pRaw);
- mDebug("trans:%d, sync finished", pTrans->id);
+ mInfo("trans:%d, sync finished", pTrans->id);
return 0;
}
@@ -820,12 +821,12 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
return -1;
}
- mDebug("trans:%d, prepare transaction", pTrans->id);
+ mInfo("trans:%d, prepare transaction", pTrans->id);
if (mndTransSync(pMnode, pTrans) != 0) {
mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
return -1;
}
- mDebug("trans:%d, prepare finished", pTrans->id);
+ mInfo("trans:%d, prepare finished", pTrans->id);
STrans *pNew = mndAcquireTrans(pMnode, pTrans->id);
if (pNew == NULL) {
@@ -846,22 +847,22 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
}
static int32_t mndTransCommit(SMnode *pMnode, STrans *pTrans) {
- mDebug("trans:%d, commit transaction", pTrans->id);
+ mInfo("trans:%d, commit transaction", pTrans->id);
if (mndTransSync(pMnode, pTrans) != 0) {
mError("trans:%d, failed to commit since %s", pTrans->id, terrstr());
return -1;
}
- mDebug("trans:%d, commit finished", pTrans->id);
+ mInfo("trans:%d, commit finished", pTrans->id);
return 0;
}
static int32_t mndTransRollback(SMnode *pMnode, STrans *pTrans) {
- mDebug("trans:%d, rollback transaction", pTrans->id);
+ mInfo("trans:%d, rollback transaction", pTrans->id);
if (mndTransSync(pMnode, pTrans) != 0) {
mError("trans:%d, failed to rollback since %s", pTrans->id, terrstr());
return -1;
}
- mDebug("trans:%d, rollback finished", pTrans->id);
+ mInfo("trans:%d, rollback finished", pTrans->id);
return 0;
}
@@ -893,30 +894,21 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
for (int32_t i = 0; i < size; ++i) {
SRpcHandleInfo *pInfo = taosArrayGet(pTrans->pRpcArray, i);
if (pInfo->handle != NULL) {
- mDebug("trans:%d, send rsp, code:0x%x stage:%s app:%p", pTrans->id, code, mndTransStr(pTrans->stage),
+ mInfo("trans:%d, send rsp, code:0x%x stage:%s app:%p", pTrans->id, code, mndTransStr(pTrans->stage),
pInfo->ahandle);
if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
code = TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL;
}
SRpcMsg rspMsg = {.code = code, .info = *pInfo};
- if (pTrans->rpcRspLen != 0) {
- void *rpcCont = rpcMallocCont(pTrans->rpcRspLen);
- if (rpcCont != NULL) {
- memcpy(rpcCont, pTrans->rpcRsp, pTrans->rpcRspLen);
- rspMsg.pCont = rpcCont;
- rspMsg.contLen = pTrans->rpcRspLen;
- }
- }
-
if (pTrans->originRpcType == TDMT_MND_CREATE_DB) {
- mDebug("trans:%d, origin msgtype:%s", pTrans->id, TMSG_INFO(pTrans->originRpcType));
+ mInfo("trans:%d, origin msgtype:%s", pTrans->id, TMSG_INFO(pTrans->originRpcType));
SDbObj *pDb = mndAcquireDb(pMnode, pTrans->dbname1);
if (pDb != NULL) {
for (int32_t j = 0; j < 12; j++) {
bool ready = mndIsDbReady(pMnode, pDb);
if (!ready) {
- mDebug("trans:%d, db:%s not ready yet, wait %d times", pTrans->id, pTrans->dbname1, j);
+ mInfo("trans:%d, db:%s not ready yet, wait %d times", pTrans->id, pTrans->dbname1, j);
taosMsleep(1000);
} else {
break;
@@ -924,6 +916,21 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
}
}
mndReleaseDb(pMnode, pDb);
+ } else if (pTrans->originRpcType == TDMT_MND_CREATE_STB) {
+ void *pCont = NULL;
+ int32_t contLen = 0;
+ if (0 == mndBuildSMCreateStbRsp(pMnode, pTrans->dbname1, pTrans->dbname2, &pCont, &contLen) != 0) {
+ mndTransSetRpcRsp(pTrans, pCont, contLen);
+ }
+ }
+
+ if (pTrans->rpcRspLen != 0) {
+ void *rpcCont = rpcMallocCont(pTrans->rpcRspLen);
+ if (rpcCont != NULL) {
+ memcpy(rpcCont, pTrans->rpcRsp, pTrans->rpcRspLen);
+ rspMsg.pCont = rpcCont;
+ rspMsg.contLen = pTrans->rpcRspLen;
+ }
}
tmsgSendRsp(&rspMsg);
@@ -971,7 +978,7 @@ int32_t mndTransProcessRsp(SRpcMsg *pRsp) {
pAction->errCode = pRsp->code;
}
- mDebug("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x retry:0x%x", transId,
+ mInfo("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x retry:0x%x", transId,
mndTransStr(pAction->stage), action, pRsp->code, pAction->acceptableCode, pAction->retryCode);
mndTransExecute(pMnode, pTrans);
@@ -987,10 +994,10 @@ static void mndTransResetAction(SMnode *pMnode, STrans *pTrans, STransAction *pA
if (pAction->errCode == TSDB_CODE_RPC_REDIRECT || pAction->errCode == TSDB_CODE_SYN_NEW_CONFIG_ERROR ||
pAction->errCode == TSDB_CODE_SYN_INTERNAL_ERROR || pAction->errCode == TSDB_CODE_SYN_NOT_LEADER) {
pAction->epSet.inUse = (pAction->epSet.inUse + 1) % pAction->epSet.numOfEps;
- mDebug("trans:%d, %s:%d execute status is reset and set epset inuse:%d", pTrans->id, mndTransStr(pAction->stage),
+ mInfo("trans:%d, %s:%d execute status is reset and set epset inuse:%d", pTrans->id, mndTransStr(pAction->stage),
pAction->id, pAction->epSet.inUse);
} else {
- mDebug("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), pAction->id);
+ mInfo("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), pAction->id);
}
pAction->errCode = 0;
}
@@ -1017,7 +1024,7 @@ static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransActi
pAction->rawWritten = true;
pAction->errCode = 0;
code = 0;
- mDebug("trans:%d, %s:%d write to sdb, type:%s status:%s", pTrans->id, mndTransStr(pAction->stage), pAction->id,
+ mInfo("trans:%d, %s:%d write to sdb, type:%s status:%s", pTrans->id, mndTransStr(pAction->stage), pAction->id,
sdbTableName(pAction->pRaw->type), sdbStatusName(pAction->pRaw->status));
pTrans->lastAction = pAction->id;
@@ -1066,7 +1073,7 @@ static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransActio
pAction->msgSent = 1;
pAction->msgReceived = 0;
pAction->errCode = 0;
- mDebug("trans:%d, %s:%d is sent, %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, detail);
+ mInfo("trans:%d, %s:%d is sent, %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, detail);
pTrans->lastAction = pAction->id;
pTrans->lastMsgType = pAction->msgType;
@@ -1093,7 +1100,7 @@ static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransActio
static int32_t mndTransExecNullMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction) {
pAction->rawWritten = 0;
pAction->errCode = 0;
- mDebug("trans:%d, %s:%d confirm action executed", pTrans->id, mndTransStr(pAction->stage), pAction->id);
+ mInfo("trans:%d, %s:%d confirm action executed", pTrans->id, mndTransStr(pAction->stage), pAction->id);
pTrans->lastAction = pAction->id;
pTrans->lastMsgType = pAction->msgType;
@@ -1153,7 +1160,7 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA
pTrans->lastMsgType = 0;
memset(&pTrans->lastEpset, 0, sizeof(pTrans->lastEpset));
pTrans->lastErrorNo = 0;
- mDebug("trans:%d, all %d actions execute successfully", pTrans->id, numOfActions);
+ mInfo("trans:%d, all %d actions execute successfully", pTrans->id, numOfActions);
return 0;
} else {
mError("trans:%d, all %d actions executed, code:0x%x", pTrans->id, numOfActions, errCode & 0XFFFF);
@@ -1168,7 +1175,7 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA
return errCode;
}
} else {
- mDebug("trans:%d, %d of %d actions executed", pTrans->id, numOfExecuted, numOfActions);
+ mInfo("trans:%d, %d of %d actions executed", pTrans->id, numOfExecuted, numOfActions);
return TSDB_CODE_ACTION_IN_PROGRESS;
}
}
@@ -1214,7 +1221,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
code = pAction->errCode;
mndTransResetAction(pMnode, pTrans, pAction);
} else {
- mDebug("trans:%d, %s:%d execute successfully", pTrans->id, mndTransStr(pAction->stage), action);
+ mInfo("trans:%d, %s:%d execute successfully", pTrans->id, mndTransStr(pAction->stage), action);
}
} else {
code = TSDB_CODE_ACTION_IN_PROGRESS;
@@ -1223,7 +1230,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) {
code = pAction->errCode;
} else {
- mDebug("trans:%d, %s:%d write successfully", pTrans->id, mndTransStr(pAction->stage), action);
+ mInfo("trans:%d, %s:%d write successfully", pTrans->id, mndTransStr(pAction->stage), action);
}
} else {
}
@@ -1247,7 +1254,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
if (code == 0) {
pTrans->code = 0;
pTrans->redoActionPos++;
- mDebug("trans:%d, %s:%d is executed and need sync to other mnodes", pTrans->id, mndTransStr(pAction->stage),
+ mInfo("trans:%d, %s:%d is executed and need sync to other mnodes", pTrans->id, mndTransStr(pAction->stage),
pAction->id);
code = mndTransSync(pMnode, pTrans);
if (code != 0) {
@@ -1256,17 +1263,17 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
mndTransStr(pAction->stage), pAction->id, terrstr());
}
} else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
- mDebug("trans:%d, %s:%d is in progress and wait it finish", pTrans->id, mndTransStr(pAction->stage), pAction->id);
+ mInfo("trans:%d, %s:%d is in progress and wait it finish", pTrans->id, mndTransStr(pAction->stage), pAction->id);
break;
} else if (code == pAction->retryCode) {
- mDebug("trans:%d, %s:%d receive code:0x%x and retry", pTrans->id, mndTransStr(pAction->stage), pAction->id, code);
+ mInfo("trans:%d, %s:%d receive code:0x%x and retry", pTrans->id, mndTransStr(pAction->stage), pAction->id, code);
taosMsleep(300);
action--;
continue;
} else {
terrno = code;
pTrans->code = code;
- mDebug("trans:%d, %s:%d receive code:0x%x and wait another schedule, failedTimes:%d", pTrans->id,
+ mInfo("trans:%d, %s:%d receive code:0x%x and wait another schedule, failedTimes:%d", pTrans->id,
mndTransStr(pAction->stage), pAction->id, code, pTrans->failedTimes);
break;
}
@@ -1278,7 +1285,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) {
bool continueExec = true;
pTrans->stage = TRN_STAGE_REDO_ACTION;
- mDebug("trans:%d, stage from prepare to redoAction", pTrans->id);
+ mInfo("trans:%d, stage from prepare to redoAction", pTrans->id);
return continueExec;
}
@@ -1297,10 +1304,10 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->code = 0;
pTrans->stage = TRN_STAGE_COMMIT;
- mDebug("trans:%d, stage from redoAction to commit", pTrans->id);
+ mInfo("trans:%d, stage from redoAction to commit", pTrans->id);
continueExec = true;
} else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
- mDebug("trans:%d, stage keep on redoAction since %s", pTrans->id, tstrerror(code));
+ mInfo("trans:%d, stage keep on redoAction since %s", pTrans->id, tstrerror(code));
continueExec = false;
} else {
pTrans->failedTimes++;
@@ -1308,7 +1315,7 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) {
if (pTrans->policy == TRN_POLICY_ROLLBACK) {
if (pTrans->lastAction != 0) {
STransAction *pAction = taosArrayGet(pTrans->redoActions, pTrans->lastAction);
- if (pAction->retryCode != 0 && pAction->retryCode != pAction->errCode) {
+ if (pAction->retryCode != 0 && pAction->retryCode == pAction->errCode) {
if (pTrans->failedTimes < 6) {
mError("trans:%d, stage keep on redoAction since action:%d code:0x%x not 0x%x, failedTimes:%d", pTrans->id,
pTrans->lastAction, pTrans->code, pAction->retryCode, pTrans->failedTimes);
@@ -1340,7 +1347,7 @@ static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->code = 0;
pTrans->stage = TRN_STAGE_COMMIT_ACTION;
- mDebug("trans:%d, stage from commit to commitAction", pTrans->id);
+ mInfo("trans:%d, stage from commit to commitAction", pTrans->id);
continueExec = true;
} else {
pTrans->code = terrno;
@@ -1359,7 +1366,7 @@ static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->code = 0;
pTrans->stage = TRN_STAGE_FINISHED;
- mDebug("trans:%d, stage from commitAction to finished", pTrans->id);
+ mInfo("trans:%d, stage from commitAction to finished", pTrans->id);
continueExec = true;
} else {
pTrans->code = terrno;
@@ -1377,10 +1384,10 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->stage = TRN_STAGE_FINISHED;
- mDebug("trans:%d, stage from undoAction to finished", pTrans->id);
+ mInfo("trans:%d, stage from undoAction to finished", pTrans->id);
continueExec = true;
} else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
- mDebug("trans:%d, stage keep on undoAction since %s", pTrans->id, tstrerror(code));
+ mInfo("trans:%d, stage keep on undoAction since %s", pTrans->id, tstrerror(code));
continueExec = false;
} else {
pTrans->failedTimes++;
@@ -1399,7 +1406,7 @@ static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->stage = TRN_STAGE_UNDO_ACTION;
- mDebug("trans:%d, stage from rollback to undoAction", pTrans->id);
+ mInfo("trans:%d, stage from rollback to undoAction", pTrans->id);
continueExec = true;
} else {
pTrans->failedTimes++;
@@ -1424,7 +1431,7 @@ static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans) {
mError("trans:%d, failed to write sdb since %s", pTrans->id, terrstr());
}
- mDebug("trans:%d, execute finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes);
+ mInfo("trans:%d, execute finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes);
return continueExec;
}
@@ -1432,7 +1439,7 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans) {
bool continueExec = true;
while (continueExec) {
- mDebug("trans:%d, continue to execute, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
+ mInfo("trans:%d, continue to execute, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
pTrans->lastExecTime = taosGetTimestampMs();
switch (pTrans->stage) {
case TRN_STAGE_PREPARE:
diff --git a/source/dnode/mnode/impl/test/sma/CMakeLists.txt b/source/dnode/mnode/impl/test/sma/CMakeLists.txt
index 3f9ec123a80e88371a98fa54c99342726831372d..a55b45ca11d32f4aa0baa2462007f06e970ae3d6 100644
--- a/source/dnode/mnode/impl/test/sma/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/sma/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME smaTest
- COMMAND smaTest
-)
+if(NOT ${TD_WINDOWS})
+ add_test(
+ NAME smaTest
+ COMMAND smaTest
+ )
+endif(NOT ${TD_WINDOWS})
diff --git a/source/dnode/mnode/impl/test/stb/CMakeLists.txt b/source/dnode/mnode/impl/test/stb/CMakeLists.txt
index dcfbe658fcca82f928400b1e9eed2efcfb09a052..e3a3fc2e793fa84a5da05519ae727bb572edaa27 100644
--- a/source/dnode/mnode/impl/test/stb/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/stb/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME stbTest
- COMMAND stbTest
-)
\ No newline at end of file
+if(NOT ${TD_WINDOWS})
+ add_test(
+ NAME stbTest
+ COMMAND stbTest
+ )
+endif(NOT ${TD_WINDOWS})
\ No newline at end of file
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index ec27ba8ce659e454b768945aca50fb071d4e7b4f..5d4285b7c25e645dfccf18529cfd2173afa312cc 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -63,7 +63,7 @@ void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId);
int32_t vnodeProcessCreateTSma(SVnode *pVnode, void *pCont, uint32_t contLen);
int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list);
int32_t vnodeGetCtbIdList(SVnode *pVnode, int64_t suid, SArray *list);
-int32_t vnodeGetStbIdList(SVnode *pVnode, int64_t suid, SArray* list);
+int32_t vnodeGetStbIdList(SVnode *pVnode, int64_t suid, SArray *list);
void *vnodeGetIdx(SVnode *pVnode);
void *vnodeGetIvtIdx(SVnode *pVnode);
@@ -96,7 +96,7 @@ int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHash
int32_t metaReadNext(SMetaReader *pReader);
const void *metaGetTableTagVal(void *tag, int16_t type, STagVal *tagVal);
int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName);
-bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid);
+bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid);
typedef struct SMetaFltParam {
tb_uid_t suid;
@@ -128,8 +128,10 @@ typedef struct STsdbReader STsdbReader;
#define TIMEWINDOW_RANGE_CONTAINED 1
#define TIMEWINDOW_RANGE_EXTERNAL 2
-#define LASTROW_RETRIEVE_TYPE_ALL 0x1
-#define LASTROW_RETRIEVE_TYPE_SINGLE 0x2
+#define CACHESCAN_RETRIEVE_TYPE_ALL 0x1
+#define CACHESCAN_RETRIEVE_TYPE_SINGLE 0x2
+#define CACHESCAN_RETRIEVE_LAST_ROW 0x4
+#define CACHESCAN_RETRIEVE_LAST 0x8
int32_t tsdbSetTableId(STsdbReader *pReader, int64_t uid);
int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, SArray *pTableList, STsdbReader **ppReader,
@@ -146,15 +148,40 @@ void *tsdbGetIdx(SMeta *pMeta);
void *tsdbGetIvtIdx(SMeta *pMeta);
uint64_t getReaderMaxVersion(STsdbReader *pReader);
-int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader);
-int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids);
-int32_t tsdbLastrowReaderClose(void *pReader);
+int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader);
+int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids);
+int32_t tsdbCacherowsReaderClose(void *pReader);
int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid);
void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity);
size_t tsdbCacheGetCapacity(SVnode *pVnode);
// tq
+typedef struct SMetaTableInfo {
+ int64_t suid;
+ int64_t uid;
+ SSchemaWrapper *schema;
+ char tbName[TSDB_TABLE_NAME_LEN];
+} SMetaTableInfo;
+
+typedef struct SIdInfo {
+ int64_t version;
+ int32_t index;
+} SIdInfo;
+
+typedef struct SSnapContext {
+ SMeta *pMeta;
+ int64_t snapVersion;
+ TBC *pCur;
+ int64_t suid;
+ int8_t subType;
+ SHashObj *idVersion;
+ SHashObj *suidInfo;
+ SArray *idList;
+ int32_t index;
+ bool withMeta;
+ bool queryMetaOrData; // true-get meta, false-get data
+} SSnapContext;
typedef struct STqReader {
int64_t ver;
@@ -205,6 +232,13 @@ int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWr
int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *pSnapshot);
int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData);
+int32_t buildSnapContext(SMeta *pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta,
+ SSnapContext **ctxRet);
+int32_t getMetafromSnapShot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid);
+SMetaTableInfo getUidfromSnapShot(SSnapContext *ctx);
+int32_t setForSnapShot(SSnapContext *ctx, int64_t uid);
+int32_t destroySnapContext(SSnapContext *ctx);
+
// structs
struct STsdbCfg {
int8_t precision;
@@ -224,7 +258,9 @@ typedef struct {
int64_t numOfSTables;
int64_t numOfCTables;
int64_t numOfNTables;
+ int64_t numOfNTimeSeries;
int64_t numOfTimeSeries;
+ int64_t itvTimeSeries;
int64_t pointsWritten;
int64_t totalStorage;
int64_t compStorage;
diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h
index ca77042bb26d72f87471e4ac80329efc92449427..c29c4cb6c4e84db96ba5d419b17da97b86169ebe 100644
--- a/source/dnode/vnode/src/inc/sma.h
+++ b/source/dnode/vnode/src/inc/sma.h
@@ -33,7 +33,6 @@ extern "C" {
// clang-format on
#define RSMA_TASK_INFO_HASH_SLOT (8)
-#define RSMA_EXECUTOR_MAX (1)
typedef struct SSmaEnv SSmaEnv;
typedef struct SSmaStat SSmaStat;
@@ -49,9 +48,12 @@ typedef struct SQTaskFWriter SQTaskFWriter;
struct SSmaEnv {
SRWLatch lock;
int8_t type;
+ int8_t flag; // 0x01 inClose
SSmaStat *pStat;
};
+#define SMA_ENV_FLG_CLOSE ((int8_t)0x1)
+
typedef struct {
int8_t inited;
int32_t rsetId;
@@ -93,7 +95,7 @@ struct SRSmaStat {
int64_t refId; // shared by fetch tasks
volatile int64_t nBufItems; // number of items in queue buffer
SRWLatch lock; // r/w lock for rsma fs(e.g. qtaskinfo)
- volatile int8_t nExecutor; // [1, max(half of query threads, 4)]
+ volatile int32_t nFetchAll; // active number of fetch all
int8_t triggerStat; // shared by fetch tasks
int8_t commitStat; // 0 not in committing, 1 in committing
SArray *aTaskFile; // qTaskFiles committed recently(for recovery/snapshot r/w)
@@ -107,6 +109,7 @@ struct SSmaStat {
SRSmaStat rsmaStat; // rollup sma
};
T_REF_DECLARE()
+ char data[];
};
#define SMA_STAT_TSMA(s) (&(s)->tsmaStat)
diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h
index cb5ec7aabe48363f57b68238be80a6c124af9509..c3441a43f0e736881fc8bc491dd5717223645ed4 100644
--- a/source/dnode/vnode/src/inc/tq.h
+++ b/source/dnode/vnode/src/inc/tq.h
@@ -67,8 +67,7 @@ typedef struct {
// tqExec
typedef struct {
- char* qmsg;
- qTaskInfo_t task;
+ char* qmsg;
} STqExecCol;
typedef struct {
@@ -82,7 +81,8 @@ typedef struct {
typedef struct {
int8_t subType;
- STqReader* pExecReader;
+ STqReader* pExecReader;
+ qTaskInfo_t task;
union {
STqExecCol execCol;
STqExecTb execTb;
@@ -101,7 +101,6 @@ typedef struct {
int64_t snapshotVer;
- // TODO remove
SWalReader* pWalReader;
SWalRef* pRef;
@@ -141,11 +140,12 @@ int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle);
int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle);
// tqRead
-int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* offset);
+int32_t tqScan(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* offset);
+int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset);
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum);
// tqExec
-int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp);
+int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp* pRsp);
int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp);
// tqMeta
@@ -176,17 +176,6 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
char* tqOffsetBuildFName(const char* path, int32_t ver);
int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname);
-static FORCE_INLINE void tqOffsetResetToData(STqOffsetVal* pOffsetVal, int64_t uid, int64_t ts) {
- pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_DATA;
- pOffsetVal->uid = uid;
- pOffsetVal->ts = ts;
-}
-
-static FORCE_INLINE void tqOffsetResetToLog(STqOffsetVal* pOffsetVal, int64_t ver) {
- pOffsetVal->type = TMQ_OFFSET__LOG;
- pOffsetVal->version = ver;
-}
-
// tqStream
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask);
diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h
index 39c5f3873ed9884109c0dc28f66d314b12b83a99..9b252df58b2a87c4baf8453ad597d62e50b61a33 100644
--- a/source/dnode/vnode/src/inc/vnodeInt.h
+++ b/source/dnode/vnode/src/inc/vnodeInt.h
@@ -102,7 +102,7 @@ int metaCommit(SMeta* pMeta);
int metaCreateSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaAlterSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq, SArray* tbUidList);
-int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq);
+int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq, STableMetaRsp **pMetaRsp);
int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids);
int metaTtlDropTable(SMeta* pMeta, int64_t ttl, SArray* tbUids);
int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp);
@@ -189,7 +189,6 @@ SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
int32_t smaInit();
void smaCleanUp();
int32_t smaOpen(SVnode* pVnode);
-int32_t smaPreClose(SSma* pSma);
int32_t smaClose(SSma* pSma);
int32_t smaBegin(SSma* pSma);
int32_t smaSyncPreCommit(SSma* pSma);
@@ -199,7 +198,6 @@ int32_t smaAsyncPreCommit(SSma* pSma);
int32_t smaAsyncCommit(SSma* pSma);
int32_t smaAsyncPostCommit(SSma* pSma);
int32_t smaDoRetention(SSma* pSma, int64_t now);
-int32_t smaProcessExec(SSma* pSma, void* pMsg);
int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg);
int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg);
@@ -323,7 +321,6 @@ struct SVnode {
TdThreadMutex lock;
bool blocked;
bool restored;
- bool inClose;
tsem_t syncSem;
SQHandle* pQuery;
};
diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c
index 805bc24d8c2824cb8e5e95df03c8b4b65ce25d6d..7df355a59b1a1099faf19daf13c73db3cc9ea095 100644
--- a/source/dnode/vnode/src/meta/metaQuery.c
+++ b/source/dnode/vnode/src/meta/metaQuery.c
@@ -615,11 +615,15 @@ int64_t metaGetTbNum(SMeta *pMeta) {
// N.B. Called by statusReq per second
int64_t metaGetTimeSeriesNum(SMeta *pMeta) {
// sum of (number of columns of stable - 1) * number of ctables (excluding timestamp column)
- int64_t num = 0;
- vnodeGetTimeSeriesNum(pMeta->pVnode, &num);
- pMeta->pVnode->config.vndStats.numOfTimeSeries = num;
+ if (pMeta->pVnode->config.vndStats.numOfTimeSeries <= 0 || ++pMeta->pVnode->config.vndStats.itvTimeSeries % 60 == 0) {
+ int64_t num = 0;
+ vnodeGetTimeSeriesNum(pMeta->pVnode, &num);
+ pMeta->pVnode->config.vndStats.numOfTimeSeries = num;
- return pMeta->pVnode->config.vndStats.numOfTimeSeries;
+ pMeta->pVnode->config.vndStats.itvTimeSeries = 0;
+ }
+
+ return pMeta->pVnode->config.vndStats.numOfTimeSeries + pMeta->pVnode->config.vndStats.numOfNTimeSeries;
}
typedef struct {
@@ -887,6 +891,37 @@ const void *metaGetTableTagVal(void *pTag, int16_t type, STagVal *val) {
if (!find) {
return NULL;
}
+
+#ifdef TAG_FILTER_DEBUG
+ if (IS_VAR_DATA_TYPE(val->type)) {
+ char *buf = taosMemoryCalloc(val->nData + 1, 1);
+ memcpy(buf, val->pData, val->nData);
+ metaDebug("metaTag table val varchar index:%d cid:%d type:%d value:%s", 1, val->cid, val->type, buf);
+ taosMemoryFree(buf);
+ } else {
+ double dval = 0;
+ GET_TYPED_DATA(dval, double, val->type, &val->i64);
+ metaDebug("metaTag table val number index:%d cid:%d type:%d value:%f", 1, val->cid, val->type, dval);
+ }
+
+ SArray *pTagVals = NULL;
+ tTagToValArray((STag *)pTag, &pTagVals);
+ for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
+ STagVal *pTagVal = (STagVal *)taosArrayGet(pTagVals, i);
+
+ if (IS_VAR_DATA_TYPE(pTagVal->type)) {
+ char *buf = taosMemoryCalloc(pTagVal->nData + 1, 1);
+ memcpy(buf, pTagVal->pData, pTagVal->nData);
+ metaDebug("metaTag table varchar index:%d cid:%d type:%d value:%s", i, pTagVal->cid, pTagVal->type, buf);
+ taosMemoryFree(buf);
+ } else {
+ double dval = 0;
+ GET_TYPED_DATA(dval, double, pTagVal->type, &pTagVal->i64);
+ metaDebug("metaTag table number index:%d cid:%d type:%d value:%f", i, pTagVal->cid, pTagVal->type, dval);
+ }
+ }
+#endif
+
return val;
}
diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c
index 973c3814074685128395bd50243bba8981af4200..0edbd092e6b06883cc1e2b6be66e0ea55b8563a1 100644
--- a/source/dnode/vnode/src/meta/metaSnapshot.c
+++ b/source/dnode/vnode/src/meta/metaSnapshot.c
@@ -195,3 +195,434 @@ _err:
metaError("vgId:%d, vnode snapshot meta write failed since %s", TD_VID(pMeta->pVnode), tstrerror(code));
return code;
}
+
+typedef struct STableInfoForChildTable{
+ char *tableName;
+ SSchemaWrapper *schemaRow;
+ SSchemaWrapper *tagRow;
+}STableInfoForChildTable;
+
+static void destroySTableInfoForChildTable(void* data) {
+ STableInfoForChildTable* pData = (STableInfoForChildTable*)data;
+ taosMemoryFree(pData->tableName);
+ tDeleteSSchemaWrapper(pData->schemaRow);
+ tDeleteSSchemaWrapper(pData->tagRow);
+}
+
+static void MoveToSnapShotVersion(SSnapContext* ctx){
+ tdbTbcClose(ctx->pCur);
+ tdbTbcOpen(ctx->pMeta->pTbDb, &ctx->pCur, NULL);
+ STbDbKey key = {.version = ctx->snapVersion, .uid = INT64_MAX};
+ int c = 0;
+ tdbTbcMoveTo(ctx->pCur, &key, sizeof(key), &c);
+ if(c < 0){
+ tdbTbcMoveToPrev(ctx->pCur);
+ }
+}
+
+static int32_t MoveToPosition(SSnapContext* ctx, int64_t ver, int64_t uid){
+ tdbTbcClose(ctx->pCur);
+ tdbTbcOpen(ctx->pMeta->pTbDb, &ctx->pCur, NULL);
+ STbDbKey key = {.version = ver, .uid = uid};
+ int c = 0;
+ tdbTbcMoveTo(ctx->pCur, &key, sizeof(key), &c);
+ return c;
+}
+
+static void MoveToFirst(SSnapContext* ctx){
+ tdbTbcClose(ctx->pCur);
+ tdbTbcOpen(ctx->pMeta->pTbDb, &ctx->pCur, NULL);
+ tdbTbcMoveToFirst(ctx->pCur);
+}
+
+static void saveSuperTableInfoForChildTable(SMetaEntry *me, SHashObj *suidInfo){
+ STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(suidInfo, &me->uid, sizeof(tb_uid_t));
+ if(data){
+ return;
+ }
+ STableInfoForChildTable dataTmp = {0};
+ dataTmp.tableName = strdup(me->name);
+
+ dataTmp.schemaRow = tCloneSSchemaWrapper(&me->stbEntry.schemaRow);
+ dataTmp.tagRow = tCloneSSchemaWrapper(&me->stbEntry.schemaTag);
+ taosHashPut(suidInfo, &me->uid, sizeof(tb_uid_t), &dataTmp, sizeof(STableInfoForChildTable));
+}
+
+int32_t buildSnapContext(SMeta* pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta, SSnapContext** ctxRet){
+ SSnapContext* ctx = taosMemoryCalloc(1, sizeof(SSnapContext));
+ if(ctx == NULL) return -1;
+ *ctxRet = ctx;
+ ctx->pMeta = pMeta;
+ ctx->snapVersion = snapVersion;
+ ctx->suid = suid;
+ ctx->subType = subType;
+ ctx->queryMetaOrData = withMeta;
+ ctx->withMeta = withMeta;
+ ctx->idVersion = taosHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
+ if(ctx->idVersion == NULL){
+ return -1;
+ }
+
+ ctx->suidInfo = taosHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
+ if(ctx->suidInfo == NULL){
+ return -1;
+ }
+ taosHashSetFreeFp(ctx->suidInfo, destroySTableInfoForChildTable);
+
+ ctx->index = 0;
+ ctx->idList = taosArrayInit(100, sizeof(int64_t));
+ void *pKey = NULL;
+ void *pVal = NULL;
+ int vLen = 0, kLen = 0;
+
+ metaDebug("tmqsnap init snapVersion:%" PRIi64, ctx->snapVersion);
+ MoveToFirst(ctx);
+ while(1){
+ int32_t ret = tdbTbcNext(ctx->pCur, &pKey, &kLen, &pVal, &vLen);
+ if (ret < 0) break;
+ STbDbKey *tmp = (STbDbKey*)pKey;
+ if (tmp->version > ctx->snapVersion) break;
+
+ SIdInfo* idData = (SIdInfo*)taosHashGet(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t));
+ if(idData) {
+ continue;
+ }
+
+ if (tdbTbGet(pMeta->pUidIdx, &tmp->uid, sizeof(tb_uid_t), NULL, NULL) < 0) { // check if table exist for now, need optimize later
+ continue;
+ }
+
+ SDecoder dc = {0};
+ SMetaEntry me = {0};
+ tDecoderInit(&dc, pVal, vLen);
+ metaDecodeEntry(&dc, &me);
+ if(ctx->subType == TOPIC_SUB_TYPE__TABLE){
+ if ((me.uid != ctx->suid && me.type == TSDB_SUPER_TABLE) ||
+ (me.ctbEntry.suid != ctx->suid && me.type == TSDB_CHILD_TABLE)){
+ tDecoderClear(&dc);
+ continue;
+ }
+ }
+
+ taosArrayPush(ctx->idList, &tmp->uid);
+ metaDebug("tmqsnap init idlist name:%s, uid:%" PRIi64, me.name, tmp->uid);
+ SIdInfo info = {0};
+ taosHashPut(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t), &info, sizeof(SIdInfo));
+
+ tDecoderClear(&dc);
+ }
+ taosHashClear(ctx->idVersion);
+
+ MoveToSnapShotVersion(ctx);
+ while(1){
+ int32_t ret = tdbTbcPrev(ctx->pCur, &pKey, &kLen, &pVal, &vLen);
+ if (ret < 0) break;
+
+ STbDbKey *tmp = (STbDbKey*)pKey;
+ SIdInfo* idData = (SIdInfo*)taosHashGet(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t));
+ if(idData){
+ continue;
+ }
+ SIdInfo info = {.version = tmp->version, .index = 0};
+ taosHashPut(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t), &info, sizeof(SIdInfo));
+
+ SDecoder dc = {0};
+ SMetaEntry me = {0};
+ tDecoderInit(&dc, pVal, vLen);
+ metaDecodeEntry(&dc, &me);
+ if(ctx->subType == TOPIC_SUB_TYPE__TABLE){
+ if ((me.uid != ctx->suid && me.type == TSDB_SUPER_TABLE) ||
+ (me.ctbEntry.suid != ctx->suid && me.type == TSDB_CHILD_TABLE)){
+ tDecoderClear(&dc);
+ continue;
+ }
+ }
+
+ if ((ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_SUPER_TABLE)
+ || (ctx->subType == TOPIC_SUB_TYPE__TABLE && me.uid == ctx->suid)) {
+ saveSuperTableInfoForChildTable(&me, ctx->suidInfo);
+ }
+ tDecoderClear(&dc);
+ }
+
+ for(int i = 0; i < taosArrayGetSize(ctx->idList); i++){
+ int64_t *uid = taosArrayGet(ctx->idList, i);
+ SIdInfo* idData = (SIdInfo*)taosHashGet(ctx->idVersion, uid, sizeof(int64_t));
+ ASSERT(idData);
+ idData->index = i;
+ metaDebug("tmqsnap init idVersion uid:%" PRIi64 " version:%" PRIi64 " index:%d", *uid, idData->version, idData->index);
+ }
+
+ return TDB_CODE_SUCCESS;
+}
+
+int32_t destroySnapContext(SSnapContext* ctx){
+ tdbTbcClose(ctx->pCur);
+ taosArrayDestroy(ctx->idList);
+ taosHashCleanup(ctx->idVersion);
+ taosHashCleanup(ctx->suidInfo);
+ taosMemoryFree(ctx);
+ return 0;
+}
+
+static int32_t buildNormalChildTableInfo(SVCreateTbReq *req, void **pBuf, int32_t *contLen){
+ int32_t ret = 0;
+ SVCreateTbBatchReq reqs = {0};
+
+ reqs.pArray = taosArrayInit(1, sizeof(struct SVCreateTbReq));
+ if (NULL == reqs.pArray){
+ ret = -1;
+ goto end;
+ }
+ taosArrayPush(reqs.pArray, req);
+ reqs.nReqs = 1;
+
+ tEncodeSize(tEncodeSVCreateTbBatchReq, &reqs, *contLen, ret);
+ if(ret < 0){
+ ret = -1;
+ goto end;
+ }
+ *contLen += sizeof(SMsgHead);
+ *pBuf = taosMemoryMalloc(*contLen);
+ if (NULL == *pBuf) {
+ ret = -1;
+ goto end;
+ }
+ SEncoder coder = {0};
+ tEncoderInit(&coder, POINTER_SHIFT(*pBuf, sizeof(SMsgHead)), *contLen);
+ if (tEncodeSVCreateTbBatchReq(&coder, &reqs) < 0) {
+ taosMemoryFreeClear(*pBuf);
+ tEncoderClear(&coder);
+ ret = -1;
+ goto end;
+ }
+ tEncoderClear(&coder);
+
+end:
+ taosArrayDestroy(reqs.pArray);
+ return ret;
+}
+
+static int32_t buildSuperTableInfo(SVCreateStbReq *req, void **pBuf, int32_t *contLen){
+ int32_t ret = 0;
+ tEncodeSize(tEncodeSVCreateStbReq, req, *contLen, ret);
+ if (ret < 0) {
+ return -1;
+ }
+
+ *contLen += sizeof(SMsgHead);
+ *pBuf = taosMemoryMalloc(*contLen);
+ if (NULL == *pBuf) {
+ return -1;
+ }
+
+ SEncoder encoder = {0};
+ tEncoderInit(&encoder, POINTER_SHIFT(*pBuf, sizeof(SMsgHead)), *contLen);
+ if (tEncodeSVCreateStbReq(&encoder, req) < 0) {
+ taosMemoryFreeClear(*pBuf);
+ tEncoderClear(&encoder);
+ return -1;
+ }
+ tEncoderClear(&encoder);
+ return 0;
+}
+
+int32_t setForSnapShot(SSnapContext* ctx, int64_t uid){
+ int c = 0;
+
+ if(uid == 0){
+ ctx->index = 0;
+ return c;
+ }
+
+ SIdInfo* idInfo = (SIdInfo*)taosHashGet(ctx->idVersion, &uid, sizeof(tb_uid_t));
+ if(!idInfo){
+ return -1;
+ }
+
+ ctx->index = idInfo->index;
+
+ return c;
+}
+
+int32_t getMetafromSnapShot(SSnapContext* ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid){
+ int32_t ret = 0;
+ void *pKey = NULL;
+ void *pVal = NULL;
+ int vLen = 0, kLen = 0;
+
+ while(1){
+ if(ctx->index >= taosArrayGetSize(ctx->idList)){
+ metaDebug("tmqsnap get meta end");
+ ctx->index = 0;
+ ctx->queryMetaOrData = false; // change to get data
+ return 0;
+ }
+
+ int64_t* uidTmp = taosArrayGet(ctx->idList, ctx->index);
+ ctx->index++;
+ SIdInfo* idInfo = (SIdInfo*)taosHashGet(ctx->idVersion, uidTmp, sizeof(tb_uid_t));
+ ASSERT(idInfo);
+
+ *uid = *uidTmp;
+ ret = MoveToPosition(ctx, idInfo->version, *uidTmp);
+ if(ret == 0){
+ break;
+ }
+ metaDebug("tmqsnap get meta not exist uid:%" PRIi64 " version:%" PRIi64, *uid, idInfo->version);
+ }
+
+ tdbTbcGet(ctx->pCur, (const void**)&pKey, &kLen, (const void**)&pVal, &vLen);
+ SDecoder dc = {0};
+ SMetaEntry me = {0};
+ tDecoderInit(&dc, pVal, vLen);
+ metaDecodeEntry(&dc, &me);
+ metaDebug("tmqsnap get meta uid:%" PRIi64 " name:%s index:%d", *uid, me.name, ctx->index-1);
+
+ if ((ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_SUPER_TABLE)
+ || (ctx->subType == TOPIC_SUB_TYPE__TABLE && me.uid == ctx->suid)) {
+ SVCreateStbReq req = {0};
+ req.name = me.name;
+ req.suid = me.uid;
+ req.schemaRow = me.stbEntry.schemaRow;
+ req.schemaTag = me.stbEntry.schemaTag;
+ req.schemaRow.version = 1;
+ req.schemaTag.version = 1;
+
+ ret = buildSuperTableInfo(&req, pBuf, contLen);
+ *type = TDMT_VND_CREATE_STB;
+
+ } else if ((ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_CHILD_TABLE)
+ || (ctx->subType == TOPIC_SUB_TYPE__TABLE && me.type == TSDB_CHILD_TABLE && me.ctbEntry.suid == ctx->suid)) {
+ STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(ctx->suidInfo, &me.ctbEntry.suid, sizeof(tb_uid_t));
+ ASSERT(data);
+ SVCreateTbReq req = {0};
+
+ req.type = TSDB_CHILD_TABLE;
+ req.name = me.name;
+ req.uid = me.uid;
+ req.commentLen = -1;
+ req.ctb.suid = me.ctbEntry.suid;
+ req.ctb.tagNum = data->tagRow->nCols;
+ req.ctb.name = data->tableName;
+
+ SArray* tagName = taosArrayInit(req.ctb.tagNum, TSDB_COL_NAME_LEN);
+ STag* p = (STag*)me.ctbEntry.pTags;
+ if(tTagIsJson(p)){
+ if (p->nTag != 0) {
+ SSchema* schema = &data->tagRow->pSchema[0];
+ taosArrayPush(tagName, schema->name);
+ }
+ }else{
+ SArray* pTagVals = NULL;
+ if (tTagToValArray((const STag*)p, &pTagVals) != 0) {
+ ASSERT(0);
+ }
+ int16_t nCols = taosArrayGetSize(pTagVals);
+ for (int j = 0; j < nCols; ++j) {
+ STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, j);
+ for(int i = 0; i < data->tagRow->nCols; i++){
+ SSchema *schema = &data->tagRow->pSchema[i];
+ if(schema->colId == pTagVal->cid){
+ taosArrayPush(tagName, schema->name);
+ }
+ }
+ }
+ }
+// SIdInfo* sidInfo = (SIdInfo*)taosHashGet(ctx->idVersion, &me.ctbEntry.suid, sizeof(tb_uid_t));
+// if(sidInfo->version >= idInfo->version){
+// // need parse tag
+// STag* p = (STag*)me.ctbEntry.pTags;
+// SArray* pTagVals = NULL;
+// if (tTagToValArray((const STag*)p, &pTagVals) != 0) {
+// }
+//
+// int16_t nCols = taosArrayGetSize(pTagVals);
+// for (int j = 0; j < nCols; ++j) {
+// STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, j);
+// }
+// }else{
+ req.ctb.pTag = me.ctbEntry.pTags;
+// }
+
+ req.ctb.tagName = tagName;
+ ret = buildNormalChildTableInfo(&req, pBuf, contLen);
+ *type = TDMT_VND_CREATE_TABLE;
+ taosArrayDestroy(tagName);
+ } else if(ctx->subType == TOPIC_SUB_TYPE__DB){
+ SVCreateTbReq req = {0};
+ req.type = TSDB_NORMAL_TABLE;
+ req.name = me.name;
+ req.uid = me.uid;
+ req.commentLen = -1;
+ req.ntb.schemaRow = me.ntbEntry.schemaRow;
+ ret = buildNormalChildTableInfo(&req, pBuf, contLen);
+ *type = TDMT_VND_CREATE_TABLE;
+ } else{
+ ASSERT(0);
+ }
+ tDecoderClear(&dc);
+
+ return ret;
+}
+
+SMetaTableInfo getUidfromSnapShot(SSnapContext* ctx){
+ SMetaTableInfo result = {0};
+ void *pKey = NULL;
+ void *pVal = NULL;
+ int vLen, kLen;
+
+ while(1){
+ if(ctx->index >= taosArrayGetSize(ctx->idList)){
+ metaDebug("tmqsnap get uid info end");
+ return result;
+ }
+ int64_t* uidTmp = taosArrayGet(ctx->idList, ctx->index);
+ ctx->index++;
+ SIdInfo* idInfo = (SIdInfo*)taosHashGet(ctx->idVersion, uidTmp, sizeof(tb_uid_t));
+ ASSERT(idInfo);
+
+ int32_t ret = MoveToPosition(ctx, idInfo->version, *uidTmp);
+ if(ret != 0) {
+ metaDebug("tmqsnap getUidfromSnapShot not exist uid:%" PRIi64 " version:%" PRIi64, *uidTmp, idInfo->version);
+ continue;
+ }
+ tdbTbcGet(ctx->pCur, (const void**)&pKey, &kLen, (const void**)&pVal, &vLen);
+ SDecoder dc = {0};
+ SMetaEntry me = {0};
+ tDecoderInit(&dc, pVal, vLen);
+ metaDecodeEntry(&dc, &me);
+ metaDebug("tmqsnap get uid info uid:%" PRIi64 " name:%s index:%d", me.uid, me.name, ctx->index-1);
+
+ if (ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_CHILD_TABLE){
+ STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(ctx->suidInfo, &me.ctbEntry.suid, sizeof(tb_uid_t));
+ result.uid = me.uid;
+ result.suid = me.ctbEntry.suid;
+ result.schema = tCloneSSchemaWrapper(data->schemaRow);
+ strcpy(result.tbName, me.name);
+ tDecoderClear(&dc);
+ break;
+ } else if (ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_NORMAL_TABLE) {
+ result.uid = me.uid;
+ result.suid = 0;
+ strcpy(result.tbName, me.name);
+ result.schema = tCloneSSchemaWrapper(&me.ntbEntry.schemaRow);
+ tDecoderClear(&dc);
+ break;
+ } else if(ctx->subType == TOPIC_SUB_TYPE__TABLE && me.type == TSDB_CHILD_TABLE && me.ctbEntry.suid == ctx->suid) {
+ STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(ctx->suidInfo, &me.ctbEntry.suid, sizeof(tb_uid_t));
+ result.uid = me.uid;
+ result.suid = me.ctbEntry.suid;
+ strcpy(result.tbName, me.name);
+ result.schema = tCloneSSchemaWrapper(data->schemaRow);
+ tDecoderClear(&dc);
+ break;
+ } else{
+ metaDebug("tmqsnap get uid continue");
+ tDecoderClear(&dc);
+ continue;
+ }
+ }
+
+ return result;
+}
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index aa107ab2532b83b40abe8b1abdc60e059ab1de34..583a2e098f8a54ac61f21d696c6e65c62cd5c4ab 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -99,6 +99,7 @@ static int metaSaveJsonVarToIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const
memcpy(val, (uint16_t *)&len, VARSTR_HEADER_SIZE);
type = TSDB_DATA_TYPE_VARCHAR;
term = indexTermCreate(suid, ADD_VALUE, type, key, nKey, val, len);
+ taosMemoryFree(val);
} else if (pTagVal->nData == 0) {
term = indexTermCreate(suid, ADD_VALUE, TSDB_DATA_TYPE_VARCHAR, key, nKey, pTagVal->pData, 0);
}
@@ -115,6 +116,7 @@ static int metaSaveJsonVarToIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const
indexMultiTermAdd(terms, term);
}
}
+ taosArrayDestroy(pTagVals);
indexJsonPut(pMeta->pTagIvtIdx, terms, tuid);
indexMultiTermDestroy(terms);
#endif
@@ -367,7 +369,7 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
return 0;
}
-int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) {
+int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq, STableMetaRsp **pMetaRsp) {
SMetaEntry me = {0};
SMetaReader mr = {0};
@@ -413,6 +415,25 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) {
me.ctbEntry.suid = pReq->ctb.suid;
me.ctbEntry.pTags = pReq->ctb.pTag;
+#ifdef TAG_FILTER_DEBUG
+ SArray* pTagVals = NULL;
+ int32_t code = tTagToValArray((STag*)pReq->ctb.pTag, &pTagVals);
+ for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
+ STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, i);
+
+ if (IS_VAR_DATA_TYPE(pTagVal->type)) {
+ char* buf = taosMemoryCalloc(pTagVal->nData + 1, 1);
+ memcpy(buf, pTagVal->pData, pTagVal->nData);
+ metaDebug("metaTag table:%s varchar index:%d cid:%d type:%d value:%s", pReq->name, i, pTagVal->cid, pTagVal->type, buf);
+ taosMemoryFree(buf);
+ } else {
+ double val = 0;
+ GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64);
+ metaDebug("metaTag table:%s number index:%d cid:%d type:%d value:%f", pReq->name, i, pTagVal->cid, pTagVal->type, val);
+ }
+ }
+#endif
+
++pMeta->pVnode->config.vndStats.numOfCTables;
} else {
me.ntbEntry.ctime = pReq->ctime;
@@ -423,10 +444,26 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) {
me.ntbEntry.ncid = me.ntbEntry.schemaRow.pSchema[me.ntbEntry.schemaRow.nCols - 1].colId + 1;
++pMeta->pVnode->config.vndStats.numOfNTables;
+ pMeta->pVnode->config.vndStats.numOfNTimeSeries += me.ntbEntry.schemaRow.nCols - 1;
}
if (metaHandleEntry(pMeta, &me) < 0) goto _err;
+ if (pMetaRsp) {
+ *pMetaRsp = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+
+ if (*pMetaRsp) {
+ if (me.type == TSDB_CHILD_TABLE) {
+ (*pMetaRsp)->tableType = TSDB_CHILD_TABLE;
+ (*pMetaRsp)->tuid = pReq->uid;
+ (*pMetaRsp)->suid = pReq->ctb.suid;
+ strcpy((*pMetaRsp)->tbName, pReq->name);
+ } else {
+ metaUpdateMetaRsp(pReq->uid, pReq->name, &pReq->ntb.schemaRow, *pMetaRsp);
+ }
+ }
+ }
+
metaDebug("vgId:%d, table:%s uid %" PRId64 " is created, type:%" PRId8, TD_VID(pMeta->pVnode), pReq->name, pReq->uid,
pReq->type);
return 0;
@@ -516,6 +553,9 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
SDecoder dc = {0};
rc = tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData);
+ if (rc < 0) {
+ return -1;
+ }
int64_t version = ((SUidIdxVal *)pData)[0].version;
tdbTbGet(pMeta->pTbDb, &(STbDbKey){.version = version, .uid = uid}, sizeof(STbDbKey), &pData, &nData);
@@ -562,6 +602,7 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
// drop schema.db (todo)
--pMeta->pVnode->config.vndStats.numOfNTables;
+ pMeta->pVnode->config.vndStats.numOfNTimeSeries -= e.ntbEntry.schemaRow.nCols - 1;
} else if (e.type == TSDB_SUPER_TABLE) {
tdbTbDelete(pMeta->pSuidIdx, &e.uid, sizeof(tb_uid_t), &pMeta->txn);
// drop schema.db (todo)
@@ -664,6 +705,8 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].flags = pAlterTbReq->flags;
pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].colId = entry.ntbEntry.ncid++;
strcpy(pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].name, pAlterTbReq->colName);
+
+ ++pMeta->pVnode->config.vndStats.numOfNTimeSeries;
break;
case TSDB_ALTER_TABLE_DROP_COLUMN:
if (pColumn == NULL) {
@@ -684,6 +727,8 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
memmove(pColumn, pColumn + 1, tlen);
}
pSchema->nCols--;
+
+ --pMeta->pVnode->config.vndStats.numOfNTimeSeries;
break;
case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES:
if (pColumn == NULL) {
diff --git a/source/dnode/vnode/src/sma/smaCommit.c b/source/dnode/vnode/src/sma/smaCommit.c
index ca5367f39714ed1f3a979068b0a9a7204d385f8c..3cf50a035a720f7bf9e106c69a8a88e1117a8954 100644
--- a/source/dnode/vnode/src/sma/smaCommit.c
+++ b/source/dnode/vnode/src/sma/smaCommit.c
@@ -172,7 +172,7 @@ static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) {
TdDirPtr pDir = NULL;
TdDirEntryPtr pDirEntry = NULL;
char dir[TSDB_FILENAME_LEN];
- const char *pattern = "v[0-9]+qtaskinfo\\.ver([0-9]+)?$";
+ const char *pattern = "v[0-9]+qinf\\.v([0-9]+)?$";
regex_t regex;
int code = 0;
@@ -312,15 +312,22 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
SSmaStat *pStat = SMA_ENV_STAT(pEnv);
SRSmaStat *pRSmaStat = SMA_STAT_RSMA(pStat);
+ int32_t nLoops = 0;
// step 1: set rsma stat
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
- atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 1);
+ while (atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 1) != 0) {
+ ++nLoops;
+ if (nLoops > 1000) {
+ sched_yield();
+ nLoops = 0;
+ }
+ }
pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
ASSERT(pRSmaStat->commitAppliedVer > 0);
// step 2: wait for all triggered fetch tasks to finish
- int32_t nLoops = 0;
+ nLoops = 0;
while (1) {
if (T_REF_VAL_GET(pStat) == 0) {
smaDebug("vgId:%d, rsma commit, fetch tasks are all finished", SMA_VID(pSma));
@@ -344,7 +351,8 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
return TSDB_CODE_FAILED;
}
- smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma), (void*)taosGetSelfPthreadId());
+ smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma),
+ (void *)taosGetSelfPthreadId());
nLoops = 0;
while (atomic_load_64(&pRSmaStat->nBufItems) > 0) {
++nLoops;
@@ -359,7 +367,7 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
}
smaInfo("vgId:%d, rsma commit, operator state commited, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
-#if 0 // consuming task of qTaskInfo clone
+#if 0 // consuming task of qTaskInfo clone
// step 4: swap queue/qall and iQueue/iQall
// lock
// taosWLockLatch(SMA_ENV_LOCK(pEnv));
diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c
index e3b83f9955faf7a8000d18974cb6ec3639948c47..32a419022a312f9ab21681b9bc6f819c7792f51e 100644
--- a/source/dnode/vnode/src/sma/smaEnv.c
+++ b/source/dnode/vnode/src/sma/smaEnv.c
@@ -23,11 +23,13 @@ extern SSmaMgmt smaMgmt;
// declaration of static functions
-static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma);
-static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path);
-static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SSmaEnv **pEnv);
-static void *tdFreeTSmaStat(STSmaStat *pStat);
-static void tdDestroyRSmaStat(void *pRSmaStat);
+static int32_t tdNewSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv);
+static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv);
+static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma);
+static int32_t tdRsmaStartExecutor(const SSma *pSma);
+static int32_t tdRsmaStopExecutor(const SSma *pSma);
+static void *tdFreeTSmaStat(STSmaStat *pStat);
+static void tdDestroyRSmaStat(void *pRSmaStat);
/**
* @brief rsma init
@@ -97,35 +99,42 @@ void smaCleanUp() {
}
}
-static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path) {
+static int32_t tdNewSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv) {
SSmaEnv *pEnv = NULL;
pEnv = (SSmaEnv *)taosMemoryCalloc(1, sizeof(SSmaEnv));
+ *ppEnv = pEnv;
if (!pEnv) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
- return NULL;
+ return TSDB_CODE_FAILED;
}
SMA_ENV_TYPE(pEnv) = smaType;
taosInitRWLatch(&(pEnv->lock));
+ (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&SMA_TSMA_ENV(pSma), *ppEnv)
+ : atomic_store_ptr(&SMA_RSMA_ENV(pSma), *ppEnv);
+
if (tdInitSmaStat(&SMA_ENV_STAT(pEnv), smaType, pSma) != TSDB_CODE_SUCCESS) {
tdFreeSmaEnv(pEnv);
- return NULL;
+ *ppEnv = NULL;
+ (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&SMA_TSMA_ENV(pSma), NULL)
+ : atomic_store_ptr(&SMA_RSMA_ENV(pSma), NULL);
+ return TSDB_CODE_FAILED;
}
- return pEnv;
+ return TSDB_CODE_SUCCESS;
}
-static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SSmaEnv **pEnv) {
- if (!pEnv) {
+static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv) {
+ if (!ppEnv) {
terrno = TSDB_CODE_INVALID_PTR;
return TSDB_CODE_FAILED;
}
- if (!(*pEnv)) {
- if (!(*pEnv = tdNewSmaEnv(pSma, smaType, path))) {
+ if (!(*ppEnv)) {
+ if (tdNewSmaEnv(pSma, smaType, ppEnv) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_FAILED;
}
}
@@ -199,7 +208,7 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS
* tdInitSmaStat invoked in other multithread environment later.
*/
if (!(*pSmaStat)) {
- *pSmaStat = (SSmaStat *)taosMemoryCalloc(1, sizeof(SSmaStat));
+ *pSmaStat = (SSmaStat *)taosMemoryCalloc(1, sizeof(SSmaStat) + sizeof(TdThread) * tsNumOfVnodeRsmaThreads);
if (!(*pSmaStat)) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_FAILED;
@@ -231,6 +240,10 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS
if (!RSMA_INFO_HASH(pRSmaStat)) {
return TSDB_CODE_FAILED;
}
+
+ if (tdRsmaStartExecutor(pSma) < 0) {
+ return TSDB_CODE_FAILED;
+ }
} else if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
// TODO
} else {
@@ -291,6 +304,9 @@ static void tdDestroyRSmaStat(void *pRSmaStat) {
}
}
+ // step 4:
+ tdRsmaStopExecutor(pSma);
+
// step 5: free pStat
taosMemoryFreeClear(pStat);
}
@@ -381,17 +397,70 @@ int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType) {
pEnv = (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_load_ptr(&SMA_TSMA_ENV(pSma))
: atomic_load_ptr(&SMA_RSMA_ENV(pSma));
if (!pEnv) {
- char rname[TSDB_FILENAME_LEN] = {0};
-
- if (tdInitSmaEnv(pSma, smaType, rname, &pEnv) < 0) {
+ if (tdInitSmaEnv(pSma, smaType, &pEnv) < 0) {
tdUnLockSma(pSma);
return TSDB_CODE_FAILED;
}
-
- (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&SMA_TSMA_ENV(pSma), pEnv)
- : atomic_store_ptr(&SMA_RSMA_ENV(pSma), pEnv);
}
tdUnLockSma(pSma);
return TSDB_CODE_SUCCESS;
};
+
+void *tdRSmaExecutorFunc(void *param) {
+ setThreadName("vnode-rsma");
+
+ tdRSmaProcessExecImpl((SSma *)param, RSMA_EXEC_OVERFLOW);
+ return NULL;
+}
+
+static int32_t tdRsmaStartExecutor(const SSma *pSma) {
+ TdThreadAttr thAttr = {0};
+ taosThreadAttrInit(&thAttr);
+ taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE);
+
+ SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
+ SSmaStat *pStat = SMA_ENV_STAT(pEnv);
+ TdThread *pthread = (TdThread *)&pStat->data;
+
+ for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) {
+ if (taosThreadCreate(&pthread[i], &thAttr, tdRSmaExecutorFunc, (void *)pSma) != 0) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ smaError("vgId:%d, failed to create pthread for rsma since %s", SMA_VID(pSma), terrstr());
+ return -1;
+ }
+ smaDebug("vgId:%d, success to create pthread for rsma", SMA_VID(pSma));
+ }
+
+ taosThreadAttrDestroy(&thAttr);
+ return 0;
+}
+
+static int32_t tdRsmaStopExecutor(const SSma *pSma) {
+ if (pSma && VND_IS_RSMA(pSma->pVnode)) {
+ SSmaEnv *pEnv = NULL;
+ SSmaStat *pStat = NULL;
+ SRSmaStat *pRSmaStat = NULL;
+ TdThread *pthread = NULL;
+
+ if (!(pEnv = SMA_RSMA_ENV(pSma)) || !(pStat = SMA_ENV_STAT(pEnv))) {
+ return 0;
+ }
+
+ pEnv->flag |= SMA_ENV_FLG_CLOSE;
+ pRSmaStat = (SRSmaStat *)pStat;
+ pthread = (TdThread *)&pStat->data;
+
+ for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) {
+ tsem_post(&(pRSmaStat->notEmpty));
+ }
+
+ for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) {
+ if (taosCheckPthreadValid(pthread[i])) {
+ smaDebug("vgId:%d, start to join pthread for rsma:%" PRId64, SMA_VID(pSma), pthread[i]);
+ taosThreadJoin(pthread[i], NULL);
+ }
+ }
+ }
+ return 0;
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/sma/smaOpen.c b/source/dnode/vnode/src/sma/smaOpen.c
index e2710b26e3d3672aac1b8053b019aa0addd37920..3c3097bb2fa7e7717a61d11894490ddc9f571790 100644
--- a/source/dnode/vnode/src/sma/smaOpen.c
+++ b/source/dnode/vnode/src/sma/smaOpen.c
@@ -16,17 +16,17 @@
#include "sma.h"
#include "tsdb.h"
-static int32_t smaEvalDays(SRetention *r, int8_t precision);
-static int32_t smaSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type);
+static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t precision, int32_t duration);
+static int32_t smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type);
static int32_t rsmaRestore(SSma *pSma);
-#define SMA_SET_KEEP_CFG(l) \
+#define SMA_SET_KEEP_CFG(v, l) \
do { \
SRetention *r = &pCfg->retentions[l]; \
pKeepCfg->keep2 = convertTimeFromPrecisionToUnit(r->keep, pCfg->precision, TIME_UNIT_MINUTE); \
pKeepCfg->keep0 = pKeepCfg->keep2; \
pKeepCfg->keep1 = pKeepCfg->keep2; \
- pKeepCfg->days = smaEvalDays(r, pCfg->precision); \
+ pKeepCfg->days = smaEvalDays(v, pCfg->retentions, l, pCfg->precision, pCfg->days); \
} while (0)
#define SMA_OPEN_RSMA_IMPL(v, l) \
@@ -38,51 +38,78 @@ static int32_t rsmaRestore(SSma *pSma);
} \
break; \
} \
- smaSetKeepCfg(&keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \
+ smaSetKeepCfg(v, &keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \
if (tsdbOpen(v, &SMA_RSMA_TSDB##l(pSma), VNODE_RSMA##l##_DIR, &keepCfg) < 0) { \
goto _err; \
} \
} while (0)
-#define RETENTION_DAYS_SPLIT_RATIO 10
-#define RETENTION_DAYS_SPLIT_MIN 1
-#define RETENTION_DAYS_SPLIT_MAX 30
+/**
+ * @brief Evaluate days(duration) for rsma level 1/2/3.
+ * 1) level 1: duration from "create database"
+ * 2) level 2/3: duration * (freq/freqL1)
+ * @param pVnode
+ * @param r
+ * @param level
+ * @param precision
+ * @param duration
+ * @return int32_t
+ */
+static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t precision, int32_t duration) {
+ int32_t freqDuration = convertTimeFromPrecisionToUnit((r + TSDB_RETENTION_L0)->freq, precision, TIME_UNIT_MINUTE);
+ int32_t keepDuration = convertTimeFromPrecisionToUnit((r + TSDB_RETENTION_L0)->keep, precision, TIME_UNIT_MINUTE);
+ int32_t days = duration; // min
-static int32_t smaEvalDays(SRetention *r, int8_t precision) {
- int32_t keepDays = convertTimeFromPrecisionToUnit(r->keep, precision, TIME_UNIT_DAY);
- int32_t freqDays = convertTimeFromPrecisionToUnit(r->freq, precision, TIME_UNIT_DAY);
+ if (days < freqDuration) {
+ days = freqDuration;
+ }
- int32_t days = keepDays / RETENTION_DAYS_SPLIT_RATIO;
- if (days <= RETENTION_DAYS_SPLIT_MIN) {
- days = RETENTION_DAYS_SPLIT_MIN;
- if (days < freqDays) {
- days = freqDays + 1;
- }
- } else {
- if (days > RETENTION_DAYS_SPLIT_MAX) {
- days = RETENTION_DAYS_SPLIT_MAX;
- }
- if (days < freqDays) {
- days = freqDays + 1;
- }
+ if (days > keepDuration) {
+ days = keepDuration;
+ }
+
+ if (level == TSDB_RETENTION_L0) {
+ goto end;
+ }
+
+ ASSERT(level >= TSDB_RETENTION_L1 && level <= TSDB_RETENTION_L2);
+
+ freqDuration = convertTimeFromPrecisionToUnit((r + level)->freq, precision, TIME_UNIT_MINUTE);
+ keepDuration = convertTimeFromPrecisionToUnit((r + level)->keep, precision, TIME_UNIT_MINUTE);
+
+ int32_t nFreqTimes = (r + level)->freq / (r + TSDB_RETENTION_L0)->freq;
+ days *= (nFreqTimes > 1 ? nFreqTimes : 1);
+
+ if (days > keepDuration) {
+ days = keepDuration;
}
- return days * 1440;
+
+ if (days > TSDB_MAX_DURATION_PER_FILE) {
+ days = TSDB_MAX_DURATION_PER_FILE;
+ }
+
+ if (days < freqDuration) {
+ days = freqDuration;
+ }
+end:
+ smaInfo("vgId:%d, evaluated duration for level %" PRIi8 " is %d, raw val:%d", TD_VID(pVnode), level + 1, days, duration);
+ return days;
}
-int smaSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type) {
+int smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type) {
pKeepCfg->precision = pCfg->precision;
switch (type) {
case TSDB_TYPE_TSMA:
ASSERT(0);
break;
case TSDB_TYPE_RSMA_L0:
- SMA_SET_KEEP_CFG(0);
+ SMA_SET_KEEP_CFG(pVnode, 0);
break;
case TSDB_TYPE_RSMA_L1:
- SMA_SET_KEEP_CFG(1);
+ SMA_SET_KEEP_CFG(pVnode, 1);
break;
case TSDB_TYPE_RSMA_L2:
- SMA_SET_KEEP_CFG(2);
+ SMA_SET_KEEP_CFG(pVnode, 2);
break;
default:
ASSERT(0);
@@ -146,27 +173,13 @@ int32_t smaClose(SSma *pSma) {
return 0;
}
-int32_t smaPreClose(SSma *pSma) {
- if (pSma && VND_IS_RSMA(pSma->pVnode)) {
- SSmaEnv *pEnv = NULL;
- SRSmaStat *pStat = NULL;
- if (!(pEnv = SMA_RSMA_ENV(pSma)) || !(pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv))) {
- return 0;
- }
- for (int32_t i = 0; i < RSMA_EXECUTOR_MAX; ++i) {
- tsem_post(&(pStat->notEmpty));
- }
- }
- return 0;
-}
-
/**
* @brief rsma env restore
- *
- * @param pSma
- * @param type
- * @param committedVer
- * @return int32_t
+ *
+ * @param pSma
+ * @param type
+ * @param committedVer
+ * @return int32_t
*/
int32_t tdRsmaRestore(SSma *pSma, int8_t type, int64_t committedVer) {
ASSERT(VND_IS_RSMA(pSma->pVnode));
diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c
index 448b8ab50862cea44390b7b0c8cbc4d27d96c20c..f2063e306740fe5939831d919b57272483e7183d 100644
--- a/source/dnode/vnode/src/sma/smaRollup.c
+++ b/source/dnode/vnode/src/sma/smaRollup.c
@@ -21,17 +21,17 @@
#define RSMA_SUBMIT_BATCH_SIZE (1024) // cnt
#define RSMA_FETCH_DELAY_MAX (900000) // ms
#define RSMA_FETCH_ACTIVE_MAX (1800) // ms
+#define RSMA_FETCH_INTERVAL (5000) // ms
SSmaMgmt smaMgmt = {
.inited = 0,
.rsetId = -1,
};
-#define TD_QTASKINFO_FNAME_PREFIX "qtaskinfo.ver"
-#define TD_RSMAINFO_DEL_FILE "rsmainfo.del"
+#define TD_QTASKINFO_FNAME_PREFIX "qinf.v"
+
typedef struct SRSmaQTaskInfoItem SRSmaQTaskInfoItem;
typedef struct SRSmaQTaskInfoIter SRSmaQTaskInfoIter;
-typedef struct SRSmaExecQItem SRSmaExecQItem;
static int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid);
static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids);
@@ -82,11 +82,6 @@ struct SRSmaQTaskInfoIter {
int32_t nBufPos;
};
-struct SRSmaExecQItem {
- void *pRSmaInfo;
- void *qall;
-};
-
void tdRSmaQTaskInfoGetFileName(int32_t vgId, int64_t version, char *outputName) {
tdGetVndFileName(vgId, NULL, VNODE_RSMA_DIR, TD_QTASKINFO_FNAME_PREFIX, version, outputName);
}
@@ -621,7 +616,7 @@ static int32_t tdFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) {
*/
int32_t smaDoRetention(SSma *pSma, int64_t now) {
int32_t code = TSDB_CODE_SUCCESS;
- if (VND_IS_RSMA(pSma->pVnode)) {
+ if (!VND_IS_RSMA(pSma->pVnode)) {
return code;
}
@@ -734,10 +729,12 @@ static int32_t tdExecuteRSmaImplAsync(SSma *pSma, const void *pMsg, int32_t inpu
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
- tsem_post(&(pRSmaStat->notEmpty));
-
int64_t nItems = atomic_fetch_add_64(&pRSmaStat->nBufItems, 1);
+ if (atomic_load_8(&pInfo->assigned) == 0) {
+ tsem_post(&(pRSmaStat->notEmpty));
+ }
+
// smoothing consume
int32_t n = nItems / RSMA_QTASKEXEC_SMOOTH_SIZE;
if (n > 1) {
@@ -911,39 +908,6 @@ static int32_t tdExecuteRSmaAsync(SSma *pSma, const void *pMsg, int32_t inputTyp
return TSDB_CODE_SUCCESS;
}
-static int32_t tdRSmaExecCheck(SSma *pSma) {
- SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
-
- if (atomic_load_8(&pRSmaStat->nExecutor) >= TMIN(RSMA_EXECUTOR_MAX, tsNumOfVnodeQueryThreads / 2)) {
- return TSDB_CODE_SUCCESS;
- }
-
- SRSmaExecMsg fetchMsg;
- int32_t contLen = sizeof(SMsgHead);
- void *pBuf = rpcMallocCont(0 + contLen);
-
- ((SMsgHead *)pBuf)->vgId = SMA_VID(pSma);
- ((SMsgHead *)pBuf)->contLen = sizeof(SMsgHead);
-
- SRpcMsg rpcMsg = {
- .code = 0,
- .msgType = TDMT_VND_EXEC_RSMA,
- .pCont = pBuf,
- .contLen = contLen,
- };
-
- if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, QUERY_QUEUE, &rpcMsg)) != 0) {
- smaError("vgId:%d, failed to put rsma exec msg into query-queue since %s", SMA_VID(pSma), terrstr());
- goto _err;
- }
-
- smaDebug("vgId:%d, success to put rsma fetch msg into query-queue", SMA_VID(pSma));
-
- return TSDB_CODE_SUCCESS;
-_err:
- return TSDB_CODE_FAILED;
-}
-
int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) {
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
if (!pEnv) {
@@ -974,10 +938,6 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) {
goto _err;
}
}
-
- if (tdRSmaExecCheck(pSma) < 0) {
- goto _err;
- }
}
}
tdUidStoreDestory(&uidStore);
@@ -1118,9 +1078,6 @@ static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int8_t type, int64_t qTa
goto _err;
}
- SSmaEnv *pRSmaEnv = pSma->pRSmaEnv;
- SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pRSmaEnv);
-
SRSmaQTaskInfoIter fIter = {0};
if (tdRSmaQTaskInfoIterInit(&fIter, &tFile) < 0) {
tdRSmaQTaskInfoIterDestroy(&fIter);
@@ -1536,13 +1493,13 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
switch (rsmaTriggerStat) {
case TASK_TRIGGER_STAT_PAUSED:
case TASK_TRIGGER_STAT_CANCELLED: {
- tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
smaDebug("vgId:%d, rsma fetch task not start for level %" PRIi8 " since stat is %" PRIi8
", rsetId rsetId:%" PRIi64 " refId:%d",
SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pRSmaInfo->refId);
if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) {
- taosTmrReset(tdRSmaFetchTrigger, 5000, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
+ taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
}
+ tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
return;
}
default:
@@ -1553,7 +1510,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
atomic_val_compare_exchange_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE, TASK_TRIGGER_STAT_INACTIVE);
switch (fetchTriggerStat) {
case TASK_TRIGGER_STAT_ACTIVE: {
- smaDebug("vgId:%d, rsma fetch task started for level:%" PRIi8 " suid:%" PRIi64 " since stat is active",
+ smaDebug("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64 " since stat is active",
SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
// async process
pItem->fetchLevel = pItem->level;
@@ -1563,9 +1520,9 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
ASSERT(qItem->level == pItem->level);
ASSERT(qItem->fetchLevel == pItem->fetchLevel);
#endif
- tsem_post(&(pStat->notEmpty));
- smaInfo("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64, SMA_VID(pSma), pItem->level,
- pRSmaInfo->suid);
+ if (atomic_load_8(&pRSmaInfo->assigned) == 0) {
+ tsem_post(&(pStat->notEmpty));
+ }
} break;
case TASK_TRIGGER_STAT_PAUSED: {
smaDebug("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is paused",
@@ -1591,9 +1548,11 @@ _end:
}
static void tdFreeRSmaSubmitItems(SArray *pItems) {
+ ASSERT(taosArrayGetSize(pItems) > 0);
for (int32_t i = 0; i < taosArrayGetSize(pItems); ++i) {
taosFreeQitem(*(void **)taosArrayGet(pItems, i));
}
+ taosArrayClear(pItems);
}
/**
@@ -1703,6 +1662,7 @@ _err:
* @param type
* @return int32_t
*/
+
int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
SVnode *pVnode = pSma->pVnode;
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
@@ -1722,41 +1682,68 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
goto _err;
}
- bool isBusy = false;
while (true) {
- isBusy = false;
// step 1: rsma exec - consume data in buffer queue for all suids
if (type == RSMA_EXEC_OVERFLOW || type == RSMA_EXEC_COMMIT) {
- void *pIter = taosHashIterate(infoHash, NULL); // infoHash has r/w lock
- while (pIter) {
+ void *pIter = NULL;
+ while ((pIter = taosHashIterate(infoHash, pIter))) {
SRSmaInfo *pInfo = *(SRSmaInfo **)pIter;
- int64_t itemSize = 0;
- if ((itemSize = taosQueueItemSize(pInfo->queue)) || RSMA_INFO_ITEM(pInfo, 0)->fetchLevel ||
- RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) {
- smaDebug("vgId:%d, queueItemSize is %" PRIi64 " execType:%" PRIi8, SMA_VID(pSma), itemSize, type);
- if (atomic_val_compare_exchange_8(&pInfo->assigned, 0, 1) == 0) {
- taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock
- int32_t qallItemSize = taosQallItemSize(pInfo->qall);
- if (qallItemSize > 0) {
- tdRSmaBatchExec(pSma, pInfo, pInfo->qall, pSubmitArr, type);
+ if (atomic_val_compare_exchange_8(&pInfo->assigned, 0, 1) == 0) {
+ if ((taosQueueItemSize(pInfo->queue) > 0) || RSMA_INFO_ITEM(pInfo, 0)->fetchLevel ||
+ RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) {
+ int32_t batchCnt = -1;
+ int32_t batchMax = taosHashGetSize(infoHash) / tsNumOfVnodeRsmaThreads;
+ bool occupied = (batchMax <= 1);
+ if (batchMax > 1) {
+ batchMax = 100 / batchMax;
}
-
- if (type == RSMA_EXEC_OVERFLOW) {
- tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr);
+ while (occupied || (++batchCnt < batchMax)) { // greedy mode
+ taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock
+ int32_t qallItemSize = taosQallItemSize(pInfo->qall);
+ if (qallItemSize > 0) {
+ tdRSmaBatchExec(pSma, pInfo, pInfo->qall, pSubmitArr, type);
+ smaDebug("vgId:%d, batchSize:%d, execType:%" PRIi8, SMA_VID(pSma), qallItemSize, type);
+ }
+
+ int8_t oldStat = atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 2);
+ if (oldStat == 0 ||
+ ((oldStat == 2) && atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat)) < TASK_TRIGGER_STAT_PAUSED)) {
+ atomic_fetch_add_32(&pRSmaStat->nFetchAll, 1);
+ tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr);
+ if (0 == atomic_sub_fetch_32(&pRSmaStat->nFetchAll, 1)) {
+ atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 0);
+ }
+ }
+
+ if (qallItemSize > 0) {
+ atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize);
+ continue;
+ } else if (RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) {
+ if (atomic_load_8(RSMA_COMMIT_STAT(pRSmaStat)) == 0) {
+ continue;
+ }
+ for (int32_t j = 0; j < TSDB_RETENTION_L2; ++j) {
+ SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, j);
+ if (pItem->fetchLevel) {
+ pItem->fetchLevel = 0;
+ taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
+ }
+ }
+ }
+
+ break;
}
-
- if (qallItemSize > 0) {
- // subtract the item size after the task finished, commit should wait for all items be consumed
- atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize);
- isBusy = true;
- }
- ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0));
}
+ atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0);
}
- pIter = taosHashIterate(infoHash, pIter);
}
if (type == RSMA_EXEC_COMMIT) {
- break;
+ if (atomic_load_64(&pRSmaStat->nBufItems) <= 0) {
+ break;
+ } else {
+ // commit should wait for all items be consumed
+ continue;
+ }
}
}
#if 0
@@ -1776,7 +1763,7 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
}
// tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr);
- ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0));
+ atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0);
}
}
ASSERT(taosQueueItemSize(pInfo->iQueue) == 0);
@@ -1790,16 +1777,19 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
}
if (atomic_load_64(&pRSmaStat->nBufItems) <= 0) {
- if (pVnode->inClose) {
+ if (pEnv->flag & SMA_ENV_FLG_CLOSE) {
break;
}
+
tsem_wait(&pRSmaStat->notEmpty);
- if (pVnode->inClose && (atomic_load_64(&pRSmaStat->nBufItems) <= 0)) {
- smaInfo("vgId:%d, exec task end, inClose:%d, nBufItems:%" PRIi64, SMA_VID(pSma), pVnode->inClose,
+
+ if ((pEnv->flag & SMA_ENV_FLG_CLOSE) && (atomic_load_64(&pRSmaStat->nBufItems) <= 0)) {
+ smaInfo("vgId:%d, exec task end, flag:%" PRIi8 ", nBufItems:%" PRIi64, SMA_VID(pSma), pEnv->flag,
atomic_load_64(&pRSmaStat->nBufItems));
break;
}
}
+
} // end of while(true)
_end:
@@ -1809,39 +1799,3 @@ _err:
taosArrayDestroy(pSubmitArr);
return TSDB_CODE_FAILED;
}
-
-/**
- * @brief exec rsma level 1data, fetch result of level 2/3 and submit
- *
- * @param pSma
- * @param pMsg
- * @return int32_t
- */
-int32_t smaProcessExec(SSma *pSma, void *pMsg) {
- SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg;
- SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
-
- if (!pRpcMsg || pRpcMsg->contLen < sizeof(SMsgHead)) {
- terrno = TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP;
- goto _err;
- }
- smaDebug("vgId:%d, begin to process rsma exec msg by TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
-
- int8_t nOld = atomic_fetch_add_8(&pRSmaStat->nExecutor, 1);
-
- if (nOld < TMIN(RSMA_EXECUTOR_MAX, tsNumOfVnodeQueryThreads / 2)) {
- if (tdRSmaProcessExecImpl(pSma, RSMA_EXEC_OVERFLOW) < 0) {
- goto _err;
- }
- } else {
- atomic_fetch_sub_8(&pRSmaStat->nExecutor, 1);
- }
-
- smaDebug("vgId:%d, success to process rsma exec msg by TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
- return TSDB_CODE_SUCCESS;
-_err:
- atomic_fetch_sub_8(&pRSmaStat->nExecutor, 1);
- smaError("vgId:%d, failed to process rsma exec msg by TID:%p since %s", SMA_VID(pSma), (void *)taosGetSelfPthreadId(),
- terrstr());
- return TSDB_CODE_FAILED;
-}
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index c6bc8e6e598507ea820bb109e84fbb41a0ed099b..3f42d8360ee8648ec66068c31773caea6faea41f 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -79,6 +79,10 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
ASSERT(0);
}
+ if (streamLoadTasks(pTq->pStreamMeta) < 0) {
+ ASSERT(0);
+ }
+
return pTq;
}
@@ -96,7 +100,13 @@ void tqClose(STQ* pTq) {
}
int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp) {
- int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqMetaRsp(NULL, pRsp);
+ int32_t len = 0;
+ int32_t code = 0;
+ tEncodeSize(tEncodeSMqMetaRsp, pRsp, len, code);
+ if (code < 0) {
+ return -1;
+ }
+ int32_t tlen = sizeof(SMqRspHead) + len;
void* buf = rpcMallocCont(tlen);
if (buf == NULL) {
return -1;
@@ -107,7 +117,11 @@ int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq,
((SMqRspHead*)buf)->consumerId = pReq->consumerId;
void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
- tEncodeSMqMetaRsp(&abuf, pRsp);
+
+ SEncoder encoder = {0};
+ tEncoderInit(&encoder, abuf, len);
+ tEncodeSMqMetaRsp(&encoder, pRsp);
+ tEncoderClear(&encoder);
SRpcMsg resp = {
.info = pMsg->info,
@@ -117,9 +131,8 @@ int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq,
};
tmsgSendRsp(&resp);
- tqDebug("vgId:%d, from consumer:%" PRId64 ", (epoch %d) send rsp, res msg type %d, reqOffset:%" PRId64
- ", rspOffset:%" PRId64,
- TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->resMsgType, pRsp->reqOffset, pRsp->rspOffset);
+ tqDebug("vgId:%d, from consumer:%" PRId64 ", (epoch %d) send rsp, res msg type %d, offset type:%d",
+ TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->resMsgType, pRsp->rspOffset.type);
return 0;
}
@@ -183,6 +196,66 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
return 0;
}
+int32_t tqSendTaosxRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const STaosxRsp* pRsp) {
+ ASSERT(taosArrayGetSize(pRsp->blockData) == pRsp->blockNum);
+ ASSERT(taosArrayGetSize(pRsp->blockDataLen) == pRsp->blockNum);
+
+ if (pRsp->withSchema) {
+ ASSERT(taosArrayGetSize(pRsp->blockSchema) == pRsp->blockNum);
+ } else {
+ ASSERT(taosArrayGetSize(pRsp->blockSchema) == 0);
+ }
+
+ if (pRsp->reqOffset.type == TMQ_OFFSET__LOG) {
+ if (pRsp->blockNum > 0) {
+ ASSERT(pRsp->rspOffset.version > pRsp->reqOffset.version);
+ } else {
+ ASSERT(pRsp->rspOffset.version >= pRsp->reqOffset.version);
+ }
+ }
+
+ int32_t len = 0;
+ int32_t code = 0;
+ tEncodeSize(tEncodeSTaosxRsp, pRsp, len, code);
+ if (code < 0) {
+ return -1;
+ }
+ int32_t tlen = sizeof(SMqRspHead) + len;
+ void* buf = rpcMallocCont(tlen);
+ if (buf == NULL) {
+ return -1;
+ }
+
+ ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__TAOSX_RSP;
+ ((SMqRspHead*)buf)->epoch = pReq->epoch;
+ ((SMqRspHead*)buf)->consumerId = pReq->consumerId;
+
+ void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
+
+ SEncoder encoder = {0};
+ tEncoderInit(&encoder, abuf, len);
+ tEncodeSTaosxRsp(&encoder, pRsp);
+ tEncoderClear(&encoder);
+
+ SRpcMsg rsp = {
+ .info = pMsg->info,
+ .pCont = buf,
+ .contLen = tlen,
+ .code = 0,
+ };
+ tmsgSendRsp(&rsp);
+
+ char buf1[80] = {0};
+ char buf2[80] = {0};
+ tFormatOffset(buf1, 80, &pRsp->reqOffset);
+ tFormatOffset(buf2, 80, &pRsp->rspOffset);
+ tqDebug("taosx rsp, vgId:%d, from consumer:%" PRId64
+ ", (epoch %d) send rsp, block num: %d, reqOffset:%s, rspOffset:%s",
+ TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->blockNum, buf1, buf2);
+
+ return 0;
+}
+
static FORCE_INLINE bool tqOffsetLessOrEqual(const STqOffset* pLeft, const STqOffset* pRight) {
return pLeft->val.type == TMQ_OFFSET__LOG && pRight->val.type == TMQ_OFFSET__LOG &&
pLeft->val.version <= pRight->val.version;
@@ -198,7 +271,7 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t version, char* msg, int32_t m
}
tDecoderClear(&decoder);
- if (offset.val.type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ if (offset.val.type == TMQ_OFFSET__SNAPSHOT_DATA || offset.val.type == TMQ_OFFSET__SNAPSHOT_META) {
tqDebug("receive offset commit msg to %s on vgId:%d, offset(type:snapshot) uid:%" PRId64 ", ts:%" PRId64,
offset.subKey, TD_VID(pTq->pVnode), offset.val.uid, offset.val.ts);
} else if (offset.val.type == TMQ_OFFSET__LOG) {
@@ -290,10 +363,25 @@ static int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq, int8_t su
return 0;
}
+static int32_t tqInitTaosxRsp(STaosxRsp* pRsp, const SMqPollReq* pReq) {
+ pRsp->reqOffset = pReq->reqOffset;
+
+ pRsp->withTbName = 1;
+ pRsp->withSchema = 1;
+ pRsp->blockData = taosArrayInit(0, sizeof(void*));
+ pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t));
+ pRsp->blockTbName = taosArrayInit(0, sizeof(void*));
+ pRsp->blockSchema = taosArrayInit(0, sizeof(void*));
+
+ if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) {
+ return -1;
+ }
+ return 0;
+}
+
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SMqPollReq* pReq = pMsg->pCont;
int64_t consumerId = pReq->consumerId;
- int64_t timeout = pReq->timeout;
int32_t reqEpoch = pReq->epoch;
int32_t code = 0;
STqOffsetVal reqOffset = pReq->reqOffset;
@@ -329,9 +417,6 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
tqDebug("tmq poll: consumer %" PRId64 " (epoch %d), subkey %s, recv poll req in vg %d, req offset %s", consumerId,
pReq->epoch, pHandle->subKey, TD_VID(pTq->pVnode), buf);
- SMqDataRsp dataRsp = {0};
- tqInitDataRsp(&dataRsp, pReq, pHandle->execHandle.subType);
-
// 2.reset offset if needed
if (reqOffset.type > 0) {
fetchOffsetNew = reqOffset;
@@ -345,62 +430,97 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
TD_VID(pTq->pVnode), formatBuf);
} else {
if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
- if (pReq->useSnapshot && pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- if (!pHandle->fetchMeta) {
- tqOffsetResetToData(&fetchOffsetNew, 0, 0);
+ if (pReq->useSnapshot) {
+ if (pHandle->fetchMeta) {
+ tqOffsetResetToMeta(&fetchOffsetNew, 0);
} else {
- // reset to meta
- ASSERT(0);
+ tqOffsetResetToData(&fetchOffsetNew, 0, 0);
}
} else {
tqOffsetResetToLog(&fetchOffsetNew, walGetFirstVer(pTq->pVnode->pWal));
}
} else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
+ SMqDataRsp dataRsp = {0};
+ tqInitDataRsp(&dataRsp, pReq, pHandle->execHandle.subType);
+
tqOffsetResetToLog(&dataRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
tqDebug("tmq poll: consumer %" PRId64 ", subkey %s, vg %d, offset reset to %" PRId64, consumerId,
pHandle->subKey, TD_VID(pTq->pVnode), dataRsp.rspOffset.version);
if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
code = -1;
}
- goto OVER;
+ tDeleteSMqDataRsp(&dataRsp);
+ return code;
} else if (reqOffset.type == TMQ_OFFSET__RESET_NONE) {
tqError("tmq poll: subkey %s, no offset committed for consumer %" PRId64
" in vg %d, subkey %s, reset none failed",
pHandle->subKey, consumerId, TD_VID(pTq->pVnode), pReq->subKey);
terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET;
- code = -1;
- goto OVER;
+ return -1;
}
}
}
- // 3.query
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- /*if (fetchOffsetNew.type == TMQ_OFFSET__LOG) {*/
- /*fetchOffsetNew.version++;*/
- /*}*/
- if (tqScan(pTq, pHandle, &dataRsp, &fetchOffsetNew) < 0) {
- ASSERT(0);
+ SMqDataRsp dataRsp = {0};
+ tqInitDataRsp(&dataRsp, pReq, pHandle->execHandle.subType);
+ tqScanData(pTq, pHandle, &dataRsp, &fetchOffsetNew);
+
+ if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
code = -1;
- goto OVER;
}
- if (dataRsp.blockNum == 0) {
- // TODO add to async task pool
- /*dataRsp.rspOffset.version--;*/
+
+ tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send data blockNum:%d, offset type:%d, uid:%ld, version:%ld",
+ consumerId, pHandle->subKey, TD_VID(pTq->pVnode), dataRsp.blockNum, dataRsp.rspOffset.type,
+ dataRsp.rspOffset.uid, dataRsp.rspOffset.version);
+
+ tDeleteSMqDataRsp(&dataRsp);
+ return code;
+ }
+
+ // for taosx
+ ASSERT(pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN);
+
+ SMqMetaRsp metaRsp = {0};
+
+ STaosxRsp taosxRsp = {0};
+ tqInitTaosxRsp(&taosxRsp, pReq);
+
+ if (fetchOffsetNew.type != TMQ_OFFSET__LOG) {
+ tqScan(pTq, pHandle, &taosxRsp, &metaRsp, &fetchOffsetNew);
+
+ if (metaRsp.metaRspLen > 0) {
+ if (tqSendMetaPollRsp(pTq, pMsg, pReq, &metaRsp) < 0) {
+ code = -1;
+ }
+ tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send meta offset type:%d,uid:%ld,version:%ld", consumerId,
+ pHandle->subKey, TD_VID(pTq->pVnode), metaRsp.rspOffset.type, metaRsp.rspOffset.uid,
+ metaRsp.rspOffset.version);
+ taosMemoryFree(metaRsp.metaRsp);
+ tDeleteSTaosxRsp(&taosxRsp);
+ return code;
}
- if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
- code = -1;
+
+ if (taosxRsp.blockNum > 0) {
+ if (tqSendTaosxRsp(pTq, pMsg, pReq, &taosxRsp) < 0) {
+ code = -1;
+ }
+ tDeleteSTaosxRsp(&taosxRsp);
+ return code;
+ } else {
+ fetchOffsetNew = taosxRsp.rspOffset;
}
- goto OVER;
+
+ tqDebug("taosx poll: consumer %ld, subkey %s, vg %d, send data blockNum:%d, offset type:%d,uid:%ld,version:%ld",
+ consumerId, pHandle->subKey, TD_VID(pTq->pVnode), taosxRsp.blockNum, taosxRsp.rspOffset.type,
+ taosxRsp.rspOffset.uid, taosxRsp.rspOffset.version);
}
- if (pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN) {
- ASSERT(fetchOffsetNew.type == TMQ_OFFSET__LOG);
+ if (fetchOffsetNew.type == TMQ_OFFSET__LOG) {
int64_t fetchVer = fetchOffsetNew.version + 1;
pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048);
if (pCkHead == NULL) {
- code = -1;
- goto OVER;
+ return -1;
}
walSetReaderCapacity(pHandle->pWalReader, 2048);
@@ -415,14 +535,13 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
}
if (tqFetchLog(pTq, pHandle, &fetchVer, &pCkHead) < 0) {
- // TODO add push mgr
-
- tqOffsetResetToLog(&dataRsp.rspOffset, fetchVer);
- ASSERT(dataRsp.rspOffset.version >= dataRsp.reqOffset.version);
- if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
+ tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
+ if (tqSendTaosxRsp(pTq, pMsg, pReq, &taosxRsp) < 0) {
code = -1;
}
- goto OVER;
+ tDeleteSTaosxRsp(&taosxRsp);
+ if (pCkHead) taosMemoryFree(pCkHead);
+ return code;
}
SWalCont* pHead = &pCkHead->head;
@@ -433,19 +552,19 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
if (pHead->msgType == TDMT_VND_SUBMIT) {
SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
- if (tqLogScanExec(pTq, &pHandle->execHandle, pCont, &dataRsp) < 0) {
+ if (tqTaosxScanLog(pTq, pHandle, pCont, &taosxRsp) < 0) {
/*ASSERT(0);*/
}
// TODO batch optimization:
// TODO continue scan until meeting batch requirement
- if (dataRsp.blockNum > 0 /* threshold */) {
- tqOffsetResetToLog(&dataRsp.rspOffset, fetchVer);
- ASSERT(dataRsp.rspOffset.version >= dataRsp.reqOffset.version);
-
- if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
+ if (taosxRsp.blockNum > 0 /* threshold */) {
+ tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
+ if (tqSendTaosxRsp(pTq, pMsg, pReq, &taosxRsp) < 0) {
code = -1;
}
- goto OVER;
+ tDeleteSTaosxRsp(&taosxRsp);
+ if (pCkHead) taosMemoryFree(pCkHead);
+ return code;
} else {
fetchVer++;
}
@@ -454,40 +573,22 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
ASSERT(pHandle->fetchMeta);
ASSERT(IS_META_MSG(pHead->msgType));
tqDebug("fetch meta msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
- SMqMetaRsp metaRsp = {0};
- /*metaRsp.reqOffset = pReq->reqOffset.version;*/
- metaRsp.rspOffset = fetchVer;
- /*metaRsp.rspOffsetNew.version = fetchVer;*/
- tqOffsetResetToLog(&metaRsp.reqOffsetNew, pReq->reqOffset.version);
- tqOffsetResetToLog(&metaRsp.rspOffsetNew, fetchVer);
+ tqOffsetResetToLog(&metaRsp.rspOffset, fetchVer);
metaRsp.resMsgType = pHead->msgType;
metaRsp.metaRspLen = pHead->bodyLen;
metaRsp.metaRsp = pHead->body;
if (tqSendMetaPollRsp(pTq, pMsg, pReq, &metaRsp) < 0) {
code = -1;
- goto OVER;
+ taosMemoryFree(pCkHead);
+ return code;
}
code = 0;
- goto OVER;
+ if (pCkHead) taosMemoryFree(pCkHead);
+ return code;
}
}
}
-
-OVER:
- if (pCkHead) taosMemoryFree(pCkHead);
- // TODO wrap in destroy func
- taosArrayDestroy(dataRsp.blockDataLen);
- taosArrayDestroyP(dataRsp.blockData, (FDelete)taosMemoryFree);
-
- if (dataRsp.withSchema) {
- taosArrayDestroyP(dataRsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
- }
-
- if (dataRsp.withTbName) {
- taosArrayDestroyP(dataRsp.blockTbName, (FDelete)taosMemoryFree);
- }
-
- return code;
+ return 0;
}
int32_t tqProcessVgDeleteReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
@@ -557,6 +658,7 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
pHandle->execHandle.subType = req.subType;
pHandle->fetchMeta = req.withMeta;
+
// TODO version should be assigned and refed during preprocess
SWalRef* pRef = walRefCommittedVer(pTq->pVnode->pWal);
if (pRef == NULL) {
@@ -566,36 +668,42 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
int64_t ver = pRef->refVer;
pHandle->pRef = pRef;
+ SReadHandle handle = {
+ .meta = pTq->pVnode->pMeta,
+ .vnode = pTq->pVnode,
+ .initTableReader = true,
+ .initTqReader = true,
+ .version = ver,
+ };
+ pHandle->snapshotVer = ver;
+
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
pHandle->execHandle.execCol.qmsg = req.qmsg;
- pHandle->snapshotVer = ver;
req.qmsg = NULL;
- SReadHandle handle = {
- .meta = pTq->pVnode->pMeta,
- .vnode = pTq->pVnode,
- .initTableReader = true,
- .initTqReader = true,
- .version = ver,
- };
- pHandle->execHandle.execCol.task =
+
+ pHandle->execHandle.task =
qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, &pHandle->execHandle.numOfCols,
&pHandle->execHandle.pSchemaWrapper);
- ASSERT(pHandle->execHandle.execCol.task);
+ ASSERT(pHandle->execHandle.task);
void* scanner = NULL;
- qExtractStreamScanner(pHandle->execHandle.execCol.task, &scanner);
+ qExtractStreamScanner(pHandle->execHandle.task, &scanner);
ASSERT(scanner);
pHandle->execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner);
ASSERT(pHandle->execHandle.pExecReader);
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
-
pHandle->execHandle.pExecReader = tqOpenReader(pTq->pVnode);
pHandle->execHandle.execDb.pFilterOutTbUid =
taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ buildSnapContext(handle.meta, handle.version, 0, pHandle->execHandle.subType, pHandle->fetchMeta,
+ (SSnapContext**)(&handle.sContext));
+
+ pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, NULL, NULL);
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
pHandle->execHandle.execTb.suid = req.suid;
+
SArray* tbUidList = taosArrayInit(0, sizeof(int64_t));
vnodeGetCtbIdList(pTq->pVnode, req.suid, tbUidList);
tqDebug("vgId:%d, tq try to get all ctb, suid:%" PRId64, pTq->pVnode->config.vgId, req.suid);
@@ -606,6 +714,10 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
pHandle->execHandle.pExecReader = tqOpenReader(pTq->pVnode);
tqReaderSetTbUidList(pHandle->execHandle.pExecReader, tbUidList);
taosArrayDestroy(tbUidList);
+
+ buildSnapContext(handle.meta, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta,
+ (SSnapContext**)(&handle.sContext));
+ pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, NULL, NULL);
}
taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
tqDebug("try to persist handle %s consumer %" PRId64, req.subKey, pHandle->consumerId);
@@ -648,17 +760,28 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
// expand executor
if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
+ pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask);
+ if (pTask->pState == NULL) {
+ return -1;
+ }
+
SReadHandle handle = {
.meta = pTq->pVnode->pMeta,
.vnode = pTq->pVnode,
.initTqReader = 1,
+ .pStateBackend = pTask->pState,
};
pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle);
ASSERT(pTask->exec.executor);
} else if (pTask->taskLevel == TASK_LEVEL__AGG) {
+ pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask);
+ if (pTask->pState == NULL) {
+ return -1;
+ }
SReadHandle mgHandle = {
.vnode = NULL,
.numOfVgroups = (int32_t)taosArrayGetSize(pTask->childEpInfo),
+ .pStateBackend = pTask->pState,
};
pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle);
ASSERT(pTask->exec.executor);
diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c
index 435bbb77b8cab0b6c631f98e30444501ae8faf03..8c3fa254461e2f31a71787606f6378c2f50868b5 100644
--- a/source/dnode/vnode/src/tq/tqExec.c
+++ b/source/dnode/vnode/src/tq/tqExec.c
@@ -60,18 +60,20 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp) {
return 0;
}
-int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset) {
+int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset) {
const STqExecHandle* pExec = &pHandle->execHandle;
- qTaskInfo_t task = pExec->execCol.task;
+ ASSERT(pExec->subType == TOPIC_SUB_TYPE__COLUMN);
+
+ qTaskInfo_t task = pExec->task;
- if (qStreamPrepareScan(task, pOffset) < 0) {
+ if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
tqDebug("prepare scan failed, return");
if (pOffset->type == TMQ_OFFSET__LOG) {
pRsp->rspOffset = *pOffset;
return 0;
} else {
tqOffsetResetToLog(pOffset, pHandle->snapshotVer);
- if (qStreamPrepareScan(task, pOffset) < 0) {
+ if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
tqDebug("prepare scan failed, return");
pRsp->rspOffset = *pOffset;
return 0;
@@ -83,124 +85,148 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVa
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
- tqDebug("task start to execute");
+ tqDebug("tmq task start to execute");
if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(0);
}
- tqDebug("task execute end, get %p", pDataBlock);
+ tqDebug("tmq task executed, get %p", pDataBlock);
- if (pDataBlock != NULL) {
- if (pRsp->withTbName) {
- if (pOffset->type == TMQ_OFFSET__LOG) {
- int64_t uid = pExec->pExecReader->msgIter.uid;
- if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) {
- continue;
- }
- } else {
- pRsp->withTbName = 0;
- }
- }
- tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols);
- pRsp->blockNum++;
- if (pOffset->type == TMQ_OFFSET__LOG) {
- continue;
- } else {
- rowCnt += pDataBlock->info.rows;
- if (rowCnt <= 4096) continue;
- }
+ if (pDataBlock == NULL) {
+ break;
}
- if (pRsp->blockNum == 0 && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
- tqDebug("vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode),
- pHandle->snapshotVer + 1);
- tqOffsetResetToLog(pOffset, pHandle->snapshotVer);
- qStreamPrepareScan(task, pOffset);
- continue;
- }
-
- void* meta = qStreamExtractMetaMsg(task);
- if (meta != NULL) {
- // tq add meta to rsp
- }
+ tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols);
+ pRsp->blockNum++;
- if (qStreamExtractOffset(task, &pRsp->rspOffset) < 0) {
- ASSERT(0);
+ if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ rowCnt += pDataBlock->info.rows;
+ if (rowCnt >= 4096) break;
}
+ }
- ASSERT(pRsp->rspOffset.type != 0);
+ if (qStreamExtractOffset(task, &pRsp->rspOffset) < 0) {
+ ASSERT(0);
+ return -1;
+ }
+ ASSERT(pRsp->rspOffset.type != 0);
-#if 0
- if (pRsp->reqOffset.type == TMQ_OFFSET__LOG) {
- if (pRsp->blockNum > 0) {
- ASSERT(pRsp->rspOffset.version > pRsp->reqOffset.version);
- } else {
- ASSERT(pRsp->rspOffset.version >= pRsp->reqOffset.version);
- }
+ if (pRsp->withTbName) {
+ if (pRsp->rspOffset.type == TMQ_OFFSET__LOG) {
+ int64_t uid = pExec->pExecReader->msgIter.uid;
+ tqAddTbNameToRsp(pTq, uid, pRsp);
+ } else {
+ pRsp->withTbName = false;
}
-#endif
-
- tqDebug("task exec exited");
- break;
}
+ ASSERT(pRsp->withSchema == false);
return 0;
}
-#if 0
-int32_t tqScanSnapshot(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffsetVal offset, int32_t workerId) {
- ASSERT(pExec->subType == TOPIC_SUB_TYPE__COLUMN);
- qTaskInfo_t task = pExec->execCol.task[workerId];
+int32_t tqScan(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* pOffset) {
+ const STqExecHandle* pExec = &pHandle->execHandle;
+ qTaskInfo_t task = pExec->task;
- if (qStreamPrepareTsdbScan(task, offset.uid, offset.ts) < 0) {
- ASSERT(0);
+ if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
+ tqDebug("prepare scan failed, return");
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ pRsp->rspOffset = *pOffset;
+ return 0;
+ } else {
+ tqOffsetResetToLog(pOffset, pHandle->snapshotVer);
+ if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
+ tqDebug("prepare scan failed, return");
+ pRsp->rspOffset = *pOffset;
+ return 0;
+ }
+ }
}
int32_t rowCnt = 0;
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
+ tqDebug("tmqsnap task start to execute");
if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(0);
}
- if (pDataBlock == NULL) break;
+ tqDebug("tmqsnap task execute end, get %p", pDataBlock);
- ASSERT(pDataBlock->info.rows != 0);
- ASSERT(taosArrayGetSize(pDataBlock->pDataBlock) != 0);
+ if (pDataBlock != NULL) {
+ if (pRsp->withTbName) {
+ int64_t uid = 0;
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ uid = pExec->pExecReader->msgIter.uid;
+ if (tqAddTbNameToRsp(pTq, uid, (SMqDataRsp*)pRsp) < 0) {
+ continue;
+ }
+ } else {
+ char* tbName = strdup(qExtractTbnameFromTask(task));
+ taosArrayPush(pRsp->blockTbName, &tbName);
+ }
+ }
+ if (pRsp->withSchema) {
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ tqAddBlockSchemaToRsp(pExec, (SMqDataRsp*)pRsp);
+ } else {
+ SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task));
+ taosArrayPush(pRsp->blockSchema, &pSW);
+ }
+ }
- tqAddBlockDataToRsp(pDataBlock, pRsp);
+ tqAddBlockDataToRsp(pDataBlock, (SMqDataRsp*)pRsp, taosArrayGetSize(pDataBlock->pDataBlock));
+ pRsp->blockNum++;
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ continue;
+ } else {
+ rowCnt += pDataBlock->info.rows;
+ if (rowCnt <= 4096) continue;
+ }
+ }
- if (pRsp->withTbName) {
- pRsp->withTbName = 0;
-#if 0
- int64_t uid;
- int64_t ts;
- if (qGetStreamScanStatus(task, &uid, &ts) < 0) {
- ASSERT(0);
+ if (pDataBlock == NULL && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ if (qStreamExtractPrepareUid(task) != 0) {
+ continue;
}
- tqAddTbNameToRsp(pTq, uid, pRsp);
-#endif
+ tqDebug("tmqsnap vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode),
+ pHandle->snapshotVer + 1);
+ break;
}
- pRsp->blockNum++;
- rowCnt += pDataBlock->info.rows;
- if (rowCnt >= 4096) break;
+ if (pRsp->blockNum > 0) {
+ tqDebug("tmqsnap task exec exited, get data");
+ break;
+ }
+
+ SMqMetaRsp* tmp = qStreamExtractMetaMsg(task);
+ if (tmp->rspOffset.type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ tqOffsetResetToData(pOffset, tmp->rspOffset.uid, tmp->rspOffset.ts);
+ qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType);
+ tmp->rspOffset.type = TMQ_OFFSET__SNAPSHOT_META;
+ tqDebug("tmqsnap task exec change to get data");
+ continue;
+ }
+
+ *pMetaRsp = *tmp;
+ tqDebug("tmqsnap task exec exited, get meta");
+
+ tqDebug("task exec exited");
+ break;
}
- int64_t uid;
- int64_t ts;
- if (qGetStreamScanStatus(task, &uid, &ts) < 0) {
+
+ if (qStreamExtractOffset(task, &pRsp->rspOffset) < 0) {
ASSERT(0);
}
- tqOffsetResetToData(&pRsp->rspOffset, uid, ts);
+ ASSERT(pRsp->rspOffset.type != 0);
return 0;
}
-#endif
-int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp) {
+int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp* pRsp) {
+ STqExecHandle* pExec = &pHandle->execHandle;
ASSERT(pExec->subType != TOPIC_SUB_TYPE__COLUMN);
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
- pRsp->withSchema = 1;
STqReader* pReader = pExec->pExecReader;
tqReaderSetDataMsg(pReader, pReq, 0);
while (tqNextDataBlock(pReader)) {
@@ -210,18 +236,17 @@ int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataR
}
if (pRsp->withTbName) {
int64_t uid = pExec->pExecReader->msgIter.uid;
- if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) {
+ if (tqAddTbNameToRsp(pTq, uid, (SMqDataRsp*)pRsp) < 0) {
blockDataFreeRes(&block);
continue;
}
}
- tqAddBlockDataToRsp(&block, pRsp, taosArrayGetSize(block.pDataBlock));
+ tqAddBlockDataToRsp(&block, (SMqDataRsp*)pRsp, taosArrayGetSize(block.pDataBlock));
blockDataFreeRes(&block);
- tqAddBlockSchemaToRsp(pExec, pRsp);
+ tqAddBlockSchemaToRsp(pExec, (SMqDataRsp*)pRsp);
pRsp->blockNum++;
}
} else if (pExec->subType == TOPIC_SUB_TYPE__DB) {
- pRsp->withSchema = 1;
STqReader* pReader = pExec->pExecReader;
tqReaderSetDataMsg(pReader, pReq, 0);
while (tqNextDataBlockFilterOut(pReader, pExec->execDb.pFilterOutTbUid)) {
@@ -231,16 +256,39 @@ int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataR
}
if (pRsp->withTbName) {
int64_t uid = pExec->pExecReader->msgIter.uid;
- if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) {
+ if (tqAddTbNameToRsp(pTq, uid, (SMqDataRsp*)pRsp) < 0) {
blockDataFreeRes(&block);
continue;
}
}
- tqAddBlockDataToRsp(&block, pRsp, taosArrayGetSize(block.pDataBlock));
+ tqAddBlockDataToRsp(&block, (SMqDataRsp*)pRsp, taosArrayGetSize(block.pDataBlock));
blockDataFreeRes(&block);
- tqAddBlockSchemaToRsp(pExec, pRsp);
+ tqAddBlockSchemaToRsp(pExec, (SMqDataRsp*)pRsp);
pRsp->blockNum++;
}
+#if 1
+ if (pHandle->fetchMeta && pRsp->blockNum) {
+ SSubmitMsgIter iter = {0};
+ tInitSubmitMsgIter(pReq, &iter);
+ STaosxRsp* pXrsp = (STaosxRsp*)pRsp;
+ while (1) {
+ SSubmitBlk* pBlk = NULL;
+ if (tGetSubmitMsgNext(&iter, &pBlk) < 0) break;
+ if (pBlk == NULL) break;
+ if (pBlk->schemaLen > 0) {
+ if (pXrsp->createTableNum == 0) {
+ pXrsp->createTableLen = taosArrayInit(0, sizeof(int32_t));
+ pXrsp->createTableReq = taosArrayInit(0, sizeof(void*));
+ }
+ void* createReq = taosMemoryCalloc(1, pBlk->schemaLen);
+ memcpy(createReq, pBlk->data, pBlk->schemaLen);
+ taosArrayPush(pXrsp->createTableLen, &pBlk->schemaLen);
+ taosArrayPush(pXrsp->createTableReq, &createReq);
+ pXrsp->createTableNum++;
+ }
+ }
+ }
+#endif
}
if (pRsp->blockNum == 0) {
diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c
index 405bc669bd23c27b2b234d2b60be4ef6def8bc80..62f8debccb8ff9c478de0fb331cc5741b503b011 100644
--- a/source/dnode/vnode/src/tq/tqMeta.c
+++ b/source/dnode/vnode/src/tq/tqMeta.c
@@ -18,12 +18,25 @@
int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeCStr(pEncoder, pHandle->subKey) < 0) return -1;
+ if (tEncodeI8(pEncoder, pHandle->fetchMeta) < 0) return -1;
if (tEncodeI64(pEncoder, pHandle->consumerId) < 0) return -1;
if (tEncodeI64(pEncoder, pHandle->snapshotVer) < 0) return -1;
if (tEncodeI32(pEncoder, pHandle->epoch) < 0) return -1;
if (tEncodeI8(pEncoder, pHandle->execHandle.subType) < 0) return -1;
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
if (tEncodeCStr(pEncoder, pHandle->execHandle.execCol.qmsg) < 0) return -1;
+ } else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB){
+ int32_t size = taosHashGetSize(pHandle->execHandle.execDb.pFilterOutTbUid);
+ if (tEncodeI32(pEncoder, size) < 0) return -1;
+ void *pIter = NULL;
+ pIter = taosHashIterate(pHandle->execHandle.execDb.pFilterOutTbUid, pIter);
+ while(pIter){
+ int64_t *tbUid = (int64_t *)taosHashGetKey(pIter, NULL);
+ if (tEncodeI64(pEncoder, *tbUid) < 0) return -1;
+ pIter = taosHashIterate(pHandle->execHandle.execDb.pFilterOutTbUid, pIter);
+ }
+ } else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE){
+ if (tEncodeI64(pEncoder, pHandle->execHandle.execTb.suid) < 0) return -1;
}
tEndEncode(pEncoder);
return pEncoder->pos;
@@ -32,12 +45,25 @@ int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) {
int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeCStrTo(pDecoder, pHandle->subKey) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pHandle->fetchMeta) < 0) return -1;
if (tDecodeI64(pDecoder, &pHandle->consumerId) < 0) return -1;
if (tDecodeI64(pDecoder, &pHandle->snapshotVer) < 0) return -1;
if (tDecodeI32(pDecoder, &pHandle->epoch) < 0) return -1;
if (tDecodeI8(pDecoder, &pHandle->execHandle.subType) < 0) return -1;
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
if (tDecodeCStrAlloc(pDecoder, &pHandle->execHandle.execCol.qmsg) < 0) return -1;
+ }else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB){
+ pHandle->execHandle.execDb.pFilterOutTbUid =
+ taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ int32_t size = 0;
+ if (tDecodeI32(pDecoder, &size) < 0) return -1;
+ for(int32_t i = 0; i < size; i++){
+ int64_t tbUid = 0;
+ if (tDecodeI64(pDecoder, &tbUid) < 0) return -1;
+ taosHashPut(pHandle->execHandle.execDb.pFilterOutTbUid, &tbUid, sizeof(int64_t), NULL, 0);
+ }
+ } else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE){
+ if (tDecodeI64(pDecoder, &pHandle->execHandle.execTb.suid) < 0) return -1;
}
tEndDecode(pDecoder);
return 0;
@@ -249,27 +275,48 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
}
walRefVer(handle.pRef, handle.snapshotVer);
+ SReadHandle reader = {
+ .meta = pTq->pVnode->pMeta,
+ .vnode = pTq->pVnode,
+ .initTableReader = true,
+ .initTqReader = true,
+ .version = handle.snapshotVer,
+ };
+
if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- SReadHandle reader = {
- .meta = pTq->pVnode->pMeta,
- .vnode = pTq->pVnode,
- .initTableReader = true,
- .initTqReader = true,
- .version = handle.snapshotVer,
- };
-
- handle.execHandle.execCol.task = qCreateQueueExecTaskInfo(
+
+ handle.execHandle.task = qCreateQueueExecTaskInfo(
handle.execHandle.execCol.qmsg, &reader, &handle.execHandle.numOfCols, &handle.execHandle.pSchemaWrapper);
- ASSERT(handle.execHandle.execCol.task);
+ ASSERT(handle.execHandle.task);
void* scanner = NULL;
- qExtractStreamScanner(handle.execHandle.execCol.task, &scanner);
+ qExtractStreamScanner(handle.execHandle.task, &scanner);
ASSERT(scanner);
handle.execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner);
ASSERT(handle.execHandle.pExecReader);
- } else {
+ } else if (handle.execHandle.subType == TOPIC_SUB_TYPE__DB) {
handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
- handle.execHandle.execDb.pFilterOutTbUid =
- taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode);
+
+ buildSnapContext(reader.meta, reader.version, 0, handle.execHandle.subType, handle.fetchMeta, (SSnapContext **)(&reader.sContext));
+ handle.execHandle.task =
+ qCreateQueueExecTaskInfo(NULL, &reader, NULL, NULL);
+ } else if (handle.execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
+ handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
+
+ SArray* tbUidList = taosArrayInit(0, sizeof(int64_t));
+ vnodeGetCtbIdList(pTq->pVnode, handle.execHandle.execTb.suid, tbUidList);
+ tqDebug("vgId:%d, tq try to get all ctb, suid:%" PRId64, pTq->pVnode->config.vgId, handle.execHandle.execTb.suid);
+ for (int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) {
+ int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
+ tqDebug("vgId:%d, idx %d, uid:%" PRId64, TD_VID(pTq->pVnode), i, tbUid);
+ }
+ handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode);
+ tqReaderSetTbUidList(handle.execHandle.pExecReader, tbUidList);
+ taosArrayDestroy(tbUidList);
+
+ buildSnapContext(reader.meta, reader.version, handle.execHandle.execTb.suid, handle.execHandle.subType, handle.fetchMeta, (SSnapContext **)(&reader.sContext));
+ handle.execHandle.task =
+ qCreateQueueExecTaskInfo(NULL, &reader, NULL, NULL);
}
tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle.subKey, handle.consumerId, TD_VID(pTq->pVnode));
taosHashPut(pTq->pHandle, pKey, kLen, &handle, sizeof(STqHandle));
diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c
index e6a331f20e1943a3e40b672a0ef214322db09c5c..375130fa2c34f5b954e3fe3710914dc01e8f6363 100644
--- a/source/dnode/vnode/src/tq/tqRead.c
+++ b/source/dnode/vnode/src/tq/tqRead.c
@@ -15,6 +15,162 @@
#include "tq.h"
+
+bool isValValidForTable(STqHandle* pHandle, SWalCont *pHead){
+ if(pHandle->execHandle.subType != TOPIC_SUB_TYPE__TABLE){
+ return true;
+ }
+
+ int16_t msgType = pHead->msgType;
+ char* body = pHead->body;
+ int32_t bodyLen = pHead->bodyLen;
+
+ int64_t tbSuid = pHandle->execHandle.execTb.suid;
+ int64_t realTbSuid = 0;
+ SDecoder coder;
+ void* data = POINTER_SHIFT(body, sizeof(SMsgHead));
+ int32_t len = bodyLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+
+ if (msgType == TDMT_VND_CREATE_STB || msgType == TDMT_VND_ALTER_STB) {
+ SVCreateStbReq req = {0};
+ if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
+ goto end;
+ }
+ realTbSuid = req.suid;
+ } else if (msgType == TDMT_VND_DROP_STB) {
+ SVDropStbReq req = {0};
+ if (tDecodeSVDropStbReq(&coder, &req) < 0) {
+ goto end;
+ }
+ realTbSuid = req.suid;
+ } else if (msgType == TDMT_VND_CREATE_TABLE) {
+ SVCreateTbBatchReq req = {0};
+ if (tDecodeSVCreateTbBatchReq(&coder, &req) < 0) {
+ goto end;
+ }
+
+ int32_t needRebuild = 0;
+ SVCreateTbReq* pCreateReq = NULL;
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+ if(pCreateReq->type == TSDB_CHILD_TABLE && pCreateReq->ctb.suid == tbSuid){
+ needRebuild++;
+ }
+ }
+ if(needRebuild == 0){
+ // do nothing
+ }else if(needRebuild == req.nReqs){
+ realTbSuid = tbSuid;
+ }else{
+ realTbSuid = tbSuid;
+ SVCreateTbBatchReq reqNew = {0};
+ reqNew.pArray = taosArrayInit(req.nReqs, sizeof(struct SVCreateTbReq));
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+ if(pCreateReq->type == TSDB_CHILD_TABLE && pCreateReq->ctb.suid == tbSuid){
+ reqNew.nReqs++;
+ taosArrayPush(reqNew.pArray, pCreateReq);
+ }
+ }
+
+ int tlen;
+ int32_t ret = 0;
+ tEncodeSize(tEncodeSVCreateTbBatchReq, &reqNew, tlen, ret);
+ void* buf = taosMemoryMalloc(tlen);
+ if (NULL == buf) {
+ taosArrayDestroy(reqNew.pArray);
+ goto end;
+ }
+ SEncoder coderNew = {0};
+ tEncoderInit(&coderNew, buf, tlen - sizeof(SMsgHead));
+ tEncodeSVCreateTbBatchReq(&coderNew, &reqNew);
+ tEncoderClear(&coderNew);
+ memcpy(pHead->body + sizeof(SMsgHead), buf, tlen);
+ pHead->bodyLen = tlen + sizeof(SMsgHead);
+ taosMemoryFree(buf);
+ taosArrayDestroy(reqNew.pArray);
+ }
+ } else if (msgType == TDMT_VND_ALTER_TABLE) {
+ SVAlterTbReq req = {0};
+
+ if (tDecodeSVAlterTbReq(&coder, &req) < 0) {
+ goto end;
+ }
+
+ SMetaReader mr = {0};
+ metaReaderInit(&mr, pHandle->execHandle.pExecReader->pVnodeMeta, 0);
+
+ if (metaGetTableEntryByName(&mr, req.tbName) < 0) {
+ metaReaderClear(&mr);
+ goto end;
+ }
+ realTbSuid = mr.me.ctbEntry.suid;
+ metaReaderClear(&mr);
+ } else if (msgType == TDMT_VND_DROP_TABLE) {
+ SVDropTbBatchReq req = {0};
+
+ if (tDecodeSVDropTbBatchReq(&coder, &req) < 0) {
+ goto end;
+ }
+
+ int32_t needRebuild = 0;
+ SVDropTbReq* pDropReq = NULL;
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pDropReq = req.pReqs + iReq;
+
+ if(pDropReq->suid == tbSuid){
+ needRebuild++;
+ }
+ }
+ if(needRebuild == 0){
+ // do nothing
+ }else if(needRebuild == req.nReqs){
+ realTbSuid = tbSuid;
+ }else{
+ realTbSuid = tbSuid;
+ SVDropTbBatchReq reqNew = {0};
+ reqNew.pArray = taosArrayInit(req.nReqs, sizeof(SVDropTbReq));
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pDropReq = req.pReqs + iReq;
+ if(pDropReq->suid == tbSuid){
+ reqNew.nReqs++;
+ taosArrayPush(reqNew.pArray, pDropReq);
+ }
+ }
+
+ int tlen;
+ int32_t ret = 0;
+ tEncodeSize(tEncodeSVDropTbBatchReq, &reqNew, tlen, ret);
+ void* buf = taosMemoryMalloc(tlen);
+ if (NULL == buf) {
+ taosArrayDestroy(reqNew.pArray);
+ goto end;
+ }
+ SEncoder coderNew = {0};
+ tEncoderInit(&coderNew, buf, tlen - sizeof(SMsgHead));
+ tEncodeSVDropTbBatchReq(&coderNew, &reqNew);
+ tEncoderClear(&coderNew);
+ memcpy(pHead->body + sizeof(SMsgHead), buf, tlen);
+ pHead->bodyLen = tlen + sizeof(SMsgHead);
+ taosMemoryFree(buf);
+ taosArrayDestroy(reqNew.pArray);
+ }
+ } else if (msgType == TDMT_VND_DELETE) {
+ SDeleteRes req = {0};
+ if (tDecodeDeleteRes(&coder, &req) < 0) {
+ goto end;
+ }
+ realTbSuid = req.suid;
+ } else{
+ ASSERT(0);
+ }
+
+ end:
+ tDecoderClear(&coder);
+ return tbSuid == realTbSuid;
+}
+
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** ppCkHead) {
int32_t code = 0;
taosThreadMutexLock(&pHandle->pWalReader->mutex);
@@ -53,9 +209,11 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea
code = -1;
goto END;
}
- *fetchOffset = offset;
- code = 0;
- goto END;
+ if(isValValidForTable(pHandle, pHead)){
+ *fetchOffset = offset;
+ code = 0;
+ goto END;
+ }
}
}
code = walSkipFetchBody(pHandle->pWalReader, *ppCkHead);
@@ -68,7 +226,7 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea
offset++;
}
}
-END:
+ END:
taosThreadMutexUnlock(&pHandle->pWalReader->mutex);
return code;
}
@@ -398,7 +556,7 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
if (pIter == NULL) break;
STqHandle* pExec = (STqHandle*)pIter;
if (pExec->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- int32_t code = qUpdateQualifiedTableId(pExec->execHandle.execCol.task, tbUidList, isAdd);
+ int32_t code = qUpdateQualifiedTableId(pExec->execHandle.task, tbUidList, isAdd);
ASSERT(code == 0);
} else if (pExec->execHandle.subType == TOPIC_SUB_TYPE__DB) {
if (!isAdd) {
diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c
index b9f38976747f7e73f6bc6b40fe9dd968a3b8cabe..61c68775559ebcbaca1853fdadecaaa1456c170c 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCache.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCache.c
@@ -476,7 +476,7 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) {
if (code) goto _err;
if (!state->aBlockL) {
- state->aBlockL = taosArrayInit(0, sizeof(SBlockIdx));
+ state->aBlockL = taosArrayInit(0, sizeof(SBlockL));
} else {
taosArrayClear(state->aBlockL);
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
index 66843d9a2844c44e77e798ab47032ef75370a544..ea9a7ec7d9b3df80edbb1e5f93db5b2420f908e5 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
@@ -18,7 +18,7 @@
#include "tcommon.h"
#include "tsdb.h"
-typedef struct SLastrowReader {
+typedef struct SCacheRowsReader {
SVnode* pVnode;
STSchema* pSchema;
uint64_t uid;
@@ -27,9 +27,9 @@ typedef struct SLastrowReader {
int32_t type;
int32_t tableIndex; // currently returned result tables
SArray* pTableList; // table id list
-} SLastrowReader;
+} SCacheRowsReader;
-static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReader, const int32_t* slotIds) {
+static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SCacheRowsReader* pReader, const int32_t* slotIds) {
ASSERT(pReader->numOfCols <= taosArrayGetSize(pBlock->pDataBlock));
int32_t numOfRows = pBlock->info.rows;
@@ -61,8 +61,10 @@ static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReade
pBlock->info.rows += 1;
}
-int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t numOfCols, void** pReader) {
- SLastrowReader* p = taosMemoryCalloc(1, sizeof(SLastrowReader));
+int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t numOfCols, void** pReader) {
+ *pReader = NULL;
+
+ SCacheRowsReader* p = taosMemoryCalloc(1, sizeof(SCacheRowsReader));
if (p == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -81,9 +83,17 @@ int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList,
p->pTableList = pTableIdList;
p->transferBuf = taosMemoryCalloc(p->pSchema->numOfCols, POINTER_BYTES);
+ if (p->transferBuf == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) {
if (IS_VAR_DATA_TYPE(p->pSchema->columns[i].type)) {
p->transferBuf[i] = taosMemoryMalloc(p->pSchema->columns[i].bytes);
+ if (p->transferBuf[i] == NULL) {
+ tsdbCacherowsReaderClose(p);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
}
}
@@ -91,8 +101,8 @@ int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList,
return TSDB_CODE_SUCCESS;
}
-int32_t tsdbLastrowReaderClose(void* pReader) {
- SLastrowReader* p = pReader;
+int32_t tsdbCacherowsReaderClose(void* pReader) {
+ SCacheRowsReader* p = pReader;
if (p->pSchema != NULL) {
for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) {
@@ -107,28 +117,56 @@ int32_t tsdbLastrowReaderClose(void* pReader) {
return TSDB_CODE_SUCCESS;
}
-int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUidList) {
+static int32_t doExtractCacheRow(SCacheRowsReader* pr, SLRUCache* lruCache, uint64_t uid, STSRow** pRow, LRUHandle** h) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if ((pr->type & CACHESCAN_RETRIEVE_LAST_ROW) == CACHESCAN_RETRIEVE_LAST_ROW) {
+ code = tsdbCacheGetLastrowH(lruCache, uid, pr->pVnode->pTsdb, h);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+    // a non-NULL handle means the cache holds a last-row entry for this uid
+ if (*h != NULL) {
+ *pRow = (STSRow*)taosLRUCacheValue(lruCache, *h);
+ }
+ } else {
+ code = tsdbCacheGetLastH(lruCache, uid, pr->pVnode->pTsdb, h);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+    // a non-NULL handle means the cache holds a last-column entry for this uid
+ if (*h != NULL) {
+ SArray* pLast = (SArray*)taosLRUCacheValue(lruCache, *h);
+ tsdbCacheLastArray2Row(pLast, pRow, pr->pSchema);
+ }
+ }
+
+ return code;
+}
+
+int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUidList) {
if (pReader == NULL || pResBlock == NULL) {
return TSDB_CODE_INVALID_PARA;
}
- SLastrowReader* pr = pReader;
+ SCacheRowsReader* pr = pReader;
+ int32_t code = TSDB_CODE_SUCCESS;
SLRUCache* lruCache = pr->pVnode->pTsdb->lruCache;
LRUHandle* h = NULL;
STSRow* pRow = NULL;
size_t numOfTables = taosArrayGetSize(pr->pTableList);
// retrieve the only one last row of all tables in the uid list.
- if (pr->type == LASTROW_RETRIEVE_TYPE_SINGLE) {
+ if ((pr->type & CACHESCAN_RETRIEVE_TYPE_SINGLE) == CACHESCAN_RETRIEVE_TYPE_SINGLE) {
int64_t lastKey = INT64_MIN;
bool internalResult = false;
for (int32_t i = 0; i < numOfTables; ++i) {
STableKeyInfo* pKeyInfo = taosArrayGet(pr->pTableList, i);
- int32_t code = tsdbCacheGetLastrowH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h);
- // int32_t code = tsdbCacheGetLastH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h);
- if (code != TSDB_CODE_SUCCESS) {
+ code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h);
+ if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -136,9 +174,6 @@ int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t
continue;
}
- pRow = (STSRow*)taosLRUCacheValue(lruCache, h);
- // SArray* pLast = (SArray*)taosLRUCacheValue(lruCache, h);
- // tsdbCacheLastArray2Row(pLast, &pRow, pr->pSchema);
if (pRow->ts > lastKey) {
// Set result row into the same rowIndex repeatly, so we need to check if the internal result row has already
// appended or not.
@@ -155,25 +190,18 @@ int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t
tsdbCacheRelease(lruCache, h);
}
- } else if (pr->type == LASTROW_RETRIEVE_TYPE_ALL) {
+ } else if ((pr->type & CACHESCAN_RETRIEVE_TYPE_ALL) == CACHESCAN_RETRIEVE_TYPE_ALL) {
for (int32_t i = pr->tableIndex; i < numOfTables; ++i) {
STableKeyInfo* pKeyInfo = taosArrayGet(pr->pTableList, i);
-
- int32_t code = tsdbCacheGetLastrowH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h);
- // int32_t code = tsdbCacheGetLastH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h);
- if (code != TSDB_CODE_SUCCESS) {
+ code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h);
+ if (code != TSDB_CODE_SUCCESS) {
return code;
}
- // no data in the table of Uid
if (h == NULL) {
continue;
}
- pRow = (STSRow*)taosLRUCacheValue(lruCache, h);
- // SArray* pLast = (SArray*)taosLRUCacheValue(lruCache, h);
- // tsdbCacheLastArray2Row(pLast, &pRow, pr->pSchema);
-
saveOneRow(pRow, pResBlock, pr, slotIds);
taosArrayPush(pTableUidList, &pKeyInfo->uid);
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index 4a5d8d11f910017bed16c318db2b01bfe44a3180..17f30b51942199936b2fe671f82da27c6f2ea49d 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -16,9 +16,9 @@
#include "osDef.h"
#include "tsdb.h"
-#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)
+#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)
#define ALL_ROWS_CHECKED_INDEX (INT16_MIN)
-#define DEFAULT_ROW_INDEX_VAL (-1)
+#define INITIAL_ROW_INDEX_VAL (-1)
typedef enum {
EXTERNAL_ROWS_PREV = 0x1,
@@ -129,16 +129,22 @@ typedef struct SFileBlockDumpInfo {
bool allDumped;
} SFileBlockDumpInfo;
+typedef struct SUidOrderCheckInfo {
+ uint64_t* tableUidList; // access table uid list in uid ascending order list
+ int32_t currentIndex; // index in table uid list
+} SUidOrderCheckInfo;
+
typedef struct SReaderStatus {
- bool loadFromFile; // check file stage
- SHashObj* pTableMap; // SHash
- STableBlockScanInfo* pTableIter; // table iterator used in building in-memory buffer data blocks.
+ bool loadFromFile; // check file stage
+ bool composedDataBlock; // the returned data block is a composed block or not
+ SHashObj* pTableMap; // SHash
+ STableBlockScanInfo* pTableIter; // table iterator used in building in-memory buffer data blocks.
+ SUidOrderCheckInfo uidCheckInfo; // check all table in uid order
SFileBlockDumpInfo fBlockDumpInfo;
- SDFileSet* pCurrentFileset; // current opened file set
+ SDFileSet* pCurrentFileset; // current opened file set
SBlockData fileBlockData;
SFilesetIter fileIter;
SDataBlockIter blockIter;
- bool composedDataBlock; // the returned data block is a composed block or not
} SReaderStatus;
struct STsdbReader {
@@ -166,7 +172,7 @@ struct STsdbReader {
static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter);
static int buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t endKey, int32_t capacity,
STsdbReader* pReader);
-static TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader);
+static TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader);
static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, STsdbReader* pReader,
SRowMerger* pMerger);
static int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts, SRowMerger* pMerger);
@@ -178,10 +184,12 @@ static int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pR
static void setComposedBlockFlag(STsdbReader* pReader, bool composed);
static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order);
-static void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
+static int32_t doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
STsdbReader* pReader, bool* freeTSRow);
-static void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
+static int32_t doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
STSRow** pTSRow);
+static int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key, STsdbReader* pReader);
+
static int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STbData* pMemTbData,
STbData* piMemTbData);
static STsdb* getTsdbByRetentions(SVnode* pVnode, TSKEY winSKey, SRetention* retentions, const char* idstr,
@@ -226,7 +234,7 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableK
}
for (int32_t j = 0; j < numOfTables; ++j) {
- STableBlockScanInfo info = {.lastKey = 0, .uid = idList[j].uid, .indexInBlockL = DEFAULT_ROW_INDEX_VAL};
+ STableBlockScanInfo info = {.lastKey = 0, .uid = idList[j].uid, .indexInBlockL = INITIAL_ROW_INDEX_VAL};
if (ASCENDING_TRAVERSE(pTsdbReader->order)) {
if (info.lastKey == INT64_MIN || info.lastKey < pTsdbReader->window.skey) {
info.lastKey = pTsdbReader->window.skey;
@@ -258,7 +266,9 @@ static void resetDataBlockScanInfo(SHashObj* pTableMap) {
p->iter.iter = tsdbTbDataIterDestroy(p->iter.iter);
}
- p->delSkyline = taosArrayDestroy(p->delSkyline);
+ p->fileDelIndex = -1;
+ p->delSkyline = taosArrayDestroy(p->delSkyline);
+ p->lastBlockDelIndex = INITIAL_ROW_INDEX_VAL;
}
}
@@ -406,7 +416,7 @@ _err:
return false;
}
-static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order, SHashObj* pTableMap) {
+static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order) {
pIter->order = order;
pIter->index = -1;
pIter->numOfBlocks = 0;
@@ -415,7 +425,6 @@ static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order, SHashOb
} else {
taosArrayClear(pIter->blockList);
}
- pIter->pTableMap = pTableMap;
}
static void cleanupDataBlockIterator(SDataBlockIter* pIter) { taosArrayDestroy(pIter->blockList); }
@@ -511,86 +520,6 @@ _end:
return code;
}
-// void tsdbResetQueryHandleForNewTable(STsdbReader* queryHandle, SQueryTableDataCond* pCond, STableListInfo* tableList,
-// int32_t tWinIdx) {
-// STsdbReader* pTsdbReadHandle = queryHandle;
-
-// pTsdbReadHandle->order = pCond->order;
-// pTsdbReadHandle->window = pCond->twindows[tWinIdx];
-// pTsdbReadHandle->type = TSDB_QUERY_TYPE_ALL;
-// pTsdbReadHandle->cur.fid = -1;
-// pTsdbReadHandle->cur.win = TSWINDOW_INITIALIZER;
-// pTsdbReadHandle->checkFiles = true;
-// pTsdbReadHandle->activeIndex = 0; // current active table index
-// pTsdbReadHandle->locateStart = false;
-// pTsdbReadHandle->loadExternalRow = pCond->loadExternalRows;
-
-// if (ASCENDING_TRAVERSE(pCond->order)) {
-// assert(pTsdbReadHandle->window.skey <= pTsdbReadHandle->window.ekey);
-// } else {
-// assert(pTsdbReadHandle->window.skey >= pTsdbReadHandle->window.ekey);
-// }
-
-// // allocate buffer in order to load data blocks from file
-// memset(pTsdbReadHandle->suppInfo.pstatis, 0, sizeof(SColumnDataAgg));
-// memset(pTsdbReadHandle->suppInfo.plist, 0, POINTER_BYTES);
-
-// tsdbInitDataBlockLoadInfo(&pTsdbReadHandle->dataBlockLoadInfo);
-// tsdbInitCompBlockLoadInfo(&pTsdbReadHandle->compBlockLoadInfo);
-
-// SArray* pTable = NULL;
-// // STsdbMeta* pMeta = tsdbGetMeta(pTsdbReadHandle->pTsdb);
-
-// // pTsdbReadHandle->pTableCheckInfo = destroyTableCheckInfo(pTsdbReadHandle->pTableCheckInfo);
-
-// pTsdbReadHandle->pTableCheckInfo = NULL; // createDataBlockScanInfo(pTsdbReadHandle, groupList, pMeta,
-// // &pTable);
-// if (pTsdbReadHandle->pTableCheckInfo == NULL) {
-// // tsdbReaderClose(pTsdbReadHandle);
-// terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
-// }
-
-// // pTsdbReadHandle->prev = doFreeColumnInfoData(pTsdbReadHandle->prev);
-// // pTsdbReadHandle->next = doFreeColumnInfoData(pTsdbReadHandle->next);
-// }
-
-// SArray* tsdbGetQueriedTableList(STsdbReader** pHandle) {
-// assert(pHandle != NULL);
-
-// STsdbReader* pTsdbReadHandle = (STsdbReader*)pHandle;
-
-// size_t size = taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo);
-// SArray* res = taosArrayInit(size, POINTER_BYTES);
-// return res;
-// }
-
-// static int32_t binarySearchForBlock(SBlock* pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) {
-// int32_t firstSlot = 0;
-// int32_t lastSlot = numOfBlocks - 1;
-
-// int32_t midSlot = firstSlot;
-
-// while (1) {
-// numOfBlocks = lastSlot - firstSlot + 1;
-// midSlot = (firstSlot + (numOfBlocks >> 1));
-
-// if (numOfBlocks == 1) break;
-
-// if (skey > pBlock[midSlot].maxKey.ts) {
-// if (numOfBlocks == 2) break;
-// if ((order == TSDB_ORDER_DESC) && (skey < pBlock[midSlot + 1].minKey.ts)) break;
-// firstSlot = midSlot + 1;
-// } else if (skey < pBlock[midSlot].minKey.ts) {
-// if ((order == TSDB_ORDER_ASC) && (skey > pBlock[midSlot - 1].maxKey.ts)) break;
-// lastSlot = midSlot - 1;
-// } else {
-// break; // got the slot
-// }
-// }
-
-// return midSlot;
-// }
-
static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, SArray* pIndexList) {
SArray* aBlockIdx = taosArrayInit(8, sizeof(SBlockIdx));
@@ -651,7 +580,7 @@ static void cleanupTableScanInfo(SHashObj* pTableMap) {
}
// reset the index in last block when handing a new file
- px->indexInBlockL = -1;
+ px->indexInBlockL = INITIAL_ROW_INDEX_VAL;
tMapDataClear(&px->mapData);
taosArrayClear(px->pBlockList);
}
@@ -729,9 +658,11 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SArray*
int32_t total = pBlockNum->numOfLastBlocks + pBlockNum->numOfBlocks;
double el = (taosGetTimestampUs() - st) / 1000.0;
- tsdbDebug("load block of %d tables completed, blocks:%d in %d tables, lastBlock:%d, size:%.2f Kb, elapsed time:%.2f ms %s",
- numOfTables, pBlockNum->numOfBlocks, numOfQTable, pBlockNum->numOfLastBlocks, sizeInDisk
- / 1000.0, el, pReader->idStr);
+ tsdbDebug(
+ "load block of %d tables completed, blocks:%d in %d tables, lastBlock:%d, block-info-size:%.2f Kb, elapsed "
+ "time:%.2f ms %s",
+ numOfTables, pBlockNum->numOfBlocks, numOfQTable, pBlockNum->numOfLastBlocks, sizeInDisk / 1000.0, el,
+ pReader->idStr);
pReader->cost.numOfBlocks += total;
pReader->cost.headFileLoadTime += el;
@@ -859,71 +790,32 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter, SBlockData* pBlockData) {
int64_t st = taosGetTimestampUs();
- double elapsedTime = 0;
- int32_t code = 0;
SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter);
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+ ASSERT(pBlockInfo != NULL);
- if (pBlockInfo != NULL) {
- SBlock* pBlock = getCurrentBlock(pBlockIter);
- code = tsdbReadDataBlock(pReader->pFileReader, pBlock, pBlockData);
- if (code != TSDB_CODE_SUCCESS) {
- tsdbError("%p error occurs in loading file block, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
- ", rows:%d, code:%s %s",
- pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow,
- tstrerror(code), pReader->idStr);
- goto _error;
- }
-
- elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
-
- tsdbDebug("%p load file block into buffer, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
- ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s",
+ SBlock* pBlock = getCurrentBlock(pBlockIter);
+ int32_t code = tsdbReadDataBlock(pReader->pFileReader, pBlock, pBlockData);
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%p error occurs in loading file block, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
+ ", rows:%d, code:%s %s",
pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow,
- pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr);
- } else {
-#if 0
- SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader;
-
- uint64_t uid = pBlockInfo->uid;
- SArray* pBlocks = pLastBlockReader->pBlockL;
-
- pLastBlockReader->currentBlockIndex = -1;
-
- // find the correct SBlockL
- for(int32_t i = 0; i < taosArrayGetSize(pBlocks); ++i) {
- SBlockL* pBlock = taosArrayGet(pBlocks, i);
- if (pBlock->minUid >= uid && pBlock->maxUid <= uid) {
- pLastBlockReader->currentBlockIndex = i;
- break;
- }
- }
+ tstrerror(code), pReader->idStr);
+ return code;
+ }
-// SBlockL* pBlockL = taosArrayGet(pLastBlockReader->pBlockL, *index);
- code = tsdbReadLastBlock(pReader->pFileReader, pBlockL, pBlockData);
- if (code != TSDB_CODE_SUCCESS) {
- tsdbDebug("%p error occurs in loading last block into buffer, last block index:%d, total:%d brange:%" PRId64 "-%" PRId64
- ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", code:%s %s",
- pReader, *index, pBlockIter->numOfBlocks.numOfLastBlocks, 0, 0, pBlockL->nRow,
- pBlockL->minVer, pBlockL->maxVer, tstrerror(code), pReader->idStr);
- goto _error;
- }
+ double elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
- tsdbDebug("%p load last file block into buffer, last block index:%d, total:%d brange:%" PRId64 "-%" PRId64
- ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s",
- pReader, *index, pBlockIter->numOfBlocks.numOfLastBlocks, 0, 0, pBlockL->nRow,
- pBlockL->minVer, pBlockL->maxVer, elapsedTime, pReader->idStr);
-#endif
- }
+ tsdbDebug("%p load file block into buffer, global index:%d, index in table block list:%d, brange:%" PRId64 "-%" PRId64
+ ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s",
+ pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow,
+ pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr);
pReader->cost.blockLoadTime += elapsedTime;
pDumpInfo->allDumped = false;
return TSDB_CODE_SUCCESS;
-
-_error:
- return code;
}
static void cleanupBlockOrderSupporter(SBlockOrderSupporter* pSup) {
@@ -977,10 +869,10 @@ static int32_t fileDataBlockOrderCompar(const void* pLeft, const void* pRight, v
}
static int32_t doSetCurrentBlock(SDataBlockIter* pBlockIter) {
- SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter);
- if (pFBlock != NULL) {
- STableBlockScanInfo* pScanInfo = taosHashGet(pBlockIter->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid));
- int32_t* mapDataIndex = taosArrayGet(pScanInfo->pBlockList, pFBlock->tbBlockIdx);
+ SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter);
+ if (pBlockInfo != NULL) {
+ STableBlockScanInfo* pScanInfo = taosHashGet(pBlockIter->pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
+ int32_t* mapDataIndex = taosArrayGet(pScanInfo->pBlockList, pBlockInfo->tbBlockIdx);
tMapDataGetItemByIdx(&pScanInfo->mapData, *mapDataIndex, &pBlockIter->block, tGetBlock);
}
@@ -996,6 +888,7 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
pBlockIter->numOfBlocks = numOfBlocks;
taosArrayClear(pBlockIter->blockList);
+ pBlockIter->pTableMap = pReader->status.pTableMap;
// access data blocks according to the offset of each block in asc/desc order.
int32_t numOfTables = (int32_t)taosHashGetSize(pReader->status.pTableMap);
@@ -1394,7 +1287,7 @@ static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader*
return pReader->pMemSchema;
}
-static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow,
+static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow,
SIterInfo* pIter, int64_t key, SLastBlockReader* pLastBlockReader) {
SRowMerger merge = {0};
STSRow* pTSRow = NULL;
@@ -1502,7 +1395,11 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf
}
}
- tRowMergerGetRow(&merge, &pTSRow);
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
taosMemoryFree(pTSRow);
@@ -1510,88 +1407,95 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf
return TSDB_CODE_SUCCESS;
}
-static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow,
- SIterInfo* pIter, int64_t key, SLastBlockReader* pLastBlockReader) {
- SRowMerger merge = {0};
- STSRow* pTSRow = NULL;
- SBlockData* pBlockData = &pReader->status.fileBlockData;
- SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader, STsdbReader* pReader,
+ STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData,
+ bool mergeBlockData) {
+ SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
+ int64_t tsLastBlock = getCurrentKeyInLastBlock(pLastBlockReader);
- TSDBKEY k = TSDBROW_KEY(pRow);
- TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
- SArray* pDelList = pBlockScanInfo->delSkyline;
- bool freeTSRow = false;
- uint64_t uid = pBlockScanInfo->uid;
+ STSRow* pTSRow = NULL;
+ SRowMerger merge = {0};
- // ascending order traverse
- if (ASCENDING_TRAVERSE(pReader->order)) {
- if (key < k.ts) {
- // imem & mem are all empty, only file exist
- if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
- return TSDB_CODE_SUCCESS;
- } else {
- tRowMergerInit(&merge, &fRow, pReader->pSchema);
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
- freeTSRow = true;
- }
- } else if (k.ts < key) { // k.ts < key
- doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
- } else { // k.ts == key, ascending order: file block ----> imem rows -----> mem rows
- tRowMergerInit(&merge, &fRow, pReader->pSchema);
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ TSDBROW fRow = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
- tRowMerge(&merge, pRow);
- doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
+ tRowMergerInit(&merge, &fRow, pReader->pSchema);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
- freeTSRow = true;
+ // merge with block data if ts == key
+ if (mergeBlockData) {
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ }
+
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+
+ taosMemoryFree(pTSRow);
+ tRowMergerClear(&merge);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader* pLastBlockReader, int64_t key,
+ STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) {
+ SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+
+ if (pBlockData->nRow > 0) {
+ // no last block available, only data block exists
+ if (pLastBlockReader->lastBlockData.nRow == 0 || (!hasDataInLastBlock(pLastBlockReader))) {
+ return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
}
- } else { // descending order scan
- if (key < k.ts) {
- doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
- } else if (k.ts < key) {
- if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
- return TSDB_CODE_SUCCESS;
- } else {
+
+ // row in last file block
+ TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
+ int64_t ts = getCurrentKeyInLastBlock(pLastBlockReader);
+ ASSERT(ts >= key);
+
+ if (ASCENDING_TRAVERSE(pReader->order)) {
+ if (key < ts) { // imem, mem are all empty, file blocks (data blocks and last block) exist
+ return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
+ } else if (key == ts) {
+ STSRow* pTSRow = NULL;
+ SRowMerger merge = {0};
+
tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
- freeTSRow = true;
- }
- } else { // descending order: mem rows -----> imem rows ------> file block
- STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge);
- tRowMergerInit(&merge, pRow, pSchema);
- doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
- tRowMerge(&merge, &fRow);
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
- tRowMergerGetRow(&merge, &pTSRow);
- freeTSRow = true;
+ taosMemoryFree(pTSRow);
+ tRowMergerClear(&merge);
+ return code;
+ } else {
+ ASSERT(0);
+ return TSDB_CODE_SUCCESS;
+ }
+ } else { // desc order
+ return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, pBlockData, true);
}
+ } else { // only last block exists
+ return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false);
}
-
- tRowMergerClear(&merge);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
-
- if (freeTSRow) {
- taosMemoryFree(pTSRow);
- }
-
- return TSDB_CODE_SUCCESS;
}
-static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData, SLastBlockReader* pLastBlockReader) {
+static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData,
+ SLastBlockReader* pLastBlockReader) {
SRowMerger merge = {0};
STSRow* pTSRow = NULL;
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
SArray* pDelList = pBlockScanInfo->delSkyline;
- TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pDelList, pReader);
- TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pDelList, pReader);
+ TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pDelList, pReader);
+ TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader);
ASSERT(pRow != NULL && piRow != NULL);
SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
@@ -1605,7 +1509,7 @@ static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo
TSDBKEY k = TSDBROW_KEY(pRow);
TSDBKEY ik = TSDBROW_KEY(piRow);
- int64_t minKey = 0;//INT64_MAX;
+ int64_t minKey = 0;
if (ASCENDING_TRAVERSE(pReader->order)) {
minKey = INT64_MAX; // let's find the minimum
if (minKey > k.ts) {
@@ -1726,14 +1630,19 @@ static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo
}
}
- tRowMergerGetRow(&merge, &pTSRow);
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
- return TSDB_CODE_SUCCESS;
+ return code;
}
+#if 0
static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) {
SRowMerger merge = {0};
STSRow* pTSRow = NULL;
@@ -1741,8 +1650,8 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
SArray* pDelList = pBlockScanInfo->delSkyline;
- TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pDelList, pReader);
- TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pDelList, pReader);
+ TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pDelList, pReader);
+ TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader);
ASSERT(pRow != NULL && piRow != NULL);
int64_t key = pBlockData->aTSKEY[pDumpInfo->rowIndex];
@@ -1779,7 +1688,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
// [3] ik.ts < key <= k.ts
// [4] ik.ts < k.ts <= key
if (ik.ts < k.ts) {
- doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
+ doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
if (freeTSRow) {
taosMemoryFree(pTSRow);
@@ -1790,7 +1699,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
// [5] k.ts < key <= ik.ts
// [6] k.ts < ik.ts <= key
if (k.ts < ik.ts) {
- doMergeMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader, &freeTSRow);
+ doMergeMemTableMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader, &freeTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
if (freeTSRow) {
taosMemoryFree(pTSRow);
@@ -1836,7 +1745,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
// [3] ik.ts > k.ts >= Key
// [4] ik.ts > key >= k.ts
if (ik.ts > key) {
- doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
+ doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
if (freeTSRow) {
taosMemoryFree(pTSRow);
@@ -1859,7 +1768,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
//[7] key = ik.ts > k.ts
if (key == ik.ts) {
- doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
+ doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
tRowMerge(&merge, &fRow);
@@ -1876,6 +1785,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
ASSERT(0);
return -1;
}
+#endif
static bool isValidFileBlockRow(SBlockData* pBlockData, SFileBlockDumpInfo* pDumpInfo,
STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) {
@@ -1926,7 +1836,8 @@ static void setAllRowsChecked(SLastBlockReader *pLastBlockReader) {
}
static bool nextRowInLastBlock(SLastBlockReader *pLastBlockReader, STableBlockScanInfo* pBlockScanInfo) {
- int32_t step = (pLastBlockReader->order == TSDB_ORDER_ASC) ? 1 : -1;
+ bool asc = ASCENDING_TRAVERSE(pLastBlockReader->order);
+ int32_t step = (asc) ? 1 : -1;
if (*pLastBlockReader->rowIndex == ALL_ROWS_CHECKED_INDEX) {
return false;
}
@@ -1935,8 +1846,20 @@ static bool nextRowInLastBlock(SLastBlockReader *pLastBlockReader, STableBlockSc
SBlockData* pBlockData = &pLastBlockReader->lastBlockData;
for(int32_t i = *(pLastBlockReader->rowIndex); i < pBlockData->nRow && i >= 0; i += step) {
- if (pBlockData->aUid != NULL && pBlockData->aUid[i] != pLastBlockReader->uid) {
- continue;
+ if (pBlockData->aUid != NULL) {
+ if (asc) {
+ if (pBlockData->aUid[i] < pLastBlockReader->uid) {
+ continue;
+ } else if (pBlockData->aUid[i] > pLastBlockReader->uid) {
+ break;
+ }
+ } else {
+ if (pBlockData->aUid[i] > pLastBlockReader->uid) {
+ continue;
+ } else if (pBlockData->aUid[i] < pLastBlockReader->uid) {
+ break;
+ }
+ }
}
int64_t ts = pBlockData->aTSKEY[i];
@@ -1982,132 +1905,62 @@ static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader) {
if (*pLastBlockReader->rowIndex == ALL_ROWS_CHECKED_INDEX) {
return false;
}
+
+ ASSERT(pLastBlockReader->lastBlockData.nRow > 0);
return true;
}
-// todo refactor
+int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key, STsdbReader* pReader) {
+ SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+
+ TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
+
+ if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
+ return TSDB_CODE_SUCCESS;
+ } else {
+ STSRow* pTSRow = NULL;
+ SRowMerger merge = {0};
+
+ tRowMergerInit(&merge, &fRow, pReader->pSchema);
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+
+ taosMemoryFree(pTSRow);
+ tRowMergerClear(&merge);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo,
SBlockData* pBlockData, SLastBlockReader* pLastBlockReader) {
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
int64_t key = (pBlockData->nRow > 0)? pBlockData->aTSKEY[pDumpInfo->rowIndex]:INT64_MIN;
- TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
- TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
+ TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
+ TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
if (pBlockScanInfo->iter.hasVal && pBlockScanInfo->iiter.hasVal) {
- return doMergeMultiLevelRowsRv(pReader, pBlockScanInfo, pBlockData, pLastBlockReader);
+ return doMergeMultiLevelRows(pReader, pBlockScanInfo, pBlockData, pLastBlockReader);
} else {
// imem + file + last block
if (pBlockScanInfo->iiter.hasVal) {
- return doMergeBufAndFileRows_Rv(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, key, pLastBlockReader);
+ return doMergeBufAndFileRows(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, key, pLastBlockReader);
}
- // mem + file
+ // mem + file + last block
if (pBlockScanInfo->iter.hasVal) {
- return doMergeBufAndFileRows_Rv(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, key, pLastBlockReader);
+ return doMergeBufAndFileRows(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, key, pLastBlockReader);
}
- if (pBlockData->nRow > 0) {
- TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
-
- // no last block available, only data block exists
- if (pLastBlockReader->lastBlockData.nRow == 0 || (!hasDataInLastBlock(pLastBlockReader))) {
- if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
- return TSDB_CODE_SUCCESS;
- } else {
- STSRow* pTSRow = NULL;
- SRowMerger merge = {0};
-
- tRowMergerInit(&merge, &fRow, pReader->pSchema);
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
-
- taosMemoryFree(pTSRow);
- tRowMergerClear(&merge);
- return TSDB_CODE_SUCCESS;
- }
- }
-
- // row in last file block
- int64_t ts = getCurrentKeyInLastBlock(pLastBlockReader);
- ASSERT(ts >= key);
-
- if (ASCENDING_TRAVERSE(pReader->order)) {
- if (key < ts) {
- // imem & mem are all empty, only file exist
- if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
- return TSDB_CODE_SUCCESS;
- } else {
- STSRow* pTSRow = NULL;
- SRowMerger merge = {0};
-
- tRowMergerInit(&merge, &fRow, pReader->pSchema);
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
-
- taosMemoryFree(pTSRow);
- tRowMergerClear(&merge);
- return TSDB_CODE_SUCCESS;
- }
- } else if (key == ts) {
- STSRow* pTSRow = NULL;
- SRowMerger merge = {0};
-
- tRowMergerInit(&merge, &fRow, pReader->pSchema);
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge);
-
- tRowMergerGetRow(&merge, &pTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
-
- taosMemoryFree(pTSRow);
- tRowMergerClear(&merge);
- return TSDB_CODE_SUCCESS;
- } else {
- ASSERT(0);
- return TSDB_CODE_SUCCESS;
- }
- } else { // desc order
- SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
- TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
-
- STSRow* pTSRow = NULL;
- SRowMerger merge = {0};
- tRowMergerInit(&merge, &fRow1, pReader->pSchema);
- doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge);
-
- if (ts == key) {
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- }
-
- tRowMergerGetRow(&merge, &pTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
-
- taosMemoryFree(pTSRow);
- tRowMergerClear(&merge);
- return TSDB_CODE_SUCCESS;
- }
- } else { // only last block exists
- SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
- int64_t tsLastBlock = getCurrentKeyInLastBlock(pLastBlockReader);
-
- STSRow* pTSRow = NULL;
- SRowMerger merge = {0};
-
- TSDBROW fRow = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
-
- tRowMergerInit(&merge, &fRow, pReader->pSchema);
- doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
-
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
-
- taosMemoryFree(pTSRow);
- tRowMergerClear(&merge);
- return TSDB_CODE_SUCCESS;
- }
+      // file data blocks + last block
+ return mergeFileBlockAndLastBlock(pReader, pLastBlockReader, key, pBlockScanInfo, pBlockData);
}
}
@@ -2132,9 +1985,8 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
while (1) {
// todo check the validate of row in file block
+ bool hasBlockData = false;
{
- bool hasBlockData = false;
-
while (pBlockData->nRow > 0) { // find the first qualified row in data block
if (isValidFileBlockRow(pBlockData, pDumpInfo, pBlockScanInfo, pReader)) {
hasBlockData = true;
@@ -2149,13 +2001,13 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
break;
}
}
+ }
- bool hasBlockLData = hasDataInLastBlock(pLastBlockReader);
+ bool hasBlockLData = hasDataInLastBlock(pLastBlockReader);
- // no data in last block and block, no need to proceed.
- if ((hasBlockData == false) && (hasBlockLData == false)) {
- break;
- }
+  // no data in either the last block or the data block, no need to proceed.
+ if ((hasBlockData == false) && (hasBlockLData == false)) {
+ break;
}
buildComposedDataBlockImpl(pReader, pBlockScanInfo, pBlockData, pLastBlockReader);
@@ -2178,9 +2030,12 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
setComposedBlockFlag(pReader, true);
int64_t et = taosGetTimestampUs();
- tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 " rows:%d, elapsed time:%.2f ms %s",
- pReader, pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
- pResBlock->info.rows, (et - st) / 1000.0, pReader->idStr);
+ if (pResBlock->info.rows > 0) {
+ tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64
+ " rows:%d, elapsed time:%.2f ms %s",
+ pReader, pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
+ pResBlock->info.rows, (et - st) / 1000.0, pReader->idStr);
+ }
return TSDB_CODE_SUCCESS;
}
@@ -2337,12 +2192,12 @@ static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* p
TSDBKEY key = {.ts = TSKEY_INITIAL_VAL};
initMemDataIterator(pScanInfo, pReader);
- TSDBROW* pRow = getValidRow(&pScanInfo->iter, pScanInfo->delSkyline, pReader);
+ TSDBROW* pRow = getValidMemRow(&pScanInfo->iter, pScanInfo->delSkyline, pReader);
if (pRow != NULL) {
key = TSDBROW_KEY(pRow);
}
- pRow = getValidRow(&pScanInfo->iiter, pScanInfo->delSkyline, pReader);
+ pRow = getValidMemRow(&pScanInfo->iiter, pScanInfo->delSkyline, pReader);
if (pRow != NULL) {
TSDBKEY k = TSDBROW_KEY(pRow);
if (key.ts > k.ts) {
@@ -2470,22 +2325,98 @@ static int32_t doLoadRelatedLastBlock(SLastBlockReader* pLastBlockReader, STable
return TSDB_CODE_SUCCESS;
}
-static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
- SReaderStatus* pStatus = &pReader->status;
- SLastBlockReader* pLastBlockReader = pStatus->fileIter.pLastBlockReader;
+static int32_t uidComparFunc(const void* p1, const void* p2) {
+ uint64_t pu1 = *(uint64_t*) p1;
+ uint64_t pu2 = *(uint64_t*) p2;
+ if (pu1 == pu2) {
+ return 0;
+ } else {
+ return (pu1 < pu2)? -1:1;
+ }
+}
- while(1) {
- if (pStatus->pTableIter == NULL) {
- pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, NULL);
+static void extractOrderedTableUidList(SUidOrderCheckInfo *pOrderCheckInfo, SReaderStatus* pStatus) {
+ int32_t index = 0;
+ int32_t total = taosHashGetSize(pStatus->pTableMap);
+
+ void* p = taosHashIterate(pStatus->pTableMap, NULL);
+ while(p != NULL) {
+ STableBlockScanInfo* pScanInfo = p;
+ pOrderCheckInfo->tableUidList[index++] = pScanInfo->uid;
+ p = taosHashIterate(pStatus->pTableMap, p);
+ }
+
+ taosSort(pOrderCheckInfo->tableUidList, total, sizeof(uint64_t), uidComparFunc);
+}
+
+static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus) {
+ if (pOrderCheckInfo->tableUidList == NULL) {
+ int32_t total = taosHashGetSize(pStatus->pTableMap);
+
+ pOrderCheckInfo->currentIndex = 0;
+ pOrderCheckInfo->tableUidList = taosMemoryMalloc(total * sizeof(uint64_t));
+ if (pOrderCheckInfo->tableUidList == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ extractOrderedTableUidList(pOrderCheckInfo, pStatus);
+
+ uint64_t uid = pOrderCheckInfo->tableUidList[0];
+ pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
+ } else {
+ if (pStatus->pTableIter == NULL) { // it is the last block of a new file
+// ASSERT(pOrderCheckInfo->currentIndex == taosHashGetSize(pStatus->pTableMap));
+
+ pOrderCheckInfo->currentIndex = 0;
+ uint64_t uid = pOrderCheckInfo->tableUidList[pOrderCheckInfo->currentIndex];
+ pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
+
+      // the tableMap has already been updated
if (pStatus->pTableIter == NULL) {
- return TSDB_CODE_SUCCESS;
+ void* p = taosMemoryRealloc(pOrderCheckInfo->tableUidList, taosHashGetSize(pStatus->pTableMap)*sizeof(uint64_t));
+ if (p == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ pOrderCheckInfo->tableUidList = p;
+ extractOrderedTableUidList(pOrderCheckInfo, pStatus);
+
+ uid = pOrderCheckInfo->tableUidList[0];
+ pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
}
}
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+static bool moveToNextTable(SUidOrderCheckInfo *pOrderedCheckInfo, SReaderStatus* pStatus) {
+ pOrderedCheckInfo->currentIndex += 1;
+ if (pOrderedCheckInfo->currentIndex >= taosHashGetSize(pStatus->pTableMap)) {
+ pStatus->pTableIter = NULL;
+ return false;
+ }
+
+ uint64_t uid = pOrderedCheckInfo->tableUidList[pOrderedCheckInfo->currentIndex];
+ pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
+ ASSERT(pStatus->pTableIter != NULL);
+ return true;
+}
+
+static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
+ SReaderStatus* pStatus = &pReader->status;
+ SLastBlockReader* pLastBlockReader = pStatus->fileIter.pLastBlockReader;
+
+ SUidOrderCheckInfo *pOrderedCheckInfo = &pStatus->uidCheckInfo;
+ int32_t code = initOrderCheckInfo(pOrderedCheckInfo, pStatus);
+ if (code != TSDB_CODE_SUCCESS || (taosHashGetSize(pStatus->pTableMap) == 0)) {
+ return code;
+ }
+
+ while(1) {
// load the last data block of current table
- // todo opt perf by avoiding load last block repeatly
STableBlockScanInfo* pScanInfo = pStatus->pTableIter;
- int32_t code = doLoadRelatedLastBlock(pLastBlockReader, pScanInfo, pReader);
+ code = doLoadRelatedLastBlock(pLastBlockReader, pScanInfo, pReader);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -2493,19 +2424,20 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
if (pLastBlockReader->currentBlockIndex != -1) {
initLastBlockReader(pLastBlockReader, pScanInfo->uid, &pScanInfo->indexInBlockL);
int32_t index = pScanInfo->indexInBlockL;
- if (index == DEFAULT_ROW_INDEX_VAL || index == pLastBlockReader->lastBlockData.nRow) {
+
+ if (index == INITIAL_ROW_INDEX_VAL || index == pLastBlockReader->lastBlockData.nRow) {
bool hasData = nextRowInLastBlock(pLastBlockReader, pScanInfo);
if (!hasData) { // current table does not have rows in last block, try next table
- pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
- if (pStatus->pTableIter == NULL) {
+ bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus);
+ if (!hasNexTable) {
return TSDB_CODE_SUCCESS;
}
continue;
}
}
} else { // no data in last block, try next table
- pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
- if (pStatus->pTableIter == NULL) {
+ bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus);
+ if (!hasNexTable) {
return TSDB_CODE_SUCCESS;
}
continue;
@@ -2521,8 +2453,8 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
}
// current table is exhausted, let's try next table
- pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
- if (pStatus->pTableIter == NULL) {
+ bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus);
+ if (!hasNexTable) {
return TSDB_CODE_SUCCESS;
}
}
@@ -2560,7 +2492,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
// note: the lastblock may be null here
initLastBlockReader(pLastBlockReader, pScanInfo->uid, &pScanInfo->indexInBlockL);
- if (pScanInfo->indexInBlockL == DEFAULT_ROW_INDEX_VAL || pScanInfo->indexInBlockL == pLastBlockReader->lastBlockData.nRow) {
+ if (pScanInfo->indexInBlockL == INITIAL_ROW_INDEX_VAL || pScanInfo->indexInBlockL == pLastBlockReader->lastBlockData.nRow) {
bool hasData = nextRowInLastBlock(pLastBlockReader, pScanInfo);
}
}
@@ -2672,7 +2604,7 @@ static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBl
code = initBlockIterator(pReader, pBlockIter, num.numOfBlocks);
} else { // no block data, only last block exists
tBlockDataReset(&pReader->status.fileBlockData);
- resetDataBlockIterator(pBlockIter, pReader->order, pReader->status.pTableMap);
+ resetDataBlockIterator(pBlockIter, pReader->order);
}
SLastBlockReader* pLReader = pReader->status.fileIter.pLastBlockReader;
@@ -2744,7 +2676,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
initBlockDumpInfo(pReader, pBlockIter);
} else if (taosArrayGetSize(pReader->status.fileIter.pLastBlockReader->pBlockL) > 0) { // data blocks in current file are exhausted, let's try the next file now
tBlockDataReset(&pReader->status.fileBlockData);
- resetDataBlockIterator(pBlockIter, pReader->order, pReader->status.pTableMap);
+ resetDataBlockIterator(pBlockIter, pReader->order);
goto _begin;
} else {
code = initForFirstBlockInFile(pReader, pBlockIter);
@@ -2928,7 +2860,7 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32
return false;
}
-TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader) {
+TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader) {
if (!pIter->hasVal) {
return NULL;
}
@@ -2976,7 +2908,7 @@ int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDe
}
// data exists but not valid
- TSDBROW* pRow = getValidRow(pIter, pDelList, pReader);
+ TSDBROW* pRow = getValidMemRow(pIter, pDelList, pReader);
if (pRow == NULL) {
break;
}
@@ -3100,7 +3032,6 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc
return TSDB_CODE_SUCCESS;
}
-// todo check if the rows are dropped or not
int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts, SRowMerger* pMerger) {
while(nextRowInLastBlock(pLastBlockReader, pScanInfo)) {
int64_t next1 = getCurrentKeyInLastBlock(pLastBlockReader);
@@ -3115,8 +3046,8 @@ int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockSc
return TSDB_CODE_SUCCESS;
}
-void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
- STsdbReader* pReader, bool* freeTSRow) {
+int32_t doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
+ STsdbReader* pReader, bool* freeTSRow) {
TSDBROW* pNextRow = NULL;
TSDBROW current = *pRow;
@@ -3126,19 +3057,19 @@ void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDe
if (!pIter->hasVal) {
*pTSRow = current.pTSRow;
*freeTSRow = false;
- return;
+ return TSDB_CODE_SUCCESS;
} else { // has next point in mem/imem
- pNextRow = getValidRow(pIter, pDelList, pReader);
+ pNextRow = getValidMemRow(pIter, pDelList, pReader);
if (pNextRow == NULL) {
*pTSRow = current.pTSRow;
*freeTSRow = false;
- return;
+ return TSDB_CODE_SUCCESS;
}
if (current.pTSRow->ts != pNextRow->pTSRow->ts) {
*pTSRow = current.pTSRow;
*freeTSRow = false;
- return;
+ return TSDB_CODE_SUCCESS;
}
}
}
@@ -3158,13 +3089,17 @@ void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDe
tRowMergerAdd(&merge, pNextRow, pTSchema1);
doMergeRowsInBuf(pIter, uid, current.pTSRow->ts, pDelList, &merge, pReader);
- tRowMergerGetRow(&merge, pTSRow);
- tRowMergerClear(&merge);
+ int32_t code = tRowMergerGetRow(&merge, pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+ tRowMergerClear(&merge);
*freeTSRow = true;
+ return TSDB_CODE_SUCCESS;
}
-void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
+int32_t doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
STSRow** pTSRow) {
SRowMerger merge = {0};
@@ -3189,14 +3124,16 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo
doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
}
- tRowMergerGetRow(&merge, pTSRow);
+ int32_t code = tRowMergerGetRow(&merge, pTSRow);
+ return code;
}
int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STSRow** pTSRow, int64_t endKey,
bool* freeTSRow) {
- TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
- TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
+ TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
+ TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
SArray* pDelList = pBlockScanInfo->delSkyline;
+ uint64_t uid = pBlockScanInfo->uid;
// todo refactor
bool asc = ASCENDING_TRAVERSE(pReader->order);
@@ -3218,26 +3155,30 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR
TSDBKEY k = TSDBROW_KEY(pRow);
TSDBKEY ik = TSDBROW_KEY(piRow);
- if (ik.ts < k.ts) { // ik.ts < k.ts
- doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
- } else if (k.ts < ik.ts) {
- doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (ik.ts != k.ts) {
+ if (((ik.ts < k.ts) && asc) || ((ik.ts > k.ts) && (!asc))) { // ik.ts < k.ts
+ code = doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
+ } else if (((k.ts < ik.ts) && asc) || ((k.ts > ik.ts) && (!asc))) {
+ code = doMergeMemTableMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
+ }
} else { // ik.ts == k.ts
- doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, pTSRow);
*freeTSRow = true;
+ code = doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
}
- return TSDB_CODE_SUCCESS;
+ return code;
}
if (pBlockScanInfo->iter.hasVal && pRow != NULL) {
- doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
- return TSDB_CODE_SUCCESS;
+ return doMergeMemTableMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
}
if (pBlockScanInfo->iiter.hasVal && piRow != NULL) {
- doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
- return TSDB_CODE_SUCCESS;
+ return doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
}
return TSDB_CODE_SUCCESS;
@@ -3364,7 +3305,7 @@ int32_t tsdbSetTableId(STsdbReader* pReader, int64_t uid) {
ASSERT(pReader != NULL);
taosHashClear(pReader->status.pTableMap);
- STableBlockScanInfo info = {.lastKey = 0, .uid = uid, .indexInBlockL = DEFAULT_ROW_INDEX_VAL};
+ STableBlockScanInfo info = {.lastKey = 0, .uid = uid, .indexInBlockL = INITIAL_ROW_INDEX_VAL};
taosHashPut(pReader->status.pTableMap, &info.uid, sizeof(uint64_t), &info, sizeof(info));
return TDB_CODE_SUCCESS;
}
@@ -3435,10 +3376,10 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
}
if (pCond->suid != 0) {
- pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pReader->suid, -1);
+ pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pReader->suid, pCond->schemaVersion);
} else if (taosArrayGetSize(pTableList) > 0) {
STableKeyInfo* pKey = taosArrayGet(pTableList, 0);
- pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pKey->uid, -1);
+ pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pKey->uid, pCond->schemaVersion);
}
int32_t numOfTables = taosArrayGetSize(pTableList);
@@ -3460,7 +3401,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
SDataBlockIter* pBlockIter = &pReader->status.blockIter;
initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
- resetDataBlockIterator(&pReader->status.blockIter, pReader->order, pReader->status.pTableMap);
+ resetDataBlockIterator(&pReader->status.blockIter, pReader->order);
// no data in files, let's try buffer in memory
if (pReader->status.fileIter.numOfFiles == 0) {
@@ -3481,7 +3422,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
}
initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader);
- resetDataBlockIterator(&pPrevReader->status.blockIter, pPrevReader->order, pReader->status.pTableMap);
+ resetDataBlockIterator(&pPrevReader->status.blockIter, pPrevReader->order);
// no data in files, let's try buffer in memory
if (pPrevReader->status.fileIter.numOfFiles == 0) {
@@ -3532,6 +3473,8 @@ void tsdbReaderClose(STsdbReader* pReader) {
tsdbDataFReaderClose(&pReader->pFileReader);
}
+ taosMemoryFree(pReader->status.uidCheckInfo.tableUidList);
+
SFilesetIter* pFilesetIter = &pReader->status.fileIter;
if (pFilesetIter->pLastBlockReader != NULL) {
tBlockDataDestroy(&pFilesetIter->pLastBlockReader->lastBlockData, true);
@@ -3782,7 +3725,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
tsdbDataFReaderClose(&pReader->pFileReader);
initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
- resetDataBlockIterator(&pReader->status.blockIter, pReader->order, pReader->status.pTableMap);
+ resetDataBlockIterator(&pReader->status.blockIter, pReader->order);
resetDataBlockScanInfo(pReader->status.pTableMap);
int32_t code = 0;
diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c
index 6db9d5e6f40c5d35e52d90dd86b28f4cb7a94676..cfb04881e627fecb750d3ef434740760fab51cfc 100644
--- a/source/dnode/vnode/src/tsdb/tsdbUtil.c
+++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c
@@ -2039,7 +2039,7 @@ int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol
size += pBlockCol->szOffset;
// value
- if (pColData->flag != (HAS_NULL | HAS_NONE)) {
+ if ((pColData->flag != (HAS_NULL | HAS_NONE)) && pColData->nData) {
code = tsdbCmprData((uint8_t *)pColData->pData, pColData->nData, pColData->type, cmprAlg, ppOut, nOut + size,
&pBlockCol->szValue, ppBuf);
if (code) goto _exit;
diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c
index 4418ce20e88b8c461e55fbe0d7b4a8348e032379..580ab8bc93cac3a5057821f238cf85fc1011fa38 100644
--- a/source/dnode/vnode/src/vnd/vnodeCfg.c
+++ b/source/dnode/vnode/src/vnd/vnodeCfg.c
@@ -117,6 +117,7 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
if (tjsonAddIntegerToObject(pJson, "vndStats.ctables", pCfg->vndStats.numOfCTables) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "vndStats.ntables", pCfg->vndStats.numOfNTables) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "vndStats.timeseries", pCfg->vndStats.numOfTimeSeries) < 0) return -1;
+ if (tjsonAddIntegerToObject(pJson, "vndStats.ntimeseries", pCfg->vndStats.numOfNTimeSeries) < 0) return -1;
SJson *pNodeInfoArr = tjsonCreateArray();
tjsonAddItemToObject(pJson, "syncCfg.nodeInfo", pNodeInfoArr);
@@ -224,6 +225,8 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "vndStats.timeseries", pCfg->vndStats.numOfTimeSeries, code);
if (code < 0) return -1;
+ tjsonGetNumberValue(pJson, "vndStats.ntimeseries", pCfg->vndStats.numOfNTimeSeries, code);
+ if (code < 0) return -1;
SJson *pNodeInfoArr = tjsonGetObjectItem(pJson, "syncCfg.nodeInfo");
int arraySize = tjsonGetArraySize(pNodeInfoArr);
diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c
index dcfbd33b903c9fcd55e216bd1b24c73f2845af7b..a4fd984fb762a934b48489da254b3af1aa4dc908 100644
--- a/source/dnode/vnode/src/vnd/vnodeOpen.c
+++ b/source/dnode/vnode/src/vnd/vnodeOpen.c
@@ -87,7 +87,6 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
pVnode->msgCb = msgCb;
taosThreadMutexInit(&pVnode->lock, NULL);
pVnode->blocked = false;
- pVnode->inClose = false;
tsem_init(&pVnode->syncSem, 0, 0);
tsem_init(&(pVnode->canCommit), 0, 1);
@@ -182,8 +181,6 @@ _err:
void vnodePreClose(SVnode *pVnode) {
if (pVnode) {
syncLeaderTransfer(pVnode->sync);
- pVnode->inClose = true;
- smaPreClose(pVnode->pSma);
}
}
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index 7a8d168f4f1a2cb4b1379e9d5794ff58e83841bf..cd0c7ba1f37e459ebb9168128373ce935b069397 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -301,8 +301,6 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
case TDMT_SCH_QUERY_CONTINUE:
return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
- case TDMT_VND_EXEC_RSMA:
- return smaProcessExec(pVnode->pSma, pMsg);
default:
vError("unknown msg type:%d in query queue", pMsg->msgType);
return TSDB_CODE_VND_APP_ERROR;
@@ -370,6 +368,10 @@ void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) {
}
void vnodeUpdateMetaRsp(SVnode *pVnode, STableMetaRsp *pMetaRsp) {
+ if (NULL == pMetaRsp) {
+ return;
+ }
+
strcpy(pMetaRsp->dbFName, pVnode->config.dbname);
pMetaRsp->dbId = pVnode->config.dbId;
pMetaRsp->vgId = TD_VID(pVnode);
@@ -380,14 +382,14 @@ static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t version, void *pReq,
int32_t code = 0;
SVTrimDbReq trimReq = {0};
- vInfo("vgId:%d, trim vnode request will be processed, time:%d", pVnode->config.vgId, trimReq.timestamp);
-
// decode
if (tDeserializeSVTrimDbReq(pReq, len, &trimReq) != 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
+ vInfo("vgId:%d, trim vnode request will be processed, time:%d", pVnode->config.vgId, trimReq.timestamp);
+
// process
code = tsdbDoRetention(pVnode->pTsdb, trimReq.timestamp);
if (code) goto _exit;
@@ -494,6 +496,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR
// loop to create table
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
pCreateReq = req.pReqs + iReq;
+ memset(&cRsp, 0, sizeof(cRsp));
if ((terrno = grantCheck(TSDB_GRANT_TIMESERIES)) < 0) {
rcode = -1;
@@ -514,7 +517,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR
}
// do create table
- if (metaCreateTable(pVnode->pMeta, version, pCreateReq) < 0) {
+ if (metaCreateTable(pVnode->pMeta, version, pCreateReq, &cRsp.pMeta) < 0) {
if (pCreateReq->flags & TD_CREATE_IF_NOT_EXISTS && terrno == TSDB_CODE_TDB_TABLE_ALREADY_EXIST) {
cRsp.code = TSDB_CODE_SUCCESS;
} else {
@@ -524,6 +527,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR
cRsp.code = TSDB_CODE_SUCCESS;
tdFetchTbUidList(pVnode->pSma, &pStore, pCreateReq->ctb.suid, pCreateReq->uid);
taosArrayPush(tbUids, &pCreateReq->uid);
+ vnodeUpdateMetaRsp(pVnode, cRsp.pMeta);
}
taosArrayPush(rsp.pArray, &cRsp);
@@ -552,7 +556,7 @@ _exit:
pCreateReq = req.pReqs + iReq;
taosArrayDestroy(pCreateReq->ctb.tagName);
}
- taosArrayDestroy(rsp.pArray);
+ taosArrayDestroyEx(rsp.pArray, tFreeSVCreateTbRsp);
taosArrayDestroy(tbUids);
tDecoderClear(&decoder);
tEncoderClear(&encoder);
@@ -864,7 +868,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
goto _exit;
}
- if (metaCreateTable(pVnode->pMeta, version, &createTbReq) < 0) {
+ if (metaCreateTable(pVnode->pMeta, version, &createTbReq, &submitBlkRsp.pMeta) < 0) {
if (terrno != TSDB_CODE_TDB_TABLE_ALREADY_EXIST) {
submitBlkRsp.code = terrno;
pRsp->code = terrno;
@@ -872,6 +876,10 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
taosArrayDestroy(createTbReq.ctb.tagName);
goto _exit;
}
+ } else {
+ if (NULL != submitBlkRsp.pMeta) {
+ vnodeUpdateMetaRsp(pVnode, submitBlkRsp.pMeta);
+ }
}
taosArrayPush(newTbUids, &createTbReq.uid);
@@ -915,11 +923,7 @@ _exit:
tEncodeSSubmitRsp(&encoder, &submitRsp);
tEncoderClear(&encoder);
- for (int32_t i = 0; i < taosArrayGetSize(submitRsp.pArray); i++) {
- taosMemoryFree(((SSubmitBlkRsp *)taosArrayGet(submitRsp.pArray, i))[0].tblFName);
- }
-
- taosArrayDestroy(submitRsp.pArray);
+ taosArrayDestroyEx(submitRsp.pArray, tFreeSSubmitBlkRsp);
// TODO: the partial success scenario and the error case
// => If partial success, extract the success submitted rows and reconstruct a new submit msg, and push to level
diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h
index 777dcd0592ae69de003d5df0d1d9d2592302d195..9b62581051daac9c232409c0cb30d379e3a4d596 100644
--- a/source/libs/catalog/inc/catalogInt.h
+++ b/source/libs/catalog/inc/catalogInt.h
@@ -188,7 +188,7 @@ typedef struct SCtgTbCache {
typedef struct SCtgVgCache {
SRWLatch vgLock;
- SDBVgInfo *vgInfo;
+ SDBVgInfo *vgInfo;
} SCtgVgCache;
typedef struct SCtgDBCache {
@@ -224,7 +224,7 @@ typedef struct SCtgUserAuth {
} SCtgUserAuth;
typedef struct SCatalog {
- uint64_t clusterId;
+ uint64_t clusterId;
SHashObj *userCache; //key:user, value:SCtgUserAuth
SHashObj *dbCache; //key:dbname, value:SCtgDBCache
SCtgRentMgmt dbRent;
@@ -253,9 +253,9 @@ typedef struct SCtgJob {
int32_t jobResCode;
int32_t taskIdx;
SRWLatch taskLock;
-
+
uint64_t queryId;
- SCatalog* pCtg;
+ SCatalog* pCtg;
SRequestConnInfo conn;
void* userParam;
catalogCallback userFp;
@@ -279,7 +279,7 @@ typedef struct SCtgMsgCtx {
void* lastOut;
void* out;
char* target;
- SHashObj* pBatchs;
+ SHashObj* pBatchs;
} SCtgMsgCtx;
@@ -364,7 +364,7 @@ typedef struct SCtgCacheStat {
uint64_t numOfMetaHit;
uint64_t numOfMetaMiss;
uint64_t numOfIndexHit;
- uint64_t numOfIndexMiss;
+ uint64_t numOfIndexMiss;
uint64_t numOfUserHit;
uint64_t numOfUserMiss;
uint64_t numOfClear;
@@ -451,7 +451,7 @@ typedef struct SCtgCacheOperation {
int32_t opId;
void *data;
bool syncOp;
- tsem_t rspSem;
+ tsem_t rspSem;
bool stopQueue;
bool unLocked;
} SCtgCacheOperation;
@@ -466,7 +466,7 @@ typedef struct SCtgQueue {
bool stopQueue;
SCtgQNode *head;
SCtgQNode *tail;
- tsem_t reqSem;
+ tsem_t reqSem;
uint64_t qRemainNum;
} SCtgQueue;
@@ -475,7 +475,7 @@ typedef struct SCatalogMgmt {
int32_t jobPool;
SRWLatch lock;
SCtgQueue queue;
- TdThread updateThread;
+ TdThread updateThread;
SHashObj *pCluster; //key: clusterId, value: SCatalog*
SCatalogStat stat;
SCatalogCfg cfg;
@@ -528,8 +528,8 @@ typedef struct SCtgOperation {
#define CTG_META_SIZE(pMeta) (sizeof(STableMeta) + ((pMeta)->tableInfo.numOfTags + (pMeta)->tableInfo.numOfColumns) * sizeof(SSchema))
-#define CTG_TABLE_NOT_EXIST(code) (code == CTG_ERR_CODE_TABLE_NOT_EXIST)
-#define CTG_DB_NOT_EXIST(code) (code == TSDB_CODE_MND_DB_NOT_EXIST)
+#define CTG_TABLE_NOT_EXIST(code) (code == CTG_ERR_CODE_TABLE_NOT_EXIST)
+#define CTG_DB_NOT_EXIST(code) (code == TSDB_CODE_MND_DB_NOT_EXIST)
#define ctgFatal(param, ...) qFatal("CTG:%p " param, pCtg, __VA_ARGS__)
#define ctgError(param, ...) qError("CTG:%p " param, pCtg, __VA_ARGS__)
@@ -576,7 +576,7 @@ typedef struct SCtgOperation {
} \
} while (0)
-
+
#define CTG_ERR_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; return _code; } } while (0)
#define CTG_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; } return _code; } while (0)
#define CTG_ERR_JRET(c) do { code = c; if (code != TSDB_CODE_SUCCESS) { terrno = code; goto _return; } } while (0)
diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c
index b6e958e1929cc71dfa43ad018728e1f1844cb472..7b32eadcd415116f67db8526449c8a6759f45bcd 100644
--- a/source/libs/catalog/src/catalog.c
+++ b/source/libs/catalog/src/catalog.c
@@ -270,13 +270,22 @@ int32_t ctgUpdateTbMeta(SCatalog* pCtg, STableMetaRsp* rspMsg, bool syncOp) {
int32_t code = 0;
strcpy(output->dbFName, rspMsg->dbFName);
- strcpy(output->tbName, rspMsg->tbName);
output->dbId = rspMsg->dbId;
- SET_META_TYPE_TABLE(output->metaType);
+ if (TSDB_CHILD_TABLE == rspMsg->tableType && NULL == rspMsg->pSchemas) {
+ strcpy(output->ctbName, rspMsg->tbName);
- CTG_ERR_JRET(queryCreateTableMetaFromMsg(rspMsg, rspMsg->tableType == TSDB_SUPER_TABLE, &output->tbMeta));
+ SET_META_TYPE_CTABLE(output->metaType);
+
+ CTG_ERR_JRET(queryCreateCTableMetaFromMsg(rspMsg, &output->ctbMeta));
+ } else {
+ strcpy(output->tbName, rspMsg->tbName);
+
+ SET_META_TYPE_TABLE(output->metaType);
+
+ CTG_ERR_JRET(queryCreateTableMetaFromMsg(rspMsg, rspMsg->tableType == TSDB_SUPER_TABLE, &output->tbMeta));
+ }
CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, output, syncOp));
diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c
index 64ca85edf45ac515bd7728883c171b04c399d148..585b33930c2cae0332ee77a3933d5a86288c77bc 100644
--- a/source/libs/catalog/src/ctgAsync.c
+++ b/source/libs/catalog/src/ctgAsync.c
@@ -39,7 +39,7 @@ int32_t ctgInitGetTbMetaTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosMemoryFree(task.taskCtx);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
memcpy(ctx->pName, name, sizeof(*name));
ctx->flag = CTG_FLAG_UNKNOWN_STB;
@@ -69,7 +69,7 @@ int32_t ctgInitGetTbMetasTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d",
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d",
pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->tbMetaNum);
return TSDB_CODE_SUCCESS;
@@ -89,7 +89,7 @@ int32_t ctgInitGetDbVgTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
}
SCtgDbVgCtx* ctx = task.taskCtx;
-
+
memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName));
taosArrayPush(pJob->pTasks, &task);
@@ -113,7 +113,7 @@ int32_t ctgInitGetDbCfgTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
}
SCtgDbCfgCtx* ctx = task.taskCtx;
-
+
memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName));
taosArrayPush(pJob->pTasks, &task);
@@ -137,7 +137,7 @@ int32_t ctgInitGetDbInfoTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
}
SCtgDbInfoCtx* ctx = task.taskCtx;
-
+
memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName));
taosArrayPush(pJob->pTasks, &task);
@@ -167,7 +167,7 @@ int32_t ctgInitGetTbHashTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosMemoryFree(task.taskCtx);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
memcpy(ctx->pName, name, sizeof(*name));
tNameGetFullDbName(ctx->pName, ctx->dbFName);
@@ -197,7 +197,7 @@ int32_t ctgInitGetTbHashsTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d",
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d",
pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->tbHashNum);
return TSDB_CODE_SUCCESS;
@@ -248,7 +248,7 @@ int32_t ctgInitGetIndexTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
}
SCtgIndexCtx* ctx = task.taskCtx;
-
+
strcpy(ctx->indexFName, name);
taosArrayPush(pJob->pTasks, &task);
@@ -272,7 +272,7 @@ int32_t ctgInitGetUdfTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
}
SCtgUdfCtx* ctx = task.taskCtx;
-
+
strcpy(ctx->udfName, name);
taosArrayPush(pJob->pTasks, &task);
@@ -296,7 +296,7 @@ int32_t ctgInitGetUserTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
}
SCtgUserCtx* ctx = task.taskCtx;
-
+
memcpy(&ctx->user, user, sizeof(*user));
taosArrayPush(pJob->pTasks, &task);
@@ -339,7 +339,7 @@ int32_t ctgInitGetTbIndexTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosMemoryFree(task.taskCtx);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
memcpy(ctx->pName, name, sizeof(*name));
taosArrayPush(pJob->pTasks, &task);
@@ -368,7 +368,7 @@ int32_t ctgInitGetTbCfgTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosMemoryFree(task.taskCtx);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
memcpy(ctx->pName, name, sizeof(*name));
taosArrayPush(pJob->pTasks, &task);
@@ -387,7 +387,7 @@ int32_t ctgHandleForceUpdate(SCatalog* pCtg, int32_t taskNum, SCtgJob *pJob, con
taosHashCleanup(pTb);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
for (int32_t i = 0; i < pJob->dbVgNum; ++i) {
char* dbFName = taosArrayGet(pReq->pDbVgroup, i);
taosHashPut(pDb, dbFName, strlen(dbFName), dbFName, TSDB_DB_FNAME_LEN);
@@ -474,7 +474,7 @@ int32_t ctgInitTask(SCtgJob *pJob, CTG_TASK_TYPE type, void* param, int32_t *tas
if (taskId) {
*taskId = tid;
}
-
+
return TSDB_CODE_SUCCESS;
}
@@ -510,7 +510,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const
pJob->pCtg = pCtg;
pJob->conn = *pConn;
pJob->userParam = param;
-
+
pJob->tbMetaNum = tbMetaNum;
pJob->tbHashNum = tbHashNum;
pJob->qnodeNum = qnodeNum;
@@ -844,20 +844,20 @@ int32_t ctgDumpSvrVer(SCtgTask* pTask) {
pJob->jobRes.pSvrVer->code = pTask->code;
pJob->jobRes.pSvrVer->pRes = pTask->res;
-
+
return TSDB_CODE_SUCCESS;
}
int32_t ctgCallSubCb(SCtgTask *pTask) {
int32_t code = 0;
-
+
CTG_LOCK(CTG_WRITE, &pTask->lock);
-
+
int32_t parentNum = taosArrayGetSize(pTask->pParents);
for (int32_t i = 0; i < parentNum; ++i) {
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
SCtgTask* pParent = taosArrayGetP(pTask->pParents, i);
-
+
pParent->subRes.code = pTask->code;
if (TSDB_CODE_SUCCESS == pTask->code) {
code = (*gCtgAsyncFps[pTask->type].cloneFp)(pTask, &pParent->subRes.res);
@@ -868,22 +868,22 @@ int32_t ctgCallSubCb(SCtgTask *pTask) {
SCtgMsgCtx *pParMsgCtx = CTG_GET_TASK_MSGCTX(pParent, -1);
- pParMsgCtx->pBatchs = pMsgCtx->pBatchs;
+ pParMsgCtx->pBatchs = pMsgCtx->pBatchs;
CTG_ERR_JRET(pParent->subRes.fp(pParent));
}
-
+
_return:
CTG_UNLOCK(CTG_WRITE, &pTask->lock);
- CTG_RET(code);
+ CTG_RET(code);
}
int32_t ctgCallUserCb(void* param) {
SCtgJob* pJob = (SCtgJob*)param;
qDebug("QID:0x%" PRIx64 " ctg start to call user cb with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode));
-
+
(*pJob->userFp)(&pJob->jobRes, pJob->userParam, pJob->jobResCode);
qDebug("QID:0x%" PRIx64 " ctg end to call user cb", pJob->queryId);
@@ -922,9 +922,9 @@ _return:
//taosSsleep(2);
//qDebug("QID:0x%" PRIx64 " ctg after sleep", pJob->queryId);
-
+
taosAsyncExec(ctgCallUserCb, pJob, NULL);
-
+
CTG_RET(code);
}
@@ -932,7 +932,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
int32_t code = 0;
SCtgDBCache *dbCache = NULL;
SCtgTask* pTask = tReq->pTask;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);
SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
@@ -958,38 +958,38 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
}
case TDMT_MND_TABLE_META: {
STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out;
-
+
if (CTG_IS_META_NULL(pOut->metaType)) {
if (CTG_FLAG_IS_STB(flag)) {
char dbFName[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(pName, dbFName);
-
+
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
if (NULL != dbCache) {
SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo));
-
+
ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
- *vgId = vgInfo.vgId;
+ *vgId = vgInfo.vgId;
CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, pName, &vgInfo, NULL, tReq));
ctgReleaseVgInfoToCache(pCtg, dbCache);
} else {
SBuildUseDBInput input = {0};
-
+
tstrncpy(input.db, dbFName, tListLen(input.db));
input.vgVersion = CTG_DEFAULT_INVALID_VERSION;
-
+
CTG_ERR_JRET(ctgGetDBVgInfoFromMnode(pCtg, pConn, &input, NULL, tReq));
}
return TSDB_CODE_SUCCESS;
}
-
+
ctgError("no tbmeta got, tbName:%s", tNameGetTableName(pName));
ctgRemoveTbMetaFromCache(pCtg, pName, false);
-
+
CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
}
@@ -998,12 +998,12 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
STableMetaOutput* pLastOut = (STableMetaOutput*)pMsgCtx->out;
TSWAP(pLastOut->tbMeta, pOut->tbMeta);
}
-
+
break;
}
case TDMT_VND_TABLE_META: {
STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out;
-
+
if (CTG_IS_META_NULL(pOut->metaType)) {
ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(pName));
ctgRemoveTbMetaFromCache(pCtg, pName, false);
@@ -1013,12 +1013,12 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
if (CTG_FLAG_IS_STB(flag)) {
break;
}
-
+
if (CTG_IS_META_TABLE(pOut->metaType) && TSDB_SUPER_TABLE == pOut->tbMeta->tableType) {
ctgDebug("will continue to refresh tbmeta since got stb, tbName:%s", tNameGetTableName(pName));
-
+
taosMemoryFreeClear(pOut->tbMeta);
-
+
CTG_RET(ctgGetTbMetaFromMnode(pCtg, pConn, pName, NULL, tReq));
} else if (CTG_IS_META_BOTH(pOut->metaType)) {
int32_t exist = 0;
@@ -1029,13 +1029,13 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
stbCtx.flag = flag;
stbCtx.pName = &stbName;
- taosMemoryFreeClear(pOut->tbMeta);
+ taosMemoryFreeClear(pOut->tbMeta);
CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta));
if (pOut->tbMeta) {
exist = 1;
}
}
-
+
if (0 == exist) {
TSWAP(pMsgCtx->lastOut, pMsgCtx->out);
CTG_RET(ctgGetTbMetaFromMnodeImpl(pCtg, pConn, pOut->dbFName, pOut->tbName, NULL, tReq));
@@ -1056,7 +1056,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
if (CTG_IS_META_BOTH(pOut->metaType)) {
memcpy(pOut->tbMeta, &pOut->ctbMeta, sizeof(pOut->ctbMeta));
}
-
+
/*
else if (CTG_IS_META_CTABLE(pOut->metaType)) {
SName stbName = *pName;
@@ -1064,7 +1064,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
SCtgTbMetaCtx stbCtx = {0};
stbCtx.flag = flag;
stbCtx.pName = &stbName;
-
+
CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta));
if (NULL == pOut->tbMeta) {
ctgDebug("stb no longer exist, stbName:%s", stbName.tname);
@@ -1088,7 +1088,7 @@ _return:
if (pTask->res || code) {
ctgHandleTaskEnd(pTask, code);
}
-
+
CTG_RET(code);
}
@@ -1097,7 +1097,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
int32_t code = 0;
SCtgDBCache *dbCache = NULL;
SCtgTask* pTask = tReq->pTask;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);
SCtgTbMetasCtx* ctx = (SCtgTbMetasCtx*)pTask->taskCtx;
@@ -1125,38 +1125,38 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
}
case TDMT_MND_TABLE_META: {
STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out;
-
+
if (CTG_IS_META_NULL(pOut->metaType)) {
if (CTG_FLAG_IS_STB(flag)) {
char dbFName[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(pName, dbFName);
-
+
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
if (NULL != dbCache) {
SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo));
-
+
ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
- *vgId = vgInfo.vgId;
+ *vgId = vgInfo.vgId;
CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, pName, &vgInfo, NULL, tReq));
ctgReleaseVgInfoToCache(pCtg, dbCache);
} else {
SBuildUseDBInput input = {0};
-
+
tstrncpy(input.db, dbFName, tListLen(input.db));
input.vgVersion = CTG_DEFAULT_INVALID_VERSION;
-
+
CTG_ERR_JRET(ctgGetDBVgInfoFromMnode(pCtg, pConn, &input, NULL, tReq));
}
return TSDB_CODE_SUCCESS;
}
-
+
ctgError("no tbmeta got, tbName:%s", tNameGetTableName(pName));
ctgRemoveTbMetaFromCache(pCtg, pName, false);
-
+
CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
}
@@ -1165,12 +1165,12 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
STableMetaOutput* pLastOut = (STableMetaOutput*)pMsgCtx->out;
TSWAP(pLastOut->tbMeta, pOut->tbMeta);
}
-
+
break;
}
case TDMT_VND_TABLE_META: {
STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out;
-
+
if (CTG_IS_META_NULL(pOut->metaType)) {
ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(pName));
ctgRemoveTbMetaFromCache(pCtg, pName, false);
@@ -1180,12 +1180,12 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
if (CTG_FLAG_IS_STB(flag)) {
break;
}
-
+
if (CTG_IS_META_TABLE(pOut->metaType) && TSDB_SUPER_TABLE == pOut->tbMeta->tableType) {
ctgDebug("will continue to refresh tbmeta since got stb, tbName:%s", tNameGetTableName(pName));
-
+
taosMemoryFreeClear(pOut->tbMeta);
-
+
CTG_RET(ctgGetTbMetaFromMnode(pCtg, pConn, pName, NULL, tReq));
} else if (CTG_IS_META_BOTH(pOut->metaType)) {
int32_t exist = 0;
@@ -1196,14 +1196,14 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
stbCtx.flag = flag;
stbCtx.pName = &stbName;
- taosMemoryFreeClear(pOut->tbMeta);
+ taosMemoryFreeClear(pOut->tbMeta);
CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta));
if (pOut->tbMeta) {
ctgDebug("use cached stb meta, tbName:%s", tNameGetTableName(pName));
exist = 1;
}
}
-
+
if (0 == exist) {
TSWAP(pMsgCtx->lastOut, pMsgCtx->out);
CTG_RET(ctgGetTbMetaFromMnodeImpl(pCtg, pConn, pOut->dbFName, pOut->tbName, NULL, tReq));
@@ -1224,7 +1224,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
if (CTG_IS_META_BOTH(pOut->metaType)) {
memcpy(pOut->tbMeta, &pOut->ctbMeta, sizeof(pOut->ctbMeta));
}
-
+
/*
else if (CTG_IS_META_CTABLE(pOut->metaType)) {
SName stbName = *pName;
@@ -1232,7 +1232,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
SCtgTbMetaCtx stbCtx = {0};
stbCtx.flag = flag;
stbCtx.pName = &stbName;
-
+
CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta));
if (NULL == pOut->tbMeta) {
ctgDebug("stb no longer exist, stbName:%s", stbName.tname);
@@ -1273,7 +1273,7 @@ _return:
if (pTask->res && taskDone) {
ctgHandleTaskEnd(pTask, code);
}
-
+
CTG_RET(code);
}
@@ -1282,7 +1282,7 @@ int32_t ctgHandleGetDbVgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
SCtgDbVgCtx* ctx = (SCtgDbVgCtx*)pTask->taskCtx;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@@ -1290,7 +1290,7 @@ int32_t ctgHandleGetDbVgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *
case TDMT_MND_USE_DB: {
SUseDbOutput* pOut = (SUseDbOutput*)pTask->msgCtx.out;
SDBVgInfo* pDb = NULL;
-
+
CTG_ERR_JRET(ctgGenerateVgList(pCtg, pOut->dbVgroup->vgHash, (SArray**)&pTask->res));
CTG_ERR_JRET(cloneDbVgInfo(pOut->dbVgroup, &pDb));
@@ -1316,7 +1316,7 @@ int32_t ctgHandleGetTbHashRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
SCtgTbHashCtx* ctx = (SCtgTbHashCtx*)pTask->taskCtx;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@@ -1330,7 +1330,7 @@ int32_t ctgHandleGetTbHashRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
}
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res));
-
+
CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false));
pOut->dbVgroup = NULL;
@@ -1354,7 +1354,7 @@ int32_t ctgHandleGetTbHashsRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
SCtgTbHashsCtx* ctx = (SCtgTbHashsCtx*)pTask->taskCtx;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);
SCtgFetch* pFetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx);
bool taskDone = false;
@@ -1367,7 +1367,7 @@ int32_t ctgHandleGetTbHashsRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
STablesReq* pReq = taosArrayGet(ctx->pNames, pFetch->dbIdx);
CTG_ERR_JRET(ctgGetVgInfosFromHashValue(pCtg, tReq, pOut->dbVgroup, ctx, pMsgCtx->target, pReq->pTables, true));
-
+
CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, pMsgCtx->target, pOut->dbId, pOut->dbVgroup, false));
pOut->dbVgroup = NULL;
@@ -1394,7 +1394,7 @@ _return:
pRes->code = code;
pRes->pRes = NULL;
}
-
+
if (0 == atomic_sub_fetch_32(&ctx->fetchNum, 1)) {
TSWAP(pTask->res, ctx->pResList);
taskDone = true;
@@ -1419,9 +1419,9 @@ int32_t ctgHandleGetTbIndexRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
CTG_ERR_JRET(ctgCloneTableIndex(pOut->pIndex, &pInfo));
pTask->res = pInfo;
- SCtgTbIndexCtx* ctx = pTask->taskCtx;
+ SCtgTbIndexCtx* ctx = pTask->taskCtx;
CTG_ERR_JRET(ctgUpdateTbIndexEnqueue(pTask->pJob->pCtg, (STableIndex**)&pTask->msgCtx.out, false));
-
+
_return:
if (TSDB_CODE_MND_DB_INDEX_NOT_EXIST == code) {
@@ -1438,7 +1438,7 @@ int32_t ctgHandleGetTbCfgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
-
+
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1452,7 +1452,7 @@ int32_t ctgHandleGetDbCfgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
-
+
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1471,7 +1471,7 @@ int32_t ctgHandleGetQnodeRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
-
+
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1485,7 +1485,7 @@ int32_t ctgHandleGetDnodeRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
-
+
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1499,7 +1499,7 @@ int32_t ctgHandleGetIndexRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
-
+
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1513,7 +1513,7 @@ int32_t ctgHandleGetUdfRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *p
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
-
+
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1525,7 +1525,7 @@ int32_t ctgHandleGetUserRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
SCtgUserCtx* ctx = (SCtgUserCtx*)pTask->taskCtx;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
bool pass = false;
SGetUserAuthRsp* pOut = (SGetUserAuthRsp*)pTask->msgCtx.out;
@@ -1573,7 +1573,7 @@ int32_t ctgHandleGetSvrVerRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
-
+
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1583,7 +1583,7 @@ _return:
int32_t ctgAsyncRefreshTbMeta(SCtgTaskReq *tReq, int32_t flag, SName* pName, int32_t* vgId) {
SCtgTask* pTask = tReq->pTask;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
int32_t code = 0;
@@ -1603,7 +1603,7 @@ int32_t ctgAsyncRefreshTbMeta(SCtgTaskReq *tReq, int32_t flag, SName* pName, int
SCtgDBCache *dbCache = NULL;
char dbFName[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(pName, dbFName);
-
+
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
if (dbCache) {
SVgroupInfo vgInfo = {0};
@@ -1632,7 +1632,7 @@ _return:
}
int32_t ctgLaunchGetTbMetaTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
@@ -1649,14 +1649,14 @@ int32_t ctgLaunchGetTbMetaTask(SCtgTask *pTask) {
SCtgTbMetaCtx* pCtx = (SCtgTbMetaCtx*)pTask->taskCtx;
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_ERR_RET(ctgAsyncRefreshTbMeta(&tReq, pCtx->flag, pCtx->pName, &pCtx->vgId));
return TSDB_CODE_SUCCESS;
}
int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgTbMetasCtx* pCtx = (SCtgTbMetasCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
@@ -1670,18 +1670,18 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) {
CTG_ERR_RET(ctgGetTbMetasFromCache(pCtg, pConn, pCtx, i, &fetchIdx, baseResIdx, pReq->pTables));
baseResIdx += taosArrayGetSize(pReq->pTables);
}
-
+
pCtx->fetchNum = taosArrayGetSize(pCtx->pFetchs);
if (pCtx->fetchNum <= 0) {
TSWAP(pTask->res, pCtx->pResList);
-
+
CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0));
return TSDB_CODE_SUCCESS;
}
-
+
pTask->msgCtxs = taosArrayInit(pCtx->fetchNum, sizeof(SCtgMsgCtx));
taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum);
-
+
for (int32_t i = 0; i < pCtx->fetchNum; ++i) {
SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i);
SName* pName = ctgGetFetchName(pCtx->pNames, pFetch);
@@ -1689,19 +1689,19 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) {
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
-
+
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = pFetch->fetchIdx;
+ tReq.msgIdx = pFetch->fetchIdx;
CTG_ERR_RET(ctgAsyncRefreshTbMeta(&tReq, pFetch->flag, pName, &pFetch->vgId));
}
-
+
return TSDB_CODE_SUCCESS;
}
int32_t ctgLaunchGetDbVgTask(SCtgTask *pTask) {
int32_t code = 0;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgDBCache *dbCache = NULL;
SCtgDbVgCtx* pCtx = (SCtgDbVgCtx*)pTask->taskCtx;
@@ -1710,18 +1710,18 @@ int32_t ctgLaunchGetDbVgTask(SCtgTask *pTask) {
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
-
+
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache));
if (NULL != dbCache) {
CTG_ERR_JRET(ctgGenerateVgList(pCtg, dbCache->vgCache.vgInfo->vgHash, (SArray**)&pTask->res));
ctgReleaseVgInfoToCache(pCtg, dbCache);
dbCache = NULL;
-
+
CTG_ERR_JRET(ctgHandleTaskEnd(pTask, 0));
} else {
SBuildUseDBInput input = {0};
-
+
tstrncpy(input.db, pCtx->dbFName, tListLen(input.db));
input.vgVersion = CTG_DEFAULT_INVALID_VERSION;
@@ -1742,7 +1742,7 @@ _return:
int32_t ctgLaunchGetTbHashTask(SCtgTask *pTask) {
int32_t code = 0;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgDBCache *dbCache = NULL;
SCtgTbHashCtx* pCtx = (SCtgTbHashCtx*)pTask->taskCtx;
@@ -1751,7 +1751,7 @@ int32_t ctgLaunchGetTbHashTask(SCtgTask *pTask) {
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
-
+
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache));
if (NULL != dbCache) {
pTask->res = taosMemoryMalloc(sizeof(SVgroupInfo));
@@ -1762,17 +1762,17 @@ int32_t ctgLaunchGetTbHashTask(SCtgTask *pTask) {
ctgReleaseVgInfoToCache(pCtg, dbCache);
dbCache = NULL;
-
+
CTG_ERR_JRET(ctgHandleTaskEnd(pTask, 0));
} else {
SBuildUseDBInput input = {0};
-
+
tstrncpy(input.db, pCtx->dbFName, tListLen(input.db));
input.vgVersion = CTG_DEFAULT_INVALID_VERSION;
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_ERR_RET(ctgGetDBVgInfoFromMnode(pCtg, pConn, &input, NULL, &tReq));
}
@@ -1786,16 +1786,16 @@ _return:
}
int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgTbHashsCtx* pCtx = (SCtgTbHashsCtx*)pTask->taskCtx;
SCtgDBCache *dbCache = NULL;
- SCtgJob* pJob = pTask->pJob;
+ SCtgJob* pJob = pTask->pJob;
int32_t dbNum = taosArrayGetSize(pCtx->pNames);
int32_t fetchIdx = 0;
int32_t baseResIdx = 0;
int32_t code = 0;
-
+
for (int32_t i = 0; i < dbNum; ++i) {
STablesReq* pReq = taosArrayGet(pCtx->pNames, i);
@@ -1804,7 +1804,7 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) {
if (NULL != dbCache) {
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_ERR_JRET(ctgGetVgInfosFromHashValue(pCtg, &tReq, dbCache->vgCache.vgInfo, pCtx, pReq->dbFName, pReq->pTables, false));
ctgReleaseVgInfoToCache(pCtg, dbCache);
@@ -1815,21 +1815,21 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) {
ctgAddFetch(&pCtx->pFetchs, i, -1, &fetchIdx, baseResIdx, 0);
baseResIdx += taosArrayGetSize(pReq->pTables);
- taosArraySetSize(pCtx->pResList, baseResIdx);
+ taosArraySetSize(pCtx->pResList, baseResIdx);
}
}
pCtx->fetchNum = taosArrayGetSize(pCtx->pFetchs);
if (pCtx->fetchNum <= 0) {
TSWAP(pTask->res, pCtx->pResList);
-
+
CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0));
return TSDB_CODE_SUCCESS;
}
-
+
pTask->msgCtxs = taosArrayInit(pCtx->fetchNum, sizeof(SCtgMsgCtx));
taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum);
-
+
for (int32_t i = 0; i < pCtx->fetchNum; ++i) {
SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i);
STablesReq* pReq = taosArrayGet(pCtx->pNames, pFetch->dbIdx);
@@ -1837,10 +1837,10 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) {
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
-
+
SBuildUseDBInput input = {0};
strcpy(input.db, pReq->dbFName);
-
+
input.vgVersion = CTG_DEFAULT_INVALID_VERSION;
SCtgTaskReq tReq;
@@ -1854,14 +1854,14 @@ _return:
if (dbCache) {
ctgReleaseVgInfoToCache(pCtg, dbCache);
}
-
+
return code;
}
int32_t ctgLaunchGetTbIndexTask(SCtgTask *pTask) {
int32_t code = 0;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgTbIndexCtx* pCtx = (SCtgTbIndexCtx*)pTask->taskCtx;
SArray* pRes = NULL;
@@ -1874,18 +1874,18 @@ int32_t ctgLaunchGetTbIndexTask(SCtgTask *pTask) {
CTG_ERR_RET(ctgReadTbIndexFromCache(pCtg, pCtx->pName, &pRes));
if (pRes) {
pTask->res = pRes;
-
+
CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0));
return TSDB_CODE_SUCCESS;
}
-
+
CTG_ERR_RET(ctgGetTbIndexFromMnode(pCtg, pConn, pCtx->pName, NULL, pTask));
return TSDB_CODE_SUCCESS;
}
int32_t ctgLaunchGetTbCfgTask(SCtgTask *pTask) {
int32_t code = 0;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgTbCfgCtx* pCtx = (SCtgTbCfgCtx*)pTask->taskCtx;
SArray* pRes = NULL;
@@ -1915,7 +1915,7 @@ int32_t ctgLaunchGetTbCfgTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
}
-
+
CTG_ERR_JRET(ctgGetTableCfgFromVnode(pCtg, pConn, pCtx->pName, pCtx->pVgInfo, NULL, pTask));
}
@@ -1926,13 +1926,13 @@ _return:
if (CTG_TASK_LAUNCHED == pTask->status) {
ctgHandleTaskEnd(pTask, code);
}
-
+
CTG_RET(code);
}
int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
@@ -1945,7 +1945,7 @@ int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) {
}
int32_t ctgLaunchGetDnodeTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
@@ -1959,7 +1959,7 @@ int32_t ctgLaunchGetDnodeTask(SCtgTask *pTask) {
int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgDbCfgCtx* pCtx = (SCtgDbCfgCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
@@ -1975,7 +1975,7 @@ int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) {
int32_t ctgLaunchGetDbInfoTask(SCtgTask *pTask) {
int32_t code = 0;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SCtgDBCache *dbCache = NULL;
SCtgDbInfoCtx* pCtx = (SCtgDbInfoCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
@@ -2014,7 +2014,7 @@ _return:
}
int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgIndexCtx* pCtx = (SCtgIndexCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
@@ -2029,7 +2029,7 @@ int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) {
}
int32_t ctgLaunchGetUdfTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgUdfCtx* pCtx = (SCtgUdfCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
@@ -2044,7 +2044,7 @@ int32_t ctgLaunchGetUdfTask(SCtgTask *pTask) {
}
int32_t ctgLaunchGetUserTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgUserCtx* pCtx = (SCtgUserCtx*)pTask->taskCtx;
bool inCache = false;
@@ -2054,7 +2054,7 @@ int32_t ctgLaunchGetUserTask(SCtgTask *pTask) {
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
-
+
CTG_ERR_RET(ctgChkAuthFromCache(pCtg, pCtx->user.user, pCtx->user.dbFName, pCtx->user.type, &inCache, &pass));
if (inCache) {
pTask->res = taosMemoryCalloc(1, sizeof(bool));
@@ -2062,7 +2062,7 @@ int32_t ctgLaunchGetUserTask(SCtgTask *pTask) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
*(bool*)pTask->res = pass;
-
+
CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0));
return TSDB_CODE_SUCCESS;
}
@@ -2073,7 +2073,7 @@ int32_t ctgLaunchGetUserTask(SCtgTask *pTask) {
}
int32_t ctgLaunchGetSvrVerTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
@@ -2096,7 +2096,7 @@ int32_t ctgRelaunchGetTbMetaTask(SCtgTask *pTask) {
int32_t ctgGetTbCfgCb(SCtgTask *pTask) {
int32_t code = 0;
-
+
CTG_ERR_JRET(pTask->subRes.code);
SCtgTbCfgCtx* pCtx = (SCtgTbCfgCtx*)pTask->taskCtx;
@@ -2104,7 +2104,7 @@ int32_t ctgGetTbCfgCb(SCtgTask *pTask) {
pCtx->tbType = ((STableMeta*)pTask->subRes.res)->tableType;
} else if (CTG_TASK_GET_DB_VGROUP == pTask->subRes.type) {
SDBVgInfo* pDb = (SDBVgInfo*)pTask->subRes.res;
-
+
pCtx->pVgInfo = taosMemoryCalloc(1, sizeof(SVgroupInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, pDb, pCtx->pName, pCtx->pVgInfo));
}
@@ -2167,7 +2167,7 @@ SCtgAsyncFps gCtgAsyncFps[] = {
int32_t ctgMakeAsyncRes(SCtgJob *pJob) {
int32_t code = 0;
int32_t taskNum = taosArrayGetSize(pJob->pTasks);
-
+
for (int32_t i = 0; i < taskNum; ++i) {
SCtgTask *pTask = taosArrayGet(pJob->pTasks, i);
CTG_ERR_RET((*gCtgAsyncFps[pTask->type].dumpResFp)(pTask));
@@ -2180,16 +2180,16 @@ int32_t ctgSearchExistingTask(SCtgJob *pJob, CTG_TASK_TYPE type, void* param, in
bool equal = false;
SCtgTask* pTask = NULL;
int32_t code = 0;
-
+
CTG_LOCK(CTG_READ, &pJob->taskLock);
-
+
int32_t taskNum = taosArrayGetSize(pJob->pTasks);
for (int32_t i = 0; i < taskNum; ++i) {
pTask = taosArrayGet(pJob->pTasks, i);
if (type != pTask->type) {
continue;
}
-
+
CTG_ERR_JRET((*gCtgAsyncFps[type].compFp)(pTask, param, &equal));
if (equal) {
break;
@@ -2208,7 +2208,7 @@ _return:
int32_t ctgSetSubTaskCb(SCtgTask *pSub, SCtgTask *pTask) {
int32_t code = 0;
-
+
CTG_LOCK(CTG_WRITE, &pSub->lock);
if (CTG_TASK_DONE == pSub->status) {
pTask->subRes.code = pSub->code;
@@ -2216,7 +2216,7 @@ int32_t ctgSetSubTaskCb(SCtgTask *pSub, SCtgTask *pTask) {
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
SCtgMsgCtx *pSubMsgCtx = CTG_GET_TASK_MSGCTX(pSub, -1);
pMsgCtx->pBatchs = pSubMsgCtx->pBatchs;
-
+
CTG_ERR_JRET(pTask->subRes.fp(pTask));
} else {
if (NULL == pSub->pParents) {
@@ -2230,7 +2230,7 @@ _return:
CTG_UNLOCK(CTG_WRITE, &pSub->lock);
- CTG_RET(code);
+ CTG_RET(code);
}
@@ -2242,13 +2242,13 @@ int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp,
ctgClearSubTaskRes(&pTask->subRes);
pTask->subRes.type = type;
pTask->subRes.fp = fp;
-
+
CTG_ERR_RET(ctgSearchExistingTask(pJob, type, param, &subTaskId));
if (subTaskId < 0) {
CTG_ERR_RET(ctgInitTask(pJob, type, param, &subTaskId));
newTask = true;
}
-
+
SCtgTask* pSub = taosArrayGet(pJob->pTasks, subTaskId);
CTG_ERR_RET(ctgSetSubTaskCb(pSub, pTask));
@@ -2267,21 +2267,21 @@ int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp,
int32_t ctgLaunchJob(SCtgJob *pJob) {
int32_t taskNum = taosArrayGetSize(pJob->pTasks);
-
+
for (int32_t i = 0; i < taskNum; ++i) {
SCtgTask *pTask = taosArrayGet(pJob->pTasks, i);
qDebug("QID:0x%" PRIx64 " ctg launch [%dth] task", pJob->queryId, pTask->taskId);
CTG_ERR_RET((*gCtgAsyncFps[pTask->type].launchFp)(pTask));
-
+
pTask->status = CTG_TASK_LAUNCHED;
}
if (taskNum <= 0) {
qDebug("QID:0x%" PRIx64 " ctg call user callback with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode));
-
+
taosAsyncExec(ctgCallUserCb, pJob, NULL);
-#if CTG_BATCH_FETCH
+#if CTG_BATCH_FETCH
} else {
ctgLaunchBatchs(pJob->pCtg, pJob, pJob->pBatchs);
#endif
diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c
index 1b2489acd62bec88eac5bd5aca54a6d2f00ff1ab..18d839e1091e3fc5f1be2939a22345efe8ea8579 100644
--- a/source/libs/command/src/command.c
+++ b/source/libs/command/src/command.c
@@ -17,6 +17,7 @@
#include "catalog.h"
#include "commandInt.h"
#include "scheduler.h"
+#include "systable.h"
#include "tdatablock.h"
#include "tglobal.h"
#include "tgrant.h"
@@ -75,46 +76,41 @@ static SSDataBlock* buildDescResultDataBlock() {
return pBlock;
}
-static void setDescResultIntoDataBlock(SSDataBlock* pBlock, int32_t numOfRows, STableMeta* pMeta) {
+static void setDescResultIntoDataBlock(bool sysInfoUser, SSDataBlock* pBlock, int32_t numOfRows, STableMeta* pMeta) {
blockDataEnsureCapacity(pBlock, numOfRows);
- pBlock->info.rows = numOfRows;
+ pBlock->info.rows = 0;
// field
SColumnInfoData* pCol1 = taosArrayGet(pBlock->pDataBlock, 0);
- char buf[DESCRIBE_RESULT_FIELD_LEN] = {0};
- for (int32_t i = 0; i < numOfRows; ++i) {
- STR_TO_VARSTR(buf, pMeta->schema[i].name);
- colDataAppend(pCol1, i, buf, false);
- }
-
// Type
SColumnInfoData* pCol2 = taosArrayGet(pBlock->pDataBlock, 1);
- for (int32_t i = 0; i < numOfRows; ++i) {
- STR_TO_VARSTR(buf, tDataTypes[pMeta->schema[i].type].name);
- colDataAppend(pCol2, i, buf, false);
- }
-
// Length
SColumnInfoData* pCol3 = taosArrayGet(pBlock->pDataBlock, 2);
- for (int32_t i = 0; i < numOfRows; ++i) {
- int32_t bytes = getSchemaBytes(pMeta->schema + i);
- colDataAppend(pCol3, i, (const char*)&bytes, false);
- }
-
// Note
SColumnInfoData* pCol4 = taosArrayGet(pBlock->pDataBlock, 3);
+ char buf[DESCRIBE_RESULT_FIELD_LEN] = {0};
for (int32_t i = 0; i < numOfRows; ++i) {
+ if (invisibleColumn(sysInfoUser, pMeta->tableType, pMeta->schema[i].flags)) {
+ continue;
+ }
+ STR_TO_VARSTR(buf, pMeta->schema[i].name);
+ colDataAppend(pCol1, pBlock->info.rows, buf, false);
+ STR_TO_VARSTR(buf, tDataTypes[pMeta->schema[i].type].name);
+ colDataAppend(pCol2, pBlock->info.rows, buf, false);
+ int32_t bytes = getSchemaBytes(pMeta->schema + i);
+ colDataAppend(pCol3, pBlock->info.rows, (const char*)&bytes, false);
STR_TO_VARSTR(buf, i >= pMeta->tableInfo.numOfColumns ? "TAG" : "");
- colDataAppend(pCol4, i, buf, false);
+ colDataAppend(pCol4, pBlock->info.rows, buf, false);
+ ++(pBlock->info.rows);
}
}
-static int32_t execDescribe(SNode* pStmt, SRetrieveTableRsp** pRsp) {
+static int32_t execDescribe(bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** pRsp) {
SDescribeStmt* pDesc = (SDescribeStmt*)pStmt;
int32_t numOfRows = TABLE_TOTAL_COL_NUM(pDesc->pMeta);
SSDataBlock* pBlock = buildDescResultDataBlock();
- setDescResultIntoDataBlock(pBlock, numOfRows, pDesc->pMeta);
+ setDescResultIntoDataBlock(sysInfoUser, pBlock, numOfRows, pDesc->pMeta);
return buildRetrieveTableRsp(pBlock, DESCRIBE_RESULT_COLS, pRsp);
}
@@ -475,6 +471,7 @@ static int32_t setCreateTBResultIntoDataBlock(SSDataBlock* pBlock, SDbCfgInfo* p
len += sprintf(buf2 + VARSTR_HEADER_SIZE, "CREATE TABLE `%s` (", tbName);
appendColumnFields(buf2, &len, pCfg);
len += sprintf(buf2 + VARSTR_HEADER_SIZE + len, ")");
+ appendTableOptions(buf2, &len, pDbCfg, pCfg);
}
varDataLen(buf2) = len;
@@ -665,10 +662,10 @@ static int32_t execSelectWithoutFrom(SSelectStmt* pSelect, SRetrieveTableRsp** p
return code;
}
-int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp) {
+int32_t qExecCommand(bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** pRsp) {
switch (nodeType(pStmt)) {
case QUERY_NODE_DESCRIBE_STMT:
- return execDescribe(pStmt, pRsp);
+ return execDescribe(sysInfoUser, pStmt, pRsp);
case QUERY_NODE_RESET_QUERY_CACHE_STMT:
return execResetQueryCache();
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c
index afc09262de377bd6515a03d75e9a8c97f6b61882..967c682b0bb701502dff90081fba8973a34bd22a 100644
--- a/source/libs/command/src/explain.c
+++ b/source/libs/command/src/explain.c
@@ -1570,12 +1570,9 @@ int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) {
SColumnInfoData *pInfoData = taosArrayGet(pBlock->pDataBlock, 0);
- char buf[1024] = {0};
for (int32_t i = 0; i < rowNum; ++i) {
SQueryExplainRowInfo *row = taosArrayGet(pCtx->rows, i);
- varDataCopy(buf, row->buf);
- ASSERT(varDataTLen(row->buf) == row->len);
- colDataAppend(pInfoData, i, buf, false);
+ colDataAppend(pInfoData, i, row->buf, false);
}
pBlock->info.rows = rowNum;
diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h
index 4da4747108d5b459eac03546d6f2cc661c3760a1..9e7fcc222788e16f60252727ba7ca7c911366e9b 100644
--- a/source/libs/executor/inc/executil.h
+++ b/source/libs/executor/inc/executil.h
@@ -22,6 +22,13 @@
#include "tbuffer.h"
#include "tcommon.h"
#include "tpagedbuf.h"
+#include "tsimplehash.h"
+
+#define T_LONG_JMP(_obj, _c) \
+ do { \
+ ASSERT((_c) != -1); \
+ longjmp((_obj), (_c)); \
+ } while (0);
#define SET_RES_WINDOW_KEY(_k, _ori, _len, _uid) \
do { \
@@ -80,11 +87,9 @@ struct SqlFunctionCtx;
size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput);
void initResultRowInfo(SResultRowInfo* pResultRowInfo);
-void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo);
void initResultRow(SResultRow* pResultRow);
void closeResultRow(SResultRow* pResultRow);
-bool isResultRowClosed(SResultRow* pResultRow);
struct SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset);
@@ -102,7 +107,7 @@ static FORCE_INLINE void setResultBufPageDirty(SDiskbasedBuf* pBuf, SResultRowPo
setBufPageDirty(pPage, true);
}
-void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int32_t order);
+void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, int32_t order);
void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo);
void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList);
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index fb4eac991f4d64e2c5477e3b102395ad6c83550b..3bed81f65784c2355252006859dd7cb14902e53c 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -122,7 +122,7 @@ typedef int32_t (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, char* res
typedef int32_t (*__optr_open_fn_t)(struct SOperatorInfo* pOptr);
typedef SSDataBlock* (*__optr_fn_t)(struct SOperatorInfo* pOptr);
-typedef void (*__optr_close_fn_t)(void* param, int32_t num);
+typedef void (*__optr_close_fn_t)(void* param);
typedef int32_t (*__optr_explain_fn_t)(struct SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len);
typedef struct STaskIdInfo {
@@ -142,7 +142,12 @@ typedef struct {
//TODO remove prepareStatus
STqOffsetVal prepareStatus; // for tmq
STqOffsetVal lastStatus; // for tmq
- void* metaBlk; // for tmq fetching meta
+ SMqMetaRsp metaRsp; // for tmq fetching meta
+ int8_t returned;
+ int64_t snapshotVer;
+
+ SSchemaWrapper *schema;
+ char tbName[TSDB_TABLE_NAME_LEN];
SSDataBlock* pullOverBlk; // for streaming
SWalFilterCond cond;
int64_t lastScanUid;
@@ -150,6 +155,7 @@ typedef struct {
SQueryTableDataCond tableCond;
int64_t recoverStartVer;
int64_t recoverEndVer;
+ SStreamState* pState;
} SStreamTaskInfo;
typedef struct {
@@ -206,6 +212,7 @@ typedef struct SExprSupp {
typedef struct SOperatorInfo {
uint16_t operatorType;
+ int16_t resultDataBlockId;
bool blocking; // block operator or not
uint8_t status; // denote if current operator is completed
char* name; // name, for debug purpose
@@ -217,7 +224,6 @@ typedef struct SOperatorInfo {
struct SOperatorInfo** pDownstream; // downstram pointer list
int32_t numOfDownstream; // number of downstream. The value is always ONE expect for join operator
SOperatorFpSet fpSet;
- int16_t resultDataBlockId;
} SOperatorInfo;
typedef enum {
@@ -296,10 +302,11 @@ enum {
};
typedef struct SAggSupporter {
- SHashObj* pResultRowHashTable; // quick locate the window object for each result
- char* keyBuf; // window key buffer
- SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
- int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ SSHashObj* pResultRowHashTable; // quick locate the window object for each result
+ char* keyBuf; // window key buffer
+ SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
+ int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ int32_t currentPageId; // current write page id
} SAggSupporter;
typedef struct {
@@ -324,7 +331,6 @@ typedef struct STableScanInfo {
SQueryTableDataCond cond;
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
int32_t dataBlockLoadFlag;
-// SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded.
SSampleExecInfo sample; // sample execution info
int32_t currentGroupId;
int32_t currentTable;
@@ -404,6 +410,7 @@ typedef enum EStreamScanMode {
STREAM_SCAN_FROM_READERHANDLE = 1,
STREAM_SCAN_FROM_RES,
STREAM_SCAN_FROM_UPDATERES,
+ STREAM_SCAN_FROM_DELETERES,
STREAM_SCAN_FROM_DATAREADER_RETRIEVE,
STREAM_SCAN_FROM_DATAREADER_RANGE,
} EStreamScanMode;
@@ -428,15 +435,28 @@ typedef struct SStreamAggSupporter {
char* pKeyBuf; // window key buffer
SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ int32_t currentPageId; // buffer page that is active
SSDataBlock* pScanBlock;
} SStreamAggSupporter;
-typedef struct SessionWindowSupporter {
+typedef struct SWindowSupporter {
SStreamAggSupporter* pStreamAggSup;
int64_t gap;
uint16_t parentType;
SAggSupporter* pIntervalAggSup;
-} SessionWindowSupporter;
+} SWindowSupporter;
+
+typedef struct SPartitionBySupporter {
+ SArray* pGroupCols; // group by columns, SArray
+ SArray* pGroupColVals; // current group column values, SArray
+ char* keyBuf; // group by keys for hash
+ bool needCalc; // partition by column
+} SPartitionBySupporter;
+
+typedef struct SPartitionDataInfo {
+ uint64_t groupId;
+ SArray* rowIds;
+} SPartitionDataInfo;
typedef struct STimeWindowSupp {
int8_t calTrigger;
@@ -471,7 +491,9 @@ typedef struct SStreamScanInfo {
SOperatorInfo* pStreamScanOp;
SOperatorInfo* pTableScanOp;
SArray* childIds;
- SessionWindowSupporter sessionSup;
+ SWindowSupporter windowSup;
+ SPartitionBySupporter partitionSup;
+ SExprSupp* pPartScalarSup;
bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA.
int32_t scanWinIndex; // for state operator
int32_t pullDataResIndex;
@@ -482,12 +504,24 @@ typedef struct SStreamScanInfo {
STimeWindowAggSupp twAggSup;
SSDataBlock* pUpdateDataRes;
// status for tmq
- // SSchemaWrapper schema;
- SNodeList* pGroupTags;
- SNode* pTagCond;
- SNode* pTagIndexCond;
+ SNodeList* pGroupTags;
+ SNode* pTagCond;
+ SNode* pTagIndexCond;
} SStreamScanInfo;
+typedef struct {
+ // int8_t subType;
+ // bool withMeta;
+ // int64_t suid;
+ // int64_t snapVersion;
+ // void *metaInfo;
+ // void *dataInfo;
+ SVnode* vnode;
+ SSDataBlock pRes; // result SSDataBlock
+ STsdbReader* dataReader;
+ SSnapContext* sContext;
+} SStreamRawScanInfo;
+
typedef struct SSysTableScanInfo {
SRetrieveMetaTableRsp* pRsp;
SRetrieveTableReq req;
@@ -496,6 +530,7 @@ typedef struct SSysTableScanInfo {
SReadHandle readHandle;
int32_t accountId;
const char* pUser;
+ bool sysInfo;
bool showRewrite;
SNode* pCondition; // db_name filter condition, to discard data that are not in current database
SMTbCursor* pCur; // cursor for iterate the local table meta store.
@@ -510,14 +545,14 @@ typedef struct SBlockDistInfo {
SSDataBlock* pResBlock;
void* pHandle;
SReadHandle readHandle;
- uint64_t uid; // table uid
+ uint64_t uid; // table uid
} SBlockDistInfo;
// todo remove this
typedef struct SOptrBasicInfo {
- SResultRowInfo resultRowInfo;
- SSDataBlock* pRes;
- bool mergeResultBlock;
+ SResultRowInfo resultRowInfo;
+ SSDataBlock* pRes;
+ bool mergeResultBlock;
} SOptrBasicInfo;
typedef struct SIntervalAggOperatorInfo {
@@ -671,7 +706,6 @@ typedef struct SPartitionOperatorInfo {
SArray* sortedGroupArray; // SDataGroupInfo sorted by group id
int32_t groupIndex; // group index
int32_t pageIndex; // page index of current group
- SSDataBlock* pUpdateRes;
SExprSupp scalarSup;
} SPartitionOperatorInfo;
@@ -723,8 +757,8 @@ typedef struct SStreamSessionAggOperatorInfo {
SSDataBlock* pWinBlock; // window result
SqlFunctionCtx* pDummyCtx; // for combine
SSDataBlock* pDelRes; // delete result
- bool returnDelete;
SSDataBlock* pUpdateRes; // update window
+ bool returnUpdate;
SHashObj* pStDeleted;
void* pDelIterator;
SArray* pChildren; // cache for children's result; final stream operator
@@ -733,6 +767,16 @@ typedef struct SStreamSessionAggOperatorInfo {
bool ignoreExpiredData;
} SStreamSessionAggOperatorInfo;
+typedef struct SStreamPartitionOperatorInfo {
+ SOptrBasicInfo binfo;
+ SPartitionBySupporter partitionSup;
+ SExprSupp scalarSup;
+ SHashObj* pPartitions;
+ void* parIte;
+ SSDataBlock* pInputDataBlock;
+ int32_t tsColIndex;
+} SStreamPartitionOperatorInfo;
+
typedef struct STimeSliceOperatorInfo {
SSDataBlock* pRes;
STimeWindow win;
@@ -907,21 +951,15 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy
SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortNode, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** dowStreams, size_t numStreams, SMergePhysiNode* pMergePhysiNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createSortedMergeOperatorInfo(SOperatorInfo** downstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t num, SArray* pSortInfo, SArray* pGroupInfo, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pTableScanNode, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pTableScanNode, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
STimeWindowAggSupp* pTwAggSupp, SIntervalPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, bool isStream);
-
-SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
- SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- bool mergeResultBlock, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
- SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- SNode* pCondition, bool mergeResultBlocks, SExecTaskInfo* pTaskInfo);
-
+SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeIntervalPhysiNode* pIntervalPhyNode,
+ SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SMergeAlignedIntervalPhysiNode* pNode,
+ SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
SExecTaskInfo* pTaskInfo, int32_t numOfChild);
SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPhysiNode* pSessionNode,
@@ -929,22 +967,21 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionW
SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResultBlock, SArray* pGroupColList, SNode* pCondition,
SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SReadHandle* readHandle, uint64_t uid, SBlockDistScanPhysiNode* pBlockScanNode,
- SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SReadHandle* readHandle, uint64_t uid,
+ SBlockDistScanPhysiNode* pBlockScanNode, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode, SNode* pTagCond,
SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
- SSDataBlock* pResBlock, STimeWindowAggSupp *pTwAggSupp, int32_t tsSlotId,
- SColumn* pStateKeyCol, SNode* pCondition, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhysiNode* pStateNode, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNode* pPartNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNode* pPartNode, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SSortMergeJoinPhysiNode* pJoinNode,
SExecTaskInfo* pTaskInfo);
@@ -955,10 +992,6 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream
SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
-#if 0
-SOperatorInfo* createTableSeqScanOperatorInfo(void* pTsdbReadHandle, STaskRuntimeEnv* pRuntimeEnv);
-#endif
-
int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx,
int32_t numOfOutput, SArray* pPseudoList);
@@ -1006,7 +1039,7 @@ int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimary
int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput,
int32_t size);
-SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize);
+SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize);
SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
TSKEY endTs, uint64_t groupId, int64_t gap, int32_t* pIndex);
SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
@@ -1016,8 +1049,9 @@ bool functionNeedToExecute(SqlFunctionCtx* pCtx);
bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup);
bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);
bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup);
-void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid);
+void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid, uint64_t* pGp);
void printDataBlock(SSDataBlock* pBlock, const char* flag);
+uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId);
int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition,
SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, int32_t numOfExprs, const int32_t* rowCellOffset,
diff --git a/source/libs/executor/inc/tsimplehash.h b/source/libs/executor/inc/tsimplehash.h
index 4c5a80e2f1954812a81665954d3dc448467f6ffc..27191e3b7e674df4dcec9dabc7b8cc6fbb35f9f2 100644
--- a/source/libs/executor/inc/tsimplehash.h
+++ b/source/libs/executor/inc/tsimplehash.h
@@ -28,7 +28,7 @@ typedef void (*_hash_free_fn_t)(void *);
/**
* @brief single thread hash
- *
+ *
*/
typedef struct SSHashObj SSHashObj;
@@ -52,13 +52,13 @@ int32_t tSimpleHashPrint(const SSHashObj *pHashObj);
/**
* @brief put element into hash table, if the element with the same key exists, update it
- *
- * @param pHashObj
- * @param key
- * @param keyLen
- * @param data
- * @param dataLen
- * @return int32_t
+ *
+ * @param pHashObj
+ * @param key
+ * @param keyLen
+ * @param data
+ * @param dataLen
+ * @return int32_t
*/
int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, const void *data, size_t dataLen);
@@ -80,6 +80,18 @@ void *tSimpleHashGet(SSHashObj *pHashObj, const void *key, size_t keyLen);
*/
int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key, size_t keyLen);
+/**
+ * remove item with the specified key during hash iterate
+ *
+ * @param pHashObj
+ * @param key
+ * @param keyLen
+ * @param pIter
+ * @param iter
+ * @return int32_t
+ */
+int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t keyLen, void **pIter, int32_t *iter);
+
/**
* Clear the hash table.
* @param pHashObj
@@ -99,13 +111,27 @@ void tSimpleHashCleanup(SSHashObj *pHashObj);
*/
size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj);
+#pragma pack(push, 4)
+typedef struct SHNode{
+ struct SHNode *next;
+ uint32_t keyLen : 20;
+ uint32_t dataLen : 12;
+ char data[];
+} SHNode;
+#pragma pack(pop)
+
/**
* Get the corresponding key information for a given data in hash table
* @param data
* @param keyLen
* @return
*/
-void *tSimpleHashGetKey(void *data, size_t* keyLen);
+static FORCE_INLINE void *tSimpleHashGetKey(void *data, size_t *keyLen) {
+ SHNode *node = (SHNode *)((char *)data - offsetof(SHNode, data));
+ if (keyLen) *keyLen = node->keyLen;
+
+ return POINTER_SHIFT(data, node->dataLen);
+}
/**
* Create the hash table iterator
@@ -116,17 +142,6 @@ void *tSimpleHashGetKey(void *data, size_t* keyLen);
*/
void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter);
-/**
- * Create the hash table iterator
- *
- * @param pHashObj
- * @param data
- * @param key
- * @param iter
- * @return void*
- */
-void *tSimpleHashIterateKV(const SSHashObj *pHashObj, void *data, void **key, int32_t *iter);
-
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c
index 94e4384b3025f0d2ecbbaafd9f92ad10aa84b926..94d9d0cadbd1cf21ac8303a4bee7b86da9695f3c 100644
--- a/source/libs/executor/src/cachescanoperator.c
+++ b/source/libs/executor/src/cachescanoperator.c
@@ -24,26 +24,28 @@
#include "tcompare.h"
#include "thash.h"
#include "ttypes.h"
-#include "executorInt.h"
-static SSDataBlock* doScanLastrow(SOperatorInfo* pOperator);
-static void destroyLastrowScanOperator(void* param, int32_t numOfOutput);
+static SSDataBlock* doScanCache(SOperatorInfo* pOperator);
+static void destroyLastrowScanOperator(void* param);
static int32_t extractTargetSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds);
-SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo) {
+SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandle* readHandle,
+ SExecTaskInfo* pTaskInfo) {
+ int32_t code = TSDB_CODE_SUCCESS;
SLastrowScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SLastrowScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
goto _error;
}
pInfo->readHandle = *readHandle;
- pInfo->pRes = createResDataBlock(pScanNode->scan.node.pOutputDataBlockDesc);
+ pInfo->pRes = createResDataBlock(pScanNode->scan.node.pOutputDataBlockDesc);
int32_t numOfCols = 0;
pInfo->pColMatchInfo = extractColMatchInfo(pScanNode->scan.pScanCols, pScanNode->scan.node.pOutputDataBlockDesc, &numOfCols,
COL_MATCH_FROM_COL_ID);
- int32_t code = extractTargetSlotId(pInfo->pColMatchInfo, pTaskInfo, &pInfo->pSlotIds);
+ code = extractTargetSlotId(pInfo->pColMatchInfo, pTaskInfo, &pInfo->pSlotIds);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -56,13 +58,17 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead
// partition by tbname
if (taosArrayGetSize(pTableList->pGroupList) == taosArrayGetSize(pTableList->pTableList)) {
- pInfo->retrieveType = LASTROW_RETRIEVE_TYPE_ALL;
- tsdbLastRowReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pTableList->pTableList,
- taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader);
+ pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_ALL|CACHESCAN_RETRIEVE_LAST_ROW;
+ code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pTableList->pTableList,
+ taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
pInfo->pBufferredRes = createOneDataBlock(pInfo->pRes, false);
blockDataEnsureCapacity(pInfo->pBufferredRes, pOperator->resultInfo.capacity);
} else { // by tags
- pInfo->retrieveType = LASTROW_RETRIEVE_TYPE_SINGLE;
+ pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_SINGLE|CACHESCAN_RETRIEVE_LAST_ROW;
}
if (pScanNode->scan.pScanPseudoCols != NULL) {
@@ -81,19 +87,19 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);
pOperator->fpSet =
- createOperatorFpSet(operatorDummyOpenFn, doScanLastrow, NULL, NULL, destroyLastrowScanOperator, NULL, NULL, NULL);
+ createOperatorFpSet(operatorDummyOpenFn, doScanCache, NULL, NULL, destroyLastrowScanOperator, NULL, NULL, NULL);
pOperator->cost.openCost = 0;
return pOperator;
_error:
- pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
- taosMemoryFree(pInfo);
+ pTaskInfo->code = code;
+ destroyLastrowScanOperator(pInfo);
taosMemoryFree(pOperator);
return NULL;
}
-SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
+SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
@@ -110,14 +116,14 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
blockDataCleanup(pInfo->pRes);
// check if it is a group by tbname
- if (pInfo->retrieveType == LASTROW_RETRIEVE_TYPE_ALL) {
+ if ((pInfo->retrieveType & CACHESCAN_RETRIEVE_TYPE_ALL) == CACHESCAN_RETRIEVE_TYPE_ALL) {
if (pInfo->indexOfBufferedRes >= pInfo->pBufferredRes->info.rows) {
blockDataCleanup(pInfo->pBufferredRes);
taosArrayClear(pInfo->pUidList);
- int32_t code = tsdbRetrieveLastRow(pInfo->pLastrowReader, pInfo->pBufferredRes, pInfo->pSlotIds, pInfo->pUidList);
+ int32_t code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pBufferredRes, pInfo->pSlotIds, pInfo->pUidList);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
// check for tag values
@@ -173,11 +179,11 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
while (pInfo->currentGroupIndex < totalGroups) {
SArray* pGroupTableList = taosArrayGetP(pTableList->pGroupList, pInfo->currentGroupIndex);
- tsdbLastRowReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pGroupTableList,
+ tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pGroupTableList,
taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader);
taosArrayClear(pInfo->pUidList);
- int32_t code = tsdbRetrieveLastRow(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pUidList);
+ int32_t code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pUidList);
if (code != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, code);
}
@@ -201,7 +207,7 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
}
}
- tsdbLastrowReaderClose(pInfo->pLastrowReader);
+ tsdbCacherowsReaderClose(pInfo->pLastrowReader);
return pInfo->pRes;
}
}
@@ -211,7 +217,7 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
}
}
-void destroyLastrowScanOperator(void* param, int32_t numOfOutput) {
+void destroyLastrowScanOperator(void* param) {
SLastrowScanInfo* pInfo = (SLastrowScanInfo*)param;
blockDataDestroy(pInfo->pRes);
taosMemoryFreeClear(param);
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index 2781fee9565c278db6f9173ba596fbbc6abaf9c8..715eb4780c3129e3aa9d444d06b6878237167a45 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -31,20 +31,6 @@ void initResultRowInfo(SResultRowInfo* pResultRowInfo) {
pResultRowInfo->cur.pageId = -1;
}
-void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo) {
- if (pResultRowInfo == NULL) {
- return;
- }
-
- for (int32_t i = 0; i < pResultRowInfo->size; ++i) {
- // if (pResultRowInfo->pResult[i]) {
- // taosMemoryFreeClear(pResultRowInfo->pResult[i]->key);
- // }
- }
-}
-
-bool isResultRowClosed(SResultRow* pRow) { return (pRow->closed == true); }
-
void closeResultRow(SResultRow* pResultRow) { pResultRow->closed = true; }
// TODO refactor: use macro
@@ -60,8 +46,8 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
rowSize += pCtx[i].resDataInfo.interBufSize;
}
- rowSize +=
- (numOfOutput * sizeof(bool)); // expand rowSize to mark if col is null for top/bottom result(doSaveTupleData)
+ rowSize += (numOfOutput * sizeof(bool));
+ // expand rowSize to mark if col is null for top/bottom result(saveTupleData)
return rowSize;
}
@@ -97,7 +83,7 @@ int32_t resultrowComparAsc(const void* p1, const void* p2) {
static int32_t resultrowComparDesc(const void* p1, const void* p2) { return resultrowComparAsc(p2, p1); }
-void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int32_t order) {
+void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, int32_t order) {
if (pGroupResInfo->pRows != NULL) {
taosArrayDestroy(pGroupResInfo->pRows);
}
@@ -106,9 +92,10 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int
void* pData = NULL;
pGroupResInfo->pRows = taosArrayInit(10, POINTER_BYTES);
- size_t keyLen = 0;
- while ((pData = taosHashIterate(pHashmap, pData)) != NULL) {
- void* key = taosHashGetKey(pData, &keyLen);
+ size_t keyLen = 0;
+ int32_t iter = 0;
+ while ((pData = tSimpleHashIterate(pHashmap, pData, &iter)) != NULL) {
+ void* key = tSimpleHashGetKey(pData, &keyLen);
SResKeyPos* p = taosMemoryMalloc(keyLen + sizeof(SResultRowPosition));
@@ -298,17 +285,17 @@ int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle,
return TSDB_CODE_SUCCESS;
}
-typedef struct tagFilterAssist{
- SHashObj *colHash;
+typedef struct tagFilterAssist {
+ SHashObj* colHash;
int32_t index;
- SArray *cInfoList;
-}tagFilterAssist;
+ SArray* cInfoList;
+} tagFilterAssist;
static EDealRes getColumn(SNode** pNode, void* pContext) {
SColumnNode* pSColumnNode = NULL;
if (QUERY_NODE_COLUMN == nodeType((*pNode))) {
pSColumnNode = *(SColumnNode**)pNode;
- }else if(QUERY_NODE_FUNCTION == nodeType((*pNode))){
+ } else if (QUERY_NODE_FUNCTION == nodeType((*pNode))) {
SFunctionNode* pFuncNode = *(SFunctionNode**)(pNode);
if (pFuncNode->funcType == FUNCTION_TYPE_TBNAME) {
pSColumnNode = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
@@ -321,24 +308,26 @@ static EDealRes getColumn(SNode** pNode, void* pContext) {
pSColumnNode->node.resType.bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE;
nodesDestroyNode(*pNode);
*pNode = (SNode*)pSColumnNode;
- }else{
+ } else {
return DEAL_RES_CONTINUE;
}
- }else{
+ } else {
return DEAL_RES_CONTINUE;
}
- tagFilterAssist *pData = (tagFilterAssist *)pContext;
- void *data = taosHashGet(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId));
- if(!data){
+ tagFilterAssist* pData = (tagFilterAssist*)pContext;
+ void* data = taosHashGet(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId));
+ if (!data) {
taosHashPut(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId), pNode, sizeof((*pNode)));
pSColumnNode->slotId = pData->index++;
- SColumnInfo cInfo = {.colId = pSColumnNode->colId, .type = pSColumnNode->node.resType.type, .bytes = pSColumnNode->node.resType.bytes};
+ SColumnInfo cInfo = {.colId = pSColumnNode->colId,
+ .type = pSColumnNode->node.resType.type,
+ .bytes = pSColumnNode->node.resType.bytes};
#if TAG_FILTER_DEBUG
qDebug("tagfilter build column info, slotId:%d, colId:%d, type:%d", pSColumnNode->slotId, cInfo.colId, cInfo.type);
#endif
taosArrayPush(pData->cInfoList, &cInfo);
- }else{
+ } else {
SColumnNode* col = *(SColumnNode**)data;
pSColumnNode->slotId = col->slotId;
}
@@ -353,14 +342,14 @@ static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarPara
return terrno;
}
- pColumnData->info.type = pType->type;
- pColumnData->info.bytes = pType->bytes;
- pColumnData->info.scale = pType->scale;
+ pColumnData->info.type = pType->type;
+ pColumnData->info.bytes = pType->bytes;
+ pColumnData->info.scale = pType->scale;
pColumnData->info.precision = pType->precision;
int32_t code = colInfoDataEnsureCapacity(pColumnData, numOfRows);
if (code != TSDB_CODE_SUCCESS) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
+ terrno = code;
taosMemoryFree(pColumnData);
return terrno;
}
@@ -370,27 +359,28 @@ static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarPara
return TSDB_CODE_SUCCESS;
}
-static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray* uidList, SNode* pTagCond){
- int32_t code = TSDB_CODE_SUCCESS;
- SArray* pBlockList = NULL;
+static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray* uidList, SNode* pTagCond) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SArray* pBlockList = NULL;
SSDataBlock* pResBlock = NULL;
- SHashObj * tags = NULL;
+ SHashObj* tags = NULL;
SScalarParam output = {0};
tagFilterAssist ctx = {0};
+
ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
- if(ctx.colHash == NULL){
+ if (ctx.colHash == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
ctx.index = 0;
ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo));
- if(ctx.cInfoList == NULL){
+ if (ctx.cInfoList == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
- nodesRewriteExprPostOrder(&pTagCond, getColumn, (void *)&ctx);
+ nodesRewriteExprPostOrder(&pTagCond, getColumn, (void*)&ctx);
pResBlock = createDataBlock();
if (pResBlock == NULL) {
@@ -404,7 +394,7 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray
blockDataAppendColInfo(pResBlock, &colInfo);
}
-// int64_t stt = taosGetTimestampUs();
+ // int64_t stt = taosGetTimestampUs();
tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
code = metaGetTableTags(metaHandle, suid, uidList, tags);
if (code != TSDB_CODE_SUCCESS) {
@@ -414,11 +404,11 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray
}
int32_t rows = taosArrayGetSize(uidList);
- if(rows == 0){
+ if (rows == 0) {
goto end;
}
-// int64_t stt1 = taosGetTimestampUs();
-// qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt);
+ // int64_t stt1 = taosGetTimestampUs();
+ // qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt);
code = blockDataEnsureCapacity(pResBlock, rows);
if (code != TSDB_CODE_SUCCESS) {
@@ -426,46 +416,46 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray
goto end;
}
-// int64_t st = taosGetTimestampUs();
+ // int64_t st = taosGetTimestampUs();
for (int32_t i = 0; i < rows; i++) {
int64_t* uid = taosArrayGet(uidList, i);
- for(int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++){
+ for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) {
SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j);
- if(pColInfo->info.colId == -1){ // tbname
+ if (pColInfo->info.colId == -1) { // tbname
char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
metaGetTableNameByUid(metaHandle, *uid, str);
colDataAppend(pColInfo, i, str, false);
#if TAG_FILTER_DEBUG
- qDebug("tagfilter uid:%ld, tbname:%s", *uid, str+2);
+ qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2);
#endif
- }else{
+ } else {
void* tag = taosHashGet(tags, uid, sizeof(int64_t));
ASSERT(tag);
STagVal tagVal = {0};
tagVal.cid = pColInfo->info.colId;
const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal);
- if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)){
+ if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) {
colDataAppend(pColInfo, i, p, true);
} else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) {
colDataAppend(pColInfo, i, p, false);
} else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) {
- char *tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1);
+ char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1);
varDataSetLen(tmp, tagVal.nData);
memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData);
colDataAppend(pColInfo, i, tmp, false);
#if TAG_FILTER_DEBUG
- qDebug("tagfilter varch:%s", tmp+2);
+ qDebug("tagfilter varch:%s", tmp + 2);
#endif
taosMemoryFree(tmp);
} else {
colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false);
#if TAG_FILTER_DEBUG
- if(pColInfo->info.type == TSDB_DATA_TYPE_INT){
+ if (pColInfo->info.type == TSDB_DATA_TYPE_INT) {
qDebug("tagfilter int:%d", *(int*)(&tagVal.i64));
- }else if(pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE){
- qDebug("tagfilter double:%f", *(double *)(&tagVal.i64));
+ } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) {
+ qDebug("tagfilter double:%f", *(double*)(&tagVal.i64));
}
#endif
}
@@ -474,8 +464,8 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray
}
pResBlock->info.rows = rows;
-// int64_t st1 = taosGetTimestampUs();
-// qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);
+ // int64_t st1 = taosGetTimestampUs();
+ // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);
pBlockList = taosArrayInit(2, POINTER_BYTES);
taosArrayPush(pBlockList, &pResBlock);
@@ -483,17 +473,20 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray
SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};
code = createResultData(&type, rows, &output);
if (code != TSDB_CODE_SUCCESS) {
+      // propagate the failure to terrno so callers can inspect it (set once, below)
qError("failed to create result, reason:%s", tstrerror(code));
+ terrno = code;
goto end;
}
code = scalarCalculate(pTagCond, pBlockList, &output);
- if(code != TSDB_CODE_SUCCESS){
+ if (code != TSDB_CODE_SUCCESS) {
qError("failed to calculate scalar, reason:%s", tstrerror(code));
terrno = code;
+ goto end;
}
-// int64_t st2 = taosGetTimestampUs();
-// qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1);
+ // int64_t st2 = taosGetTimestampUs();
+ // qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1);
end:
taosHashCleanup(tags);
@@ -505,43 +498,43 @@ end:
}
static void releaseColInfoData(void* pCol) {
- if(pCol){
- SColumnInfoData* col = (SColumnInfoData*) pCol;
+ if (pCol) {
+ SColumnInfoData* col = (SColumnInfoData*)pCol;
colDataDestroy(col);
taosMemoryFree(col);
}
}
-int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableListInfo* pTableListInfo){
- int32_t code = TSDB_CODE_SUCCESS;
- SArray *pBlockList = NULL;
- SSDataBlock *pResBlock = NULL;
- SHashObj *tags = NULL;
- SArray *uidList = NULL;
- void *keyBuf = NULL;
- SArray *groupData = NULL;
+int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableListInfo* pTableListInfo) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SArray* pBlockList = NULL;
+ SSDataBlock* pResBlock = NULL;
+ SHashObj* tags = NULL;
+ SArray* uidList = NULL;
+ void* keyBuf = NULL;
+ SArray* groupData = NULL;
int32_t rows = taosArrayGetSize(pTableListInfo->pTableList);
- if(rows == 0){
+ if (rows == 0) {
return TDB_CODE_SUCCESS;
}
tagFilterAssist ctx = {0};
ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
- if(ctx.colHash == NULL){
+ if (ctx.colHash == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
ctx.index = 0;
ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo));
- if(ctx.cInfoList == NULL){
+ if (ctx.cInfoList == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
- SNode* pNode = NULL;
+ SNode* pNode = NULL;
FOREACH(pNode, group) {
- nodesRewriteExprPostOrder(&pNode, getColumn, (void *)&ctx);
+ nodesRewriteExprPostOrder(&pNode, getColumn, (void*)&ctx);
REPLACE_NODE(pNode);
}
@@ -563,61 +556,61 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
taosArrayPush(uidList, &pkeyInfo->uid);
}
-// int64_t stt = taosGetTimestampUs();
+ // int64_t stt = taosGetTimestampUs();
tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
code = metaGetTableTags(metaHandle, pTableListInfo->suid, uidList, tags);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
-// int64_t stt1 = taosGetTimestampUs();
-// qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt);
+ // int64_t stt1 = taosGetTimestampUs();
+ // qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt);
code = blockDataEnsureCapacity(pResBlock, rows);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
-// int64_t st = taosGetTimestampUs();
+ // int64_t st = taosGetTimestampUs();
for (int32_t i = 0; i < rows; i++) {
int64_t* uid = taosArrayGet(uidList, i);
- for(int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++){
+ for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) {
SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j);
- if(pColInfo->info.colId == -1){ // tbname
+ if (pColInfo->info.colId == -1) { // tbname
char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
metaGetTableNameByUid(metaHandle, *uid, str);
colDataAppend(pColInfo, i, str, false);
#if TAG_FILTER_DEBUG
- qDebug("tagfilter uid:%ld, tbname:%s", *uid, str+2);
+ qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2);
#endif
- }else{
+ } else {
void* tag = taosHashGet(tags, uid, sizeof(int64_t));
ASSERT(tag);
STagVal tagVal = {0};
tagVal.cid = pColInfo->info.colId;
const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal);
- if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)){
+ if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) {
colDataAppend(pColInfo, i, p, true);
} else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) {
colDataAppend(pColInfo, i, p, false);
} else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) {
- char *tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1);
+ char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1);
varDataSetLen(tmp, tagVal.nData);
memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData);
colDataAppend(pColInfo, i, tmp, false);
#if TAG_FILTER_DEBUG
- qDebug("tagfilter varch:%s", tmp+2);
+ qDebug("tagfilter varch:%s", tmp + 2);
#endif
taosMemoryFree(tmp);
} else {
colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false);
#if TAG_FILTER_DEBUG
- if(pColInfo->info.type == TSDB_DATA_TYPE_INT){
+ if (pColInfo->info.type == TSDB_DATA_TYPE_INT) {
qDebug("tagfilter int:%d", *(int*)(&tagVal.i64));
- }else if(pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE){
- qDebug("tagfilter double:%f", *(double *)(&tagVal.i64));
+ } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) {
+ qDebug("tagfilter double:%f", *(double*)(&tagVal.i64));
}
#endif
}
@@ -626,8 +619,8 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
}
pResBlock->info.rows = rows;
-// int64_t st1 = taosGetTimestampUs();
-// qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);
+ // int64_t st1 = taosGetTimestampUs();
+ // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);
pBlockList = taosArrayInit(2, POINTER_BYTES);
taosArrayPush(pBlockList, &pResBlock);
@@ -641,7 +634,7 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
break;
case QUERY_NODE_COLUMN:
case QUERY_NODE_OPERATOR:
- case QUERY_NODE_FUNCTION:{
+ case QUERY_NODE_FUNCTION: {
SExprNode* expNode = (SExprNode*)pNode;
code = createResultData(&expNode->resType, rows, &output);
if (code != TSDB_CODE_SUCCESS) {
@@ -653,16 +646,16 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
code = TSDB_CODE_OPS_NOT_SUPPORT;
goto end;
}
- if(nodeType(pNode) == QUERY_NODE_COLUMN){
- SColumnNode* pSColumnNode = (SColumnNode*)pNode;
+ if (nodeType(pNode) == QUERY_NODE_COLUMN) {
+ SColumnNode* pSColumnNode = (SColumnNode*)pNode;
SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, pSColumnNode->slotId);
code = colDataAssign(output.columnData, pColInfo, rows, NULL);
- }else if(nodeType(pNode) == QUERY_NODE_VALUE){
+ } else if (nodeType(pNode) == QUERY_NODE_VALUE) {
continue;
- }else{
+ } else {
code = scalarCalculate(pNode, pBlockList, &output);
}
- if(code != TSDB_CODE_SUCCESS){
+ if (code != TSDB_CODE_SUCCESS) {
releaseColInfoData(output.columnData);
goto end;
}
@@ -670,7 +663,7 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
}
int32_t keyLen = 0;
- SNode* node;
+ SNode* node;
FOREACH(node, group) {
SExprNode* pExpr = (SExprNode*)node;
keyLen += pExpr->resType.bytes;
@@ -684,12 +677,12 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
- for(int i = 0; i < rows; i++){
+ for (int i = 0; i < rows; i++) {
STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i);
char* isNull = (char*)keyBuf;
char* pStart = (char*)keyBuf + sizeof(int8_t) * LIST_LENGTH(group);
- for(int j = 0; j < taosArrayGetSize(groupData); j++){
+ for (int j = 0; j < taosArrayGetSize(groupData); j++) {
SColumnInfoData* pValue = (SColumnInfoData*)taosArrayGetP(groupData, j);
if (colDataIsNull_s(pValue, i)) {
@@ -702,7 +695,7 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
code = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR;
goto end;
}
- if(tTagIsJsonNull(data)){
+ if (tTagIsJsonNull(data)) {
isNull[j] = 1;
continue;
}
@@ -724,10 +717,10 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &info->groupId, sizeof(uint64_t));
}
-// int64_t st2 = taosGetTimestampUs();
-// qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1);
+ // int64_t st2 = taosGetTimestampUs();
+ // qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1);
- end:
+end:
taosMemoryFreeClear(keyBuf);
taosHashCleanup(tags);
taosHashCleanup(ctx.colHash);
@@ -757,7 +750,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
SIndexMetaArg metaArg = {
.metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid};
-// int64_t stt = taosGetTimestampUs();
+ // int64_t stt = taosGetTimestampUs();
SIdxFltStatus status = SFLT_NOT_INDEX;
code = doFilterTag(pTagIndexCond, &metaArg, res, &status);
if (code != 0 || status == SFLT_NOT_INDEX) {
@@ -765,23 +758,25 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
code = TDB_CODE_SUCCESS;
}
-// int64_t stt1 = taosGetTimestampUs();
-// qDebug("generate table list, cost:%ld us", stt1-stt);
- }else if(!pTagCond){
+ // int64_t stt1 = taosGetTimestampUs();
+ // qDebug("generate table list, cost:%ld us", stt1-stt);
+ } else if (!pTagCond) {
vnodeGetCtbIdList(pVnode, pScanNode->suid, res);
}
} else { // Create one table group.
- if(metaIsTableExist(metaHandle, tableUid)){
+ if (metaIsTableExist(metaHandle, tableUid)) {
taosArrayPush(res, &tableUid);
}
}
if (pTagCond) {
+ terrno = TDB_CODE_SUCCESS;
SColumnInfoData* pColInfoData = getColInfoResult(metaHandle, pListInfo->suid, res, pTagCond);
- if(terrno != TDB_CODE_SUCCESS){
+ if (terrno != TDB_CODE_SUCCESS) {
colDataDestroy(pColInfoData);
taosMemoryFreeClear(pColInfoData);
taosArrayDestroy(res);
+ qError("failed to getColInfoResult, code: %s", tstrerror(terrno));
return terrno;
}
@@ -839,7 +834,7 @@ size_t getTableTagsBufLen(const SNodeList* pGroups) {
int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId) {
SMetaReader mr = {0};
metaReaderInit(&mr, pMeta, 0);
- if(metaGetTableEntryByUid(&mr, uid) != 0){ // table not exist
+ if (metaGetTableEntryByUid(&mr, uid) != 0) { // table not exist
metaReaderClear(&mr);
return TSDB_CODE_PAR_TABLE_NOT_EXIST;
}
@@ -943,15 +938,17 @@ SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod
for (int32_t i = 0; i < numOfCols; ++i) {
STargetNode* pNode = (STargetNode*)nodesListGetNode(pNodeList, i);
- SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
-
- SColMatchInfo c = {0};
- c.output = true;
- c.colId = pColNode->colId;
- c.srcSlotId = pColNode->slotId;
- c.matchType = type;
- c.targetSlotId = pNode->slotId;
- taosArrayPush(pList, &c);
+ if (nodeType(pNode->pExpr) == QUERY_NODE_COLUMN) {
+ SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
+
+ SColMatchInfo c = {0};
+ c.output = true;
+ c.colId = pColNode->colId;
+ c.srcSlotId = pColNode->slotId;
+ c.matchType = type;
+ c.targetSlotId = pNode->slotId;
+ taosArrayPush(pList, &c);
+ }
}
*numOfOutputCols = 0;
@@ -997,7 +994,7 @@ static SResSchema createResSchema(int32_t type, int32_t bytes, int32_t slotId, i
return s;
}
-static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDataType* pType) {
+static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDataType* pType, EColumnType colType) {
SColumn* pCol = taosMemoryCalloc(1, sizeof(SColumn));
if (pCol == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -1011,7 +1008,7 @@ static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDa
pCol->scale = pType->scale;
pCol->precision = pType->precision;
pCol->dataBlockId = blockId;
-
+ pCol->colType = colType;
return pCol;
}
@@ -1055,7 +1052,8 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t*
SDataType* pType = &pColNode->node.resType;
pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
pType->precision, pColNode->colName);
- pExp->base.pParam[0].pCol = createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType);
+ pExp->base.pParam[0].pCol =
+ createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType, pColNode->colType);
pExp->base.pParam[0].type = FUNC_PARAM_TYPE_COLUMN;
} else if (type == QUERY_NODE_VALUE) {
pExp->pExpr->nodeType = QUERY_NODE_VALUE;
@@ -1107,7 +1105,8 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t*
SColumnNode* pcn = (SColumnNode*)p1;
pExp->base.pParam[j].type = FUNC_PARAM_TYPE_COLUMN;
- pExp->base.pParam[j].pCol = createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType);
+ pExp->base.pParam[j].pCol =
+ createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType, pcn->colType);
} else if (p1->type == QUERY_NODE_VALUE) {
SValueNode* pvn = (SValueNode*)p1;
pExp->base.pParam[j].type = FUNC_PARAM_TYPE_VALUE;
@@ -1181,7 +1180,6 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
SqlFunctionCtx* pCtx = &pFuncCtx[i];
pCtx->functionId = -1;
- pCtx->curBufPage = -1;
pCtx->pExpr = pExpr;
if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) {
@@ -1194,7 +1192,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
fmGetFuncExecFuncs(pCtx->functionId, &pCtx->fpSet);
} else {
char* udfName = pExpr->pExpr->_function.pFunctNode->functionName;
- strncpy(pCtx->udfName, udfName, strlen(udfName));
+ strncpy(pCtx->udfName, udfName, TSDB_FUNC_NAME_LEN);
fmGetUdafExecFuncs(pCtx->functionId, &pCtx->fpSet);
}
pCtx->fpSet.getEnv(pExpr->pExpr->_function.pFunctNode, &env);
@@ -1222,10 +1220,10 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
pCtx->start.key = INT64_MIN;
pCtx->end.key = INT64_MIN;
pCtx->numOfParams = pExpr->base.numOfParams;
- pCtx->increase = false;
pCtx->isStream = false;
pCtx->param = pFunct->pParam;
+ pCtx->saveHandle.currentPage = -1;
}
for (int32_t i = 1; i < numOfOutput; ++i) {
@@ -1304,6 +1302,7 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi
pCond->type = TIMEWINDOW_RANGE_CONTAINED;
pCond->startVersion = -1;
pCond->endVersion = -1;
+ pCond->schemaVersion = -1;
// pCond->type = pTableScanNode->scanFlag;
int32_t j = 0;
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index d8f63cb0082f4822a7b025adf1733510e2cf91a5..278f02b2283f0e9ccc037d01f77e5956179a3627 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -139,8 +139,24 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols, SSchemaWrapper** pSchema) {
if (msg == NULL) {
- // TODO create raw scan
- return NULL;
+ // create raw scan
+
+ SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo));
+ if (NULL == pTaskInfo) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
+ setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
+
+ pTaskInfo->cost.created = taosGetTimestampMs();
+ pTaskInfo->execModel = OPTR_EXEC_MODEL_QUEUE;
+ pTaskInfo->pRoot = createRawScanOperatorInfo(readers, pTaskInfo);
+ if (NULL == pTaskInfo->pRoot) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ taosMemoryFree(pTaskInfo);
+ return NULL;
+ }
+ return pTaskInfo;
}
struct SSubplan* pPlan = NULL;
@@ -352,12 +368,14 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
int32_t code = createExecTaskInfoImpl(pSubplan, pTask, readHandle, taskId, sql, model);
if (code != TSDB_CODE_SUCCESS) {
+ qError("failed to createExecTaskInfoImpl, code: %s", tstrerror(code));
goto _error;
}
SDataSinkMgtCfg cfg = {.maxDataBlockNum = 10000, .maxDataBlockNumPerQuery = 5000};
code = dsDataSinkMgtInit(&cfg);
if (code != TSDB_CODE_SUCCESS) {
+ qError("failed to dsDataSinkMgtInit, code: %s", tstrerror(code));
goto _error;
}
@@ -365,6 +383,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
void* pSinkParam = NULL;
code = createDataSinkParam(pSubplan->pDataSink, &pSinkParam, pTaskInfo, readHandle);
if (code != TSDB_CODE_SUCCESS) {
+ qError("failed to createDataSinkParam, code: %s", tstrerror(code));
goto _error;
}
@@ -666,15 +685,26 @@ void* qExtractReaderFromStreamScanner(void* scanner) {
return (void*)pInfo->tqReader;
}
-const SSchemaWrapper* qExtractSchemaFromStreamScanner(void* scanner) {
- SStreamScanInfo* pInfo = scanner;
- return pInfo->tqReader->pSchemaWrapper;
+const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo) {
+ SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+ return pTaskInfo->streamInfo.schema;
+}
+
+const char* qExtractTbnameFromTask(qTaskInfo_t tinfo) {
+ SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+ return pTaskInfo->streamInfo.tbName;
+}
+
+SMqMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo) {
+ SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+ ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
+ return &pTaskInfo->streamInfo.metaRsp;
}
-void* qStreamExtractMetaMsg(qTaskInfo_t tinfo) {
+int64_t qStreamExtractPrepareUid(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
- return pTaskInfo->streamInfo.metaBlk;
+ return pTaskInfo->streamInfo.prepareStatus.uid;
}
int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) {
@@ -684,102 +714,167 @@ int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) {
return 0;
}
-int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
+int32_t initQueryTableDataCondForTmq(SQueryTableDataCond* pCond, SSnapContext* sContext, SMetaTableInfo mtInfo) {
+ memset(pCond, 0, sizeof(SQueryTableDataCond));
+ pCond->order = TSDB_ORDER_ASC;
+ pCond->numOfCols = mtInfo.schema->nCols;
+ pCond->colList = taosMemoryCalloc(pCond->numOfCols, sizeof(SColumnInfo));
+ if (pCond->colList == NULL) {
+ terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
+ return terrno;
+ }
+
+ pCond->twindows = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX};
+ pCond->suid = mtInfo.suid;
+ pCond->type = TIMEWINDOW_RANGE_CONTAINED;
+ pCond->startVersion = -1;
+ pCond->endVersion = sContext->snapVersion;
+ pCond->schemaVersion = sContext->snapVersion;
+
+ for (int32_t i = 0; i < pCond->numOfCols; ++i) {
+ pCond->colList[i].type = mtInfo.schema->pSchema[i].type;
+ pCond->colList[i].bytes = mtInfo.schema->pSchema[i].bytes;
+ pCond->colList[i].colId = mtInfo.schema->pSchema[i].colId;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SOperatorInfo* pOperator = pTaskInfo->pRoot;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
pTaskInfo->streamInfo.prepareStatus = *pOffset;
- if (!tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) {
- while (1) {
- uint16_t type = pOperator->operatorType;
- pOperator->status = OP_OPENED;
- // TODO add more check
- if (type != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- ASSERT(pOperator->numOfDownstream == 1);
- pOperator = pOperator->pDownstream[0];
- }
+ pTaskInfo->streamInfo.returned = 0;
+ if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) {
+ return 0;
+ }
+ if (subType == TOPIC_SUB_TYPE__COLUMN) {
+ uint16_t type = pOperator->operatorType;
+ pOperator->status = OP_OPENED;
+ // TODO add more check
+ if (type != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
+ ASSERT(pOperator->numOfDownstream == 1);
+ pOperator = pOperator->pDownstream[0];
+ }
- SStreamScanInfo* pInfo = pOperator->info;
- if (pOffset->type == TMQ_OFFSET__LOG) {
- STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
- tsdbReaderClose(pTSInfo->dataReader);
- pTSInfo->dataReader = NULL;
+ SStreamScanInfo* pInfo = pOperator->info;
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
+ tsdbReaderClose(pTSInfo->dataReader);
+ pTSInfo->dataReader = NULL;
#if 0
- if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) &&
- pInfo->tqReader->pWalReader->curVersion != pOffset->version) {
- qError("prepare scan ver %" PRId64 " actual ver %" PRId64 ", last %" PRId64, pOffset->version,
- pInfo->tqReader->pWalReader->curVersion, pTaskInfo->streamInfo.lastStatus.version);
- ASSERT(0);
- }
+ if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) &&
+ pInfo->tqReader->pWalReader->curVersion != pOffset->version) {
+ qError("prepare scan ver %" PRId64 " actual ver %" PRId64 ", last %" PRId64, pOffset->version,
+ pInfo->tqReader->pWalReader->curVersion, pTaskInfo->streamInfo.lastStatus.version);
+ ASSERT(0);
+ }
#endif
- if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) {
+ if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) {
+ return -1;
+ }
+ ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1);
+ } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ /*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
+ int64_t uid = pOffset->uid;
+ int64_t ts = pOffset->ts;
+
+ if (uid == 0) {
+ if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
+ STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
+ uid = pTableInfo->uid;
+ ts = INT64_MIN;
+ } else {
return -1;
}
- ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1);
- } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
- /*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
- int64_t uid = pOffset->uid;
- int64_t ts = pOffset->ts;
-
- if (uid == 0) {
- if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
- STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
- uid = pTableInfo->uid;
- ts = INT64_MIN;
- } else {
- return -1;
- }
- }
+ }
- /*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/
- /*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/
- STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
- int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
+ /*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/
+ /*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/
+ STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
+ int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
#ifndef NDEBUG
-
- qDebug("switch to next table %" PRId64 " (cursor %d), %" PRId64 " rows returned", uid,
- pTableScanInfo->currentTable, pInfo->pTableScanOp->resultInfo.totalRows);
- pInfo->pTableScanOp->resultInfo.totalRows = 0;
+ qDebug("switch to next table %" PRId64 " (cursor %d), %" PRId64 " rows returned", uid,
+ pTableScanInfo->currentTable, pInfo->pTableScanOp->resultInfo.totalRows);
+ pInfo->pTableScanOp->resultInfo.totalRows = 0;
#endif
- bool found = false;
- for (int32_t i = 0; i < tableSz; i++) {
- STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
- if (pTableInfo->uid == uid) {
- found = true;
- pTableScanInfo->currentTable = i;
- break;
- }
+ bool found = false;
+ for (int32_t i = 0; i < tableSz; i++) {
+ STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
+ if (pTableInfo->uid == uid) {
+ found = true;
+ pTableScanInfo->currentTable = i;
+ break;
}
+ }
- // TODO after dropping table, table may be not found
- ASSERT(found);
+ // TODO after dropping table, table may be not found
+ ASSERT(found);
- if (pTableScanInfo->dataReader == NULL) {
- if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond,
- pTaskInfo->tableqinfoList.pTableList, &pTableScanInfo->dataReader, NULL) < 0 ||
- pTableScanInfo->dataReader == NULL) {
- ASSERT(0);
- }
+ if (pTableScanInfo->dataReader == NULL) {
+ if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond,
+ pTaskInfo->tableqinfoList.pTableList, &pTableScanInfo->dataReader, NULL) < 0 ||
+ pTableScanInfo->dataReader == NULL) {
+ ASSERT(0);
}
+ }
- tsdbSetTableId(pTableScanInfo->dataReader, uid);
- int64_t oldSkey = pTableScanInfo->cond.twindows.skey;
- pTableScanInfo->cond.twindows.skey = ts + 1;
- tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
- pTableScanInfo->cond.twindows.skey = oldSkey;
- pTableScanInfo->scanTimes = 0;
+ tsdbSetTableId(pTableScanInfo->dataReader, uid);
+ int64_t oldSkey = pTableScanInfo->cond.twindows.skey;
+ pTableScanInfo->cond.twindows.skey = ts + 1;
+ tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
+ pTableScanInfo->cond.twindows.skey = oldSkey;
+ pTableScanInfo->scanTimes = 0;
- qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid,
- ts, pTableScanInfo->currentTable, tableSz);
- /*}*/
+ qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid,
+ ts, pTableScanInfo->currentTable, tableSz);
+ /*}*/
+ } else {
+ ASSERT(0);
+ }
+ } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ SStreamRawScanInfo* pInfo = pOperator->info;
+ SSnapContext* sContext = pInfo->sContext;
+ if (setForSnapShot(sContext, pOffset->uid) != 0) {
+ qError("setDataForSnapShot error. uid:%" PRIi64, pOffset->uid);
+ return -1;
+ }
- } else {
- ASSERT(0);
- }
- return 0;
+ SMetaTableInfo mtInfo = getUidfromSnapShot(sContext);
+ tsdbReaderClose(pInfo->dataReader);
+ pInfo->dataReader = NULL;
+ cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond);
+ taosArrayDestroy(pTaskInfo->tableqinfoList.pTableList);
+ if (mtInfo.uid == 0) return 0; // no data
+
+ initQueryTableDataCondForTmq(&pTaskInfo->streamInfo.tableCond, sContext, mtInfo);
+ pTaskInfo->streamInfo.tableCond.twindows.skey = pOffset->ts;
+ pTaskInfo->tableqinfoList.pTableList = taosArrayInit(1, sizeof(STableKeyInfo));
+ taosArrayPush(pTaskInfo->tableqinfoList.pTableList, &(STableKeyInfo){.uid = mtInfo.uid, .groupId = 0});
+ tsdbReaderOpen(pInfo->vnode, &pTaskInfo->streamInfo.tableCond, pTaskInfo->tableqinfoList.pTableList,
+ &pInfo->dataReader, NULL);
+
+ strcpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName);
+ tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema);
+ pTaskInfo->streamInfo.schema = mtInfo.schema;
+
+    qDebug("tmqsnap qStreamPrepareScan snapshot data uid %" PRId64 " ts %" PRId64, mtInfo.uid, pOffset->ts);
+ } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_META) {
+ SStreamRawScanInfo* pInfo = pOperator->info;
+ SSnapContext* sContext = pInfo->sContext;
+ if (setForSnapShot(sContext, pOffset->uid) != 0) {
+      qError("setForSnapShot error. uid:%" PRIi64, pOffset->uid);
+ return -1;
}
+    qDebug("tmqsnap qStreamPrepareScan snapshot meta uid %" PRId64, pOffset->uid);
+ } else if (pOffset->type == TMQ_OFFSET__LOG) {
+ SStreamRawScanInfo* pInfo = pOperator->info;
+ tsdbReaderClose(pInfo->dataReader);
+ pInfo->dataReader = NULL;
+ qDebug("tmqsnap qStreamPrepareScan snapshot log");
}
return 0;
}
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index 2d72bc813f6b767891bce7d45570eb8db2e2be93..e17b994f0e8bf0a2319d44915ae6a1c4df7507bc 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -76,12 +76,6 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
#define realloc u_realloc
#endif
-#define T_LONG_JMP(_obj, _c) \
- do { \
- assert((_c) != -1); \
- longjmp((_obj), (_c)); \
- } while (0);
-
#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0)
@@ -92,19 +86,17 @@ static int32_t getExprFunctionId(SExprInfo* pExprInfo) {
return 0;
}
-static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes);
-
-static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pSDataBlock);
+static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pBlock);
static void releaseQueryBuf(size_t numOfTables);
-static void destroyFillOperatorInfo(void* param, int32_t numOfOutput);
-static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput);
-static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput);
-static void destroyAggOperatorInfo(void* param, int32_t numOfOutput);
+static void destroyFillOperatorInfo(void* param);
+static void destroyProjectOperatorInfo(void* param);
+static void destroyOrderOperatorInfo(void* param);
+static void destroyAggOperatorInfo(void* param);
-static void destroyIntervalOperatorInfo(void* param, int32_t numOfOutput);
-static void destroyExchangeOperatorInfo(void* param, int32_t numOfOutput);
+static void destroyIntervalOperatorInfo(void* param);
+static void destroyExchangeOperatorInfo(void* param);
static void destroyOperatorInfo(SOperatorInfo* pOperator);
@@ -148,20 +140,6 @@ static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock,
static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size);
static void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId);
-// setup the output buffer for each operator
-static bool hasNull(SColumn* pColumn, SColumnDataAgg* pStatis) {
- if (TSDB_COL_IS_TAG(pColumn->flag) || TSDB_COL_IS_UD_COL(pColumn->flag) ||
- pColumn->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
- return false;
- }
-
- if (pStatis != NULL && pStatis->numOfNull == 0) {
- return false;
- }
-
- return true;
-}
-
#if 0
static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, char* pData,
int16_t bytes, bool masterscan, uint64_t uid) {
@@ -201,26 +179,23 @@ static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pR
}
#endif
-SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize) {
+SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize) {
SFilePage* pData = NULL;
// in the first scan, new space needed for results
int32_t pageId = -1;
- SIDList list = getDataBufPagesIdList(pResultBuf, tableGroupId);
-
- if (taosArrayGetSize(list) == 0) {
- pData = getNewBufPage(pResultBuf, tableGroupId, &pageId);
+ if (*currentPageId == -1) {
+ pData = getNewBufPage(pResultBuf, &pageId);
pData->num = sizeof(SFilePage);
} else {
- SPageInfo* pi = getLastPageInfo(list);
- pData = getBufPage(pResultBuf, getPageId(pi));
- pageId = getPageId(pi);
+ pData = getBufPage(pResultBuf, *currentPageId);
+ pageId = *currentPageId;
if (pData->num + interBufSize > getBufPageSize(pResultBuf)) {
// release current page first, and prepare the next one
- releaseBufPageInfo(pResultBuf, pi);
+ releaseBufPage(pResultBuf, pData);
- pData = getNewBufPage(pResultBuf, tableGroupId, &pageId);
+ pData = getNewBufPage(pResultBuf, &pageId);
if (pData != NULL) {
pData->num = sizeof(SFilePage);
}
@@ -237,9 +212,9 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int
SResultRow* pResultRow = (SResultRow*)((char*)pData + pData->num);
pResultRow->pageId = pageId;
pResultRow->offset = (int32_t)pData->num;
+ *currentPageId = pageId;
pData->num += interBufSize;
-
return pResultRow;
}
@@ -256,7 +231,7 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId);
SResultRowPosition* p1 =
- (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ (SResultRowPosition*)tSimpleHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
SResultRow* pResult = NULL;
@@ -278,9 +253,6 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// 1. close current opened time window
if (pResultRowInfo->cur.pageId != -1 && ((pResult == NULL) || (pResult->pageId != pResultRowInfo->cur.pageId))) {
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_1");
-#endif
SResultRowPosition pos = pResultRowInfo->cur;
SFilePage* pPage = getBufPage(pResultBuf, pos.pageId);
releaseBufPage(pResultBuf, pPage);
@@ -288,18 +260,13 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// allocate a new buffer page
if (pResult == NULL) {
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_2");
-#endif
ASSERT(pSup->resultRowSize > 0);
- pResult = getNewResultRow(pResultBuf, groupId, pSup->resultRowSize);
-
- initResultRow(pResult);
+ pResult = getNewResultRow(pResultBuf, &pSup->currentPageId, pSup->resultRowSize);
// add a new result set for a new group
SResultRowPosition pos = {.pageId = pResult->pageId, .offset = pResult->offset};
- taosHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos,
- sizeof(SResultRowPosition));
+ tSimpleHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos,
+ sizeof(SResultRowPosition));
}
// 2. set the new time window to be the new active time window
@@ -307,8 +274,8 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// too many time window in query
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_BATCH &&
- taosHashGetSize(pSup->pResultRowHashTable) > MAX_INTERVAL_TIME_WINDOW) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW);
+ tSimpleHashGetSize(pSup->pResultRowHashTable) > MAX_INTERVAL_TIME_WINDOW) {
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW);
}
return pResult;
@@ -324,10 +291,10 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes
// in the first scan, new space needed for results
int32_t pageId = -1;
- SIDList list = getDataBufPagesIdList(pResultBuf, tid);
+ SIDList list = getDataBufPagesIdList(pResultBuf);
if (taosArrayGetSize(list) == 0) {
- pData = getNewBufPage(pResultBuf, tid, &pageId);
+ pData = getNewBufPage(pResultBuf, &pageId);
pData->num = sizeof(SFilePage);
} else {
SPageInfo* pi = getLastPageInfo(list);
@@ -338,7 +305,7 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes
// release current page first, and prepare the next one
releaseBufPageInfo(pResultBuf, pi);
- pData = getNewBufPage(pResultBuf, tid, &pageId);
+ pData = getNewBufPage(pResultBuf, &pageId);
if (pData != NULL) {
pData->num = sizeof(SFilePage);
}
@@ -392,7 +359,7 @@ static void functionCtxSave(SqlFunctionCtx* pCtx, SFunctionCtxStatus* pStatus) {
static void functionCtxRestore(SqlFunctionCtx* pCtx, SFunctionCtxStatus* pStatus) {
pCtx->input.colDataAggIsSet = pStatus->hasAgg;
- pCtx->input.numOfRows = pStatus->numOfRows;
+ pCtx->input.numOfRows = pStatus->numOfRows;
pCtx->input.startRowIndex = pStatus->startOffset;
}
@@ -434,7 +401,7 @@ void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfo
if (code != TSDB_CODE_SUCCESS) {
qError("%s apply functions error, code: %s", GET_TASKID(taskInfo), tstrerror(code));
taskInfo->code = code;
- longjmp(taskInfo->env, code);
+ T_LONG_JMP(taskInfo->env, code);
}
}
@@ -625,7 +592,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
if (pExpr[k].pExpr->nodeType == QUERY_NODE_COLUMN) { // it is a project query
SColumnInfoData* pColInfoData = taosArrayGet(pResult->pDataBlock, outputSlotId);
if (pResult->info.rows > 0 && !createNewColModel) {
- colDataMergeCol(pColInfoData, pResult->info.rows, &pResult->info.capacity, pInputData->pData[0],
+ colDataMergeCol(pColInfoData, pResult->info.rows, (int32_t*)&pResult->info.capacity, pInputData->pData[0],
pInputData->numOfRows);
} else {
colDataAssign(pColInfoData, pInputData->pData[0], pInputData->numOfRows, &pResult->info);
@@ -663,7 +630,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
int32_t startOffset = createNewColModel ? 0 : pResult->info.rows;
ASSERT(pResult->info.capacity > 0);
- colDataMergeCol(pResColData, startOffset, &pResult->info.capacity, &idata, dest.numOfRows);
+ colDataMergeCol(pResColData, startOffset, (int32_t*)&pResult->info.capacity, &idata, dest.numOfRows);
colDataDestroy(&idata);
numOfRows = dest.numOfRows;
@@ -728,7 +695,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
int32_t startOffset = createNewColModel ? 0 : pResult->info.rows;
ASSERT(pResult->info.capacity > 0);
- colDataMergeCol(pResColData, startOffset, &pResult->info.capacity, &idata, dest.numOfRows);
+ colDataMergeCol(pResColData, startOffset, (int32_t*)&pResult->info.capacity, &idata, dest.numOfRows);
colDataDestroy(&idata);
numOfRows = dest.numOfRows;
@@ -848,13 +815,6 @@ void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, SSDataBlock* pB
} else {
pInput->colDataAggIsSet = false;
}
-
- // set the statistics data for primary time stamp column
- // if (pCtx->functionId == FUNCTION_SPREAD && pColumn->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
- // pCtx->isAggSet = true;
- // pCtx->agg.min = pBlock->info.window.skey;
- // pCtx->agg.max = pBlock->info.window.ekey;
- // }
}
bool isTaskKilled(SExecTaskInfo* pTaskInfo) {
@@ -891,146 +851,6 @@ STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int
return win;
}
-#if 0
-static int32_t updateBlockLoadStatus(STaskAttr* pQuery, int32_t status) {
-
- bool hasFirstLastFunc = false;
- bool hasOtherFunc = false;
-
- if (status == BLK_DATA_DATA_LOAD || status == BLK_DATA_FILTEROUT) {
- return status;
- }
-
- for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
- int32_t functionId = getExprFunctionId(&pQuery->pExpr1[i]);
-
- if (functionId == FUNCTION_TS || functionId == FUNCTION_TS_DUMMY || functionId == FUNCTION_TAG ||
- functionId == FUNCTION_TAG_DUMMY) {
- continue;
- }
-
- if (functionId == FUNCTION_FIRST_DST || functionId == FUNCTION_LAST_DST) {
- hasFirstLastFunc = true;
- } else {
- hasOtherFunc = true;
- }
-
- }
-
- if (hasFirstLastFunc && status == BLK_DATA_NOT_LOAD) {
- if (!hasOtherFunc) {
- return BLK_DATA_FILTEROUT;
- } else {
- return BLK_DATA_DATA_LOAD;
- }
- }
-
- return status;
-}
-
-#endif
-
-// static void updateDataCheckOrder(SQInfo *pQInfo, SQueryTableReq* pQueryMsg, bool stableQuery) {
-// STaskAttr* pQueryAttr = pQInfo->runtimeEnv.pQueryAttr;
-//
-// // in case of point-interpolation query, use asc order scan
-// char msg[] = "QInfo:0x%"PRIx64" scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%"
-// PRId64
-// "-%" PRId64 ", new qrange:%" PRId64 "-%" PRId64;
-//
-// // todo handle the case the the order irrelevant query type mixed up with order critical query type
-// // descending order query for last_row query
-// if (isFirstLastRowQuery(pQueryAttr)) {
-// //qDebug("QInfo:0x%"PRIx64" scan order changed for last_row query, old:%d, new:%d", pQInfo->qId,
-// pQueryAttr->order.order, TSDB_ORDER_ASC);
-//
-// pQueryAttr->order.order = TSDB_ORDER_ASC;
-// if (pQueryAttr->window.skey > pQueryAttr->window.ekey) {
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// }
-//
-// pQueryAttr->needReverseScan = false;
-// return;
-// }
-//
-// if (pQueryAttr->groupbyColumn && pQueryAttr->order.order == TSDB_ORDER_DESC) {
-// pQueryAttr->order.order = TSDB_ORDER_ASC;
-// if (pQueryAttr->window.skey > pQueryAttr->window.ekey) {
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// }
-//
-// pQueryAttr->needReverseScan = false;
-// doUpdateLastKey(pQueryAttr);
-// return;
-// }
-//
-// if (pQueryAttr->pointInterpQuery && pQueryAttr->interval.interval == 0) {
-// if (!QUERY_IS_ASC_QUERY(pQueryAttr)) {
-// //qDebug(msg, pQInfo->qId, "interp", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey,
-// pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); TSWAP(pQueryAttr->window.skey,
-// pQueryAttr->window.ekey, TSKEY);
-// }
-//
-// pQueryAttr->order.order = TSDB_ORDER_ASC;
-// return;
-// }
-//
-// if (pQueryAttr->interval.interval == 0) {
-// if (onlyFirstQuery(pQueryAttr)) {
-// if (!QUERY_IS_ASC_QUERY(pQueryAttr)) {
-// //qDebug(msg, pQInfo->qId, "only-first", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey,
-//// pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
-//
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// doUpdateLastKey(pQueryAttr);
-// }
-//
-// pQueryAttr->order.order = TSDB_ORDER_ASC;
-// pQueryAttr->needReverseScan = false;
-// } else if (onlyLastQuery(pQueryAttr) && notContainSessionOrStateWindow(pQueryAttr)) {
-// if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
-// //qDebug(msg, pQInfo->qId, "only-last", pQueryAttr->order.order, TSDB_ORDER_DESC, pQueryAttr->window.skey,
-//// pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
-//
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// doUpdateLastKey(pQueryAttr);
-// }
-//
-// pQueryAttr->order.order = TSDB_ORDER_DESC;
-// pQueryAttr->needReverseScan = false;
-// }
-//
-// } else { // interval query
-// if (stableQuery) {
-// if (onlyFirstQuery(pQueryAttr)) {
-// if (!QUERY_IS_ASC_QUERY(pQueryAttr)) {
-// //qDebug(msg, pQInfo->qId, "only-first stable", pQueryAttr->order.order, TSDB_ORDER_ASC,
-//// pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey,
-/// pQueryAttr->window.skey);
-//
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// doUpdateLastKey(pQueryAttr);
-// }
-//
-// pQueryAttr->order.order = TSDB_ORDER_ASC;
-// pQueryAttr->needReverseScan = false;
-// } else if (onlyLastQuery(pQueryAttr)) {
-// if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
-// //qDebug(msg, pQInfo->qId, "only-last stable", pQueryAttr->order.order, TSDB_ORDER_DESC,
-//// pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey,
-/// pQueryAttr->window.skey);
-//
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// doUpdateLastKey(pQueryAttr);
-// }
-//
-// pQueryAttr->order.order = TSDB_ORDER_DESC;
-// pQueryAttr->needReverseScan = false;
-// }
-// }
-// }
-//}
-
#if 0
static bool overlapWithTimeWindow(STaskAttr* pQueryAttr, SDataBlockInfo* pBlockInfo) {
STimeWindow w = {0};
@@ -1152,7 +972,7 @@ int32_t loadDataBlockOnDemand(SExecTaskInfo* pTaskInfo, STableScanInfo* pTableSc
if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.uid, &win, masterScan, &pResult, groupId,
pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
pTableScanInfo->rowEntryInfoOffset) != TSDB_CODE_SUCCESS) {
- longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
}
} else if (pQueryAttr->stableQuery && (!pQueryAttr->tsCompQuery) && (!pQueryAttr->diffQuery)) { // stable aggregate, not interval aggregate or normal column aggregate
@@ -1203,7 +1023,7 @@ int32_t loadDataBlockOnDemand(SExecTaskInfo* pTaskInfo, STableScanInfo* pTableSc
if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.uid, &win, masterScan, &pResult, groupId,
pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
pTableScanInfo->rowEntryInfoOffset) != TSDB_CODE_SUCCESS) {
- longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
}
}
@@ -1256,24 +1076,6 @@ static void updateTableQueryInfoForReverseScan(STableQueryInfo* pTableQueryInfo)
if (pTableQueryInfo == NULL) {
return;
}
-
- // TSWAP(pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey);
- // pTableQueryInfo->lastKey = pTableQueryInfo->win.skey;
-
- // SWITCH_ORDER(pTableQueryInfo->cur.order);
- // pTableQueryInfo->cur.vgroupIndex = -1;
-
- // set the index to be the end slot of result rows array
- // SResultRowInfo* pResultRowInfo = &pTableQueryInfo->resInfo;
- // if (pResultRowInfo->size > 0) {
- // pResultRowInfo->curPos = pResultRowInfo->size - 1;
- // } else {
- // pResultRowInfo->curPos = -1;
- // }
-}
-
-void initResultRow(SResultRow* pResultRow) {
- // pResultRow->pEntryInfo = (struct SResultRowEntryInfo*)((char*)pResultRow + sizeof(SResultRow));
}
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status) {
@@ -1286,15 +1088,6 @@ void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status) {
}
}
-void destroyTableQueryInfoImpl(STableQueryInfo* pTableQueryInfo) {
- if (pTableQueryInfo == NULL) {
- return;
- }
-
- // taosVariantDestroy(&pTableQueryInfo->tag);
- // cleanupResultRowInfo(&pTableQueryInfo->resInfo);
-}
-
void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset) {
bool init = false;
for (int32_t i = 0; i < numOfOutput; ++i) {
@@ -1495,7 +1288,7 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
if (TAOS_FAILED(code)) {
releaseBufPage(pBuf, page);
qError("%s ensure result data capacity failed, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
@@ -1507,7 +1300,7 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
if (TAOS_FAILED(code)) {
qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
} else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
// do nothing, todo refactor
@@ -1581,7 +1374,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
if (TAOS_FAILED(code)) {
qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
} else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
// do nothing, todo refactor
@@ -1590,16 +1383,8 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS
// the _wstart needs to copy to 20 following rows, since the results of top-k expands to 20 different rows.
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, slotId);
char* in = GET_ROWCELL_INTERBUF(pCtx[j].resultInfo);
- if (pCtx[j].increase) {
- int64_t ts = *(int64_t*)in;
- for (int32_t k = 0; k < pRow->numOfRows; ++k) {
- colDataAppend(pColInfoData, pBlock->info.rows + k, (const char*)&ts, pCtx[j].resultInfo->isNullRes);
- ts++;
- }
- } else {
- for (int32_t k = 0; k < pRow->numOfRows; ++k) {
- colDataAppend(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes);
- }
+ for (int32_t k = 0; k < pRow->numOfRows; ++k) {
+ colDataAppend(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes);
}
}
}
@@ -1736,7 +1521,7 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) {
// SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER;
// while (tsdbNextDataBlock(pTsdbReadHandle)) {
// if (isTaskKilled(pRuntimeEnv->qinfo)) {
-// longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
+// T_LONG_JMP(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
// }
//
// tsdbRetrieveDataBlockInfo(pTsdbReadHandle, &blockInfo);
@@ -1755,7 +1540,7 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) {
// }
//
// if (terrno != TSDB_CODE_SUCCESS) {
-// longjmp(pRuntimeEnv->env, terrno);
+// T_LONG_JMP(pRuntimeEnv->env, terrno);
// }
// }
@@ -1919,7 +1704,7 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) {
//
// // check for error
// if (terrno != TSDB_CODE_SUCCESS) {
-// longjmp(pRuntimeEnv->env, terrno);
+// T_LONG_JMP(pRuntimeEnv->env, terrno);
// }
//
// return true;
@@ -2771,7 +2556,7 @@ static SSDataBlock* doSortedMerge(SOperatorInfo* pOperator) {
int32_t code = tsortOpen(pInfo->pSortHandle);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
pOperator->status = OP_RES_TO_RETURN;
@@ -2854,92 +2639,6 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan
}
}
}
-#if 0
-int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) {
- uint8_t type = pOperator->operatorType;
-
- pOperator->status = OP_OPENED;
-
- if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- SStreamScanInfo* pScanInfo = pOperator->info;
- pScanInfo->blockType = STREAM_INPUT__TABLE_SCAN;
-
- pScanInfo->pTableScanOp->status = OP_OPENED;
-
- STableScanInfo* pInfo = pScanInfo->pTableScanOp->info;
- ASSERT(pInfo->scanMode == TABLE_SCAN__TABLE_ORDER);
-
- if (uid == 0) {
- pInfo->noTable = 1;
- return TSDB_CODE_SUCCESS;
- }
-
- /*if (pSnapShotScanInfo->dataReader == NULL) {*/
- /*pSnapShotScanInfo->dataReader = tsdbReaderOpen(pHandle->vnode, &pSTInfo->cond, tableList, 0, 0);*/
- /*pSnapShotScanInfo->scanMode = TABLE_SCAN__TABLE_ORDER;*/
- /*}*/
-
- pInfo->noTable = 0;
-
- if (pInfo->lastStatus.uid != uid || pInfo->lastStatus.ts != ts) {
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
-
- int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
- bool found = false;
- for (int32_t i = 0; i < tableSz; i++) {
- STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
- if (pTableInfo->uid == uid) {
- found = true;
- pInfo->currentTable = i;
- }
- }
- // TODO after processing drop, found can be false
- ASSERT(found);
-
- tsdbSetTableId(pInfo->dataReader, uid);
- int64_t oldSkey = pInfo->cond.twindows.skey;
- pInfo->cond.twindows.skey = ts + 1;
- tsdbReaderReset(pInfo->dataReader, &pInfo->cond);
- pInfo->cond.twindows.skey = oldSkey;
- pInfo->scanTimes = 0;
-
- qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid, ts,
- pInfo->currentTable, tableSz);
- }
-
- return TSDB_CODE_SUCCESS;
-
- } else {
- if (pOperator->numOfDownstream == 1) {
- return doPrepareScan(pOperator->pDownstream[0], uid, ts);
- } else if (pOperator->numOfDownstream == 0) {
- qError("failed to find stream scan operator to set the input data block");
- return TSDB_CODE_QRY_APP_ERROR;
- } else {
- qError("join not supported for stream block scan");
- return TSDB_CODE_QRY_APP_ERROR;
- }
- }
-}
-
-int32_t doGetScanStatus(SOperatorInfo* pOperator, uint64_t* uid, int64_t* ts) {
- int32_t type = pOperator->operatorType;
- if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- SStreamScanInfo* pScanInfo = pOperator->info;
- STableScanInfo* pSnapShotScanInfo = pScanInfo->pTableScanOp->info;
- *uid = pSnapShotScanInfo->lastStatus.uid;
- *ts = pSnapShotScanInfo->lastStatus.ts;
- } else {
- if (pOperator->pDownstream[0] == NULL) {
- return TSDB_CODE_INVALID_PARA;
- } else {
- doGetScanStatus(pOperator->pDownstream[0], uid, ts);
- }
- }
-
- return TSDB_CODE_SUCCESS;
-}
-#endif
// this is a blocking operator
static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
@@ -2966,7 +2665,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
int32_t code = getTableScanInfo(pOperator, &order, &scanFlag);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
// there is an scalar expression that needs to be calculated before apply the group aggregation.
@@ -2974,7 +2673,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
SExprSupp* pSup1 = &pAggInfo->scalarExprSup;
code = projectApplyFunctions(pSup1->pExprInfo, pBlock, pBlock, pSup1->pCtx, pSup1->numOfExprs, NULL);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
@@ -2983,7 +2682,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, true);
code = doAggregateImpl(pOperator, pSup->pCtx);
if (code != 0) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
@@ -3036,7 +2735,7 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len
}
SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info);
SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo));
- int32_t size = taosHashGetSize(pSup->pResultRowHashTable);
+ int32_t size = tSimpleHashGetSize(pSup->pResultRowHashTable);
size_t keyLen = sizeof(uint64_t) * 2; // estimate the key length
int32_t totalSize =
sizeof(int32_t) + sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize);
@@ -3064,9 +2763,10 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len
setBufPageDirty(pPage, true);
releaseBufPage(pSup->pResultBuf, pPage);
- void* pIter = taosHashIterate(pSup->pResultRowHashTable, NULL);
- while (pIter) {
- void* key = taosHashGetKey(pIter, &keyLen);
+ int32_t iter = 0;
+ void* pIter = NULL;
+ while ((pIter = tSimpleHashIterate(pSup->pResultRowHashTable, pIter, &iter))) {
+ void* key = tSimpleHashGetKey(pIter, &keyLen);
SResultRowPosition* p1 = (SResultRowPosition*)pIter;
pPage = (SFilePage*)getBufPage(pSup->pResultBuf, p1->pageId);
@@ -3097,8 +2797,6 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len
offset += sizeof(int32_t);
memcpy(*result + offset, pRow, pSup->resultRowSize);
offset += pSup->resultRowSize;
-
- pIter = taosHashIterate(pSup->pResultRowHashTable, pIter);
}
*(int32_t*)(*result) = offset;
@@ -3126,14 +2824,14 @@ int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) {
offset += sizeof(int32_t);
uint64_t tableGroupId = *(uint64_t*)(result + offset);
- SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, tableGroupId, pSup->resultRowSize);
+ SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, &pSup->currentPageId, pSup->resultRowSize);
if (!resultRow) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
// add a new result set for a new group
SResultRowPosition pos = {.pageId = resultRow->pageId, .offset = resultRow->offset};
- taosHashPut(pSup->pResultRowHashTable, result + offset, keyLen, &pos, sizeof(SResultRowPosition));
+ tSimpleHashPut(pSup->pResultRowHashTable, result + offset, keyLen, &pos, sizeof(SResultRowPosition));
offset += keyLen;
int32_t valueLen = *(int32_t*)(result + offset);
@@ -3148,7 +2846,6 @@ int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) {
resultRow->offset = pOffset;
offset += valueLen;
- initResultRow(resultRow);
pInfo->resultRowInfo.cur = (SResultRowPosition){.pageId = resultRow->pageId, .offset = resultRow->offset};
// releaseBufPage(pSup->pResultBuf, getBufPage(pSup->pResultBuf, pageId));
}
@@ -3250,6 +2947,7 @@ static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOp
Q_STATUS_EQUAL(pTaskInfo->status, TASK_COMPLETED) ? pInfo->win.ekey : pInfo->existNewGroupBlock->info.window.ekey;
taosResetFillInfo(pInfo->pFillInfo, getFillInfoStart(pInfo->pFillInfo));
+ blockDataCleanup(pInfo->pRes);
doApplyScalarCalculation(pOperator, pInfo->existNewGroupBlock, order, scanFlag);
taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, ekey);
@@ -3312,7 +3010,6 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
SSDataBlock* pResBlock = pInfo->pFinalRes;
blockDataCleanup(pResBlock);
- blockDataCleanup(pInfo->pRes);
int32_t order = TSDB_ORDER_ASC;
int32_t scanFlag = MAIN_SCAN;
@@ -3336,6 +3033,8 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
taosFillSetStartInfo(pInfo->pFillInfo, 0, pInfo->win.ekey);
} else {
blockDataUpdateTsWindow(pBlock, pInfo->primarySrcSlotId);
+
+ blockDataCleanup(pInfo->pRes);
doApplyScalarCalculation(pOperator, pBlock, order, scanFlag);
if (pInfo->curGroupId == 0 || pInfo->curGroupId == pInfo->pRes->info.groupId) {
@@ -3378,7 +3077,6 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
assert(pBlock != NULL);
blockDataCleanup(pResBlock);
- blockDataCleanup(pInfo->pRes);
doHandleRemainBlockForNewGroupImpl(pOperator, pInfo, pResultInfo, pTaskInfo);
if (pResBlock->info.rows > pResultInfo->threshold) {
@@ -3440,7 +3138,7 @@ static void destroyOperatorInfo(SOperatorInfo* pOperator) {
}
if (pOperator->fpSet.closeFn != NULL) {
- pOperator->fpSet.closeFn(pOperator->info, pOperator->exprSupp.numOfExprs);
+ pOperator->fpSet.closeFn(pOperator->info);
}
if (pOperator->pDownstream != NULL) {
@@ -3473,11 +3171,13 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul
int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize,
const char* pKey) {
+ int32_t code = 0;
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ pAggSup->currentPageId = -1;
pAggSup->resultRowSize = getResultRowSize(pCtx, numOfOutput);
pAggSup->keyBuf = taosMemoryCalloc(1, keyBufSize + POINTER_BYTES + sizeof(int64_t));
- pAggSup->pResultRowHashTable = taosHashInit(10, hashFn, true, HASH_NO_LOCK);
+ pAggSup->pResultRowHashTable = tSimpleHashInit(10, hashFn);
if (pAggSup->keyBuf == NULL || pAggSup->pResultRowHashTable == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
@@ -3488,22 +3188,23 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz);
if (!osTempSpaceAvailable()) {
- terrno = TSDB_CODE_NO_AVAIL_DISK;
- qError("Init stream agg supporter failed since %s", terrstr(terrno));
- return terrno;
+ code = TSDB_CODE_NO_AVAIL_DISK;
+ qError("Init stream agg supporter failed since %s, %s", terrstr(code), pKey);
+ return code;
}
- int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir);
+
+ code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir);
if (code != TSDB_CODE_SUCCESS) {
- qError("Create agg result buf failed since %s", tstrerror(code));
+ qError("Create agg result buf failed since %s, %s", tstrerror(code), pKey);
return code;
}
- return TSDB_CODE_SUCCESS;
+ return code;
}
void cleanupAggSup(SAggSupporter* pAggSup) {
taosMemoryFreeClear(pAggSup->keyBuf);
- taosHashCleanup(pAggSup->pResultRowHashTable);
+ tSimpleHashCleanup(pAggSup->pResultRowHashTable);
destroyDiskbasedBuf(pAggSup->pResultBuf);
}
@@ -3520,7 +3221,7 @@ int32_t initAggInfo(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInf
}
for (int32_t i = 0; i < numOfCols; ++i) {
- pSup->pCtx[i].pBuf = pAggSup->pResultBuf;
+ pSup->pCtx[i].saveHandle.pBuf = pAggSup->pResultBuf;
}
return TSDB_CODE_SUCCESS;
@@ -3552,6 +3253,7 @@ void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
}
taosMemoryFreeClear(pCtx[i].subsidiaries.pCtx);
+ taosMemoryFreeClear(pCtx[i].subsidiaries.buf);
taosMemoryFree(pCtx[i].input.pData);
taosMemoryFree(pCtx[i].input.pColumnDataAgg);
}
@@ -3631,7 +3333,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
return pOperator;
_error:
- destroyAggOperatorInfo(pInfo, numOfCols);
+ destroyAggOperatorInfo(pInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
@@ -3639,7 +3341,6 @@ _error:
void cleanupBasicInfo(SOptrBasicInfo* pInfo) {
assert(pInfo != NULL);
- cleanupResultRowInfo(&pInfo->resultRowInfo);
pInfo->pRes = blockDataDestroy(pInfo->pRes);
}
@@ -3657,7 +3358,7 @@ static void freeItem(void* pItem) {
}
}
-void destroyAggOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyAggOperatorInfo(void* param) {
SAggOperatorInfo* pInfo = (SAggOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
@@ -3667,7 +3368,7 @@ void destroyAggOperatorInfo(void* param, int32_t numOfOutput) {
taosMemoryFreeClear(param);
}
-void destroyFillOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyFillOperatorInfo(void* param) {
SFillOperatorInfo* pInfo = (SFillOperatorInfo*)param;
pInfo->pFillInfo = taosDestroyFillInfo(pInfo->pFillInfo);
pInfo->pRes = blockDataDestroy(pInfo->pRes);
@@ -3683,7 +3384,7 @@ void destroyFillOperatorInfo(void* param, int32_t numOfOutput) {
taosMemoryFreeClear(param);
}
-void destroyExchangeOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyExchangeOperatorInfo(void* param) {
SExchangeInfo* pExInfo = (SExchangeInfo*)param;
taosRemoveRef(exchangeObjRefPool, pExInfo->self);
}
@@ -3715,7 +3416,7 @@ static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t
const char* id, SInterval* pInterval, int32_t fillType, int32_t order) {
SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfCols, pNotFillExpr, numOfNotFillCols, pValNode);
- int64_t startKey = (order == TSDB_ORDER_ASC) ? win.skey : win.ekey;
+ int64_t startKey = (order == TSDB_ORDER_ASC) ? win.skey : win.ekey;
STimeWindow w = getAlignQueryTimeWindow(pInterval, pInterval->precision, startKey);
w = getFirstQualifiedTimeWindow(startKey, &w, pInterval, order);
@@ -3988,15 +3689,15 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
bool assignUid = groupbyTbname(group);
- size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList);
+ size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList);
- if(assignUid){
+ if (assignUid) {
for (int32_t i = 0; i < numOfTables; i++) {
STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i);
info->groupId = info->uid;
taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &info->groupId, sizeof(uint64_t));
}
- }else{
+ } else {
int32_t code = getColInfoResultForGroupby(pHandle->meta, group, pTableListInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -4030,6 +3731,7 @@ static int32_t initTableblockDistQueryCond(uint64_t uid, SQueryTableDataCond* pC
pCond->type = TIMEWINDOW_RANGE_CONTAINED;
pCond->startVersion = -1;
pCond->endVersion = -1;
+ pCond->schemaVersion = -1;
return TSDB_CODE_SUCCESS;
}
@@ -4049,6 +3751,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pTableListInfo, pTagCond, pTagIndexCond, GET_TASKID(pTaskInfo));
if (code) {
pTaskInfo->code = code;
+ qError("failed to createScanTableListInfo, code:%s, %s", tstrerror(code), GET_TASKID(pTaskInfo));
return NULL;
}
@@ -4068,6 +3771,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pTableListInfo, pTagCond, pTagIndexCond, GET_TASKID(pTaskInfo));
if (code) {
pTaskInfo->code = code;
+ qError("failed to createScanTableListInfo, code: %s", tstrerror(code));
return NULL;
}
@@ -4091,6 +3795,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pHandle, pTableListInfo, pTagCond, pTagIndexCond, GET_TASKID(pTaskInfo));
if (code) {
pTaskInfo->code = code;
+ qError("failed to createScanTableListInfo, code: %s", tstrerror(code));
return NULL;
}
@@ -4100,8 +3805,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
STableKeyInfo* pKeyInfo = taosArrayGet(pTableListInfo->pTableList, i);
qDebug("creating stream task: add table %" PRId64, pKeyInfo->uid);
}
- }
#endif
+ }
pTaskInfo->schemaInfo.qsw = extractQueriedColumnSchema(&pTableScanNode->scan);
pOperator = createStreamScanOperatorInfo(pHandle, pTableScanNode, pTagCond, pTaskInfo);
@@ -4113,6 +3818,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanPhyNode, pTagCond, pTagIndexCond, pTableListInfo);
if (code != TSDB_CODE_SUCCESS) {
pTaskInfo->code = terrno;
+ qError("failed to getTableList, code: %s", tstrerror(code));
return NULL;
}
@@ -4159,7 +3865,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
return NULL;
}
- pOperator = createLastrowScanOperator(pScanNode, pHandle, pTaskInfo);
+ pOperator = createCacherowsScanOperator(pScanNode, pHandle, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_PROJECT == type) {
pOperator = createProjectOperatorInfo(NULL, (SProjectPhysiNode*)pPhyNode, pTaskInfo);
} else {
@@ -4183,9 +3889,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
if (ops[i] == NULL) {
taosMemoryFree(ops);
return NULL;
- } else {
- ops[i]->resultDataBlockId = pChildNode->pOutputDataBlockDesc->dataBlockId;
}
+
+ ops[i]->resultDataBlockId = pChildNode->pOutputDataBlockDesc->dataBlockId;
}
SOperatorInfo* pOptr = NULL;
@@ -4237,37 +3943,10 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == type) {
SMergeAlignedIntervalPhysiNode* pIntervalPhyNode = (SMergeAlignedIntervalPhysiNode*)pPhyNode;
-
- SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &num);
- SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
-
- SInterval interval = {.interval = pIntervalPhyNode->interval,
- .sliding = pIntervalPhyNode->sliding,
- .intervalUnit = pIntervalPhyNode->intervalUnit,
- .slidingUnit = pIntervalPhyNode->slidingUnit,
- .offset = pIntervalPhyNode->offset,
- .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision};
-
- int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
- pOptr = createMergeAlignedIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId,
- pPhyNode->pConditions, pIntervalPhyNode->window.mergeDataBlock,
- pTaskInfo);
+ pOptr = createMergeAlignedIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL == type) {
SMergeIntervalPhysiNode* pIntervalPhyNode = (SMergeIntervalPhysiNode*)pPhyNode;
-
- SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &num);
- SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
-
- SInterval interval = {.interval = pIntervalPhyNode->interval,
- .sliding = pIntervalPhyNode->sliding,
- .intervalUnit = pIntervalPhyNode->intervalUnit,
- .slidingUnit = pIntervalPhyNode->slidingUnit,
- .offset = pIntervalPhyNode->offset,
- .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision};
-
- int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
- pOptr = createMergeIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId,
- pIntervalPhyNode->window.mergeDataBlock, pTaskInfo);
+ pOptr = createMergeIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL == type) {
int32_t children = 0;
pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
@@ -4294,19 +3973,11 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pOptr = createStreamFinalSessionAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
} else if (QUERY_NODE_PHYSICAL_PLAN_PARTITION == type) {
pOptr = createPartitionOperatorInfo(ops[0], (SPartitionPhysiNode*)pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION == type) {
+ pOptr = createStreamPartitionOperatorInfo(ops[0], (SPartitionPhysiNode*)pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE == type) {
SStateWinodwPhysiNode* pStateNode = (SStateWinodwPhysiNode*)pPhyNode;
-
- STimeWindowAggSupp as = {.waterMark = pStateNode->window.watermark, .calTrigger = pStateNode->window.triggerType};
-
- SExprInfo* pExprInfo = createExprInfo(pStateNode->window.pFuncs, NULL, &num);
- SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
- int32_t tsSlotId = ((SColumnNode*)pStateNode->window.pTspk)->slotId;
-
- SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr;
- SColumn col = extractColumnFromColumnNode(pColNode);
- pOptr = createStatewindowOperatorInfo(ops[0], pExprInfo, num, pResBlock, &as, tsSlotId, &col, pPhyNode->pConditions,
- pTaskInfo);
+ pOptr = createStatewindowOperatorInfo(ops[0], pStateNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE == type) {
pOptr = createStreamStateAggOperatorInfo(ops[0], pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN == type) {
@@ -4320,8 +3991,12 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
} else {
ASSERT(0);
}
+
taosMemoryFree(ops);
- if (pOptr) pOptr->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;
+ if (pOptr) {
+ pOptr->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;
+ }
+
return pOptr;
}
@@ -4358,42 +4033,6 @@ SArray* extractColumnInfo(SNodeList* pNodeList) {
return pList;
}
-#if 0
-STsdbReader* doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
- STableListInfo* pTableListInfo, const char* idstr) {
- int32_t code = getTableList(pHandle->meta, pHandle->vnode, &pTableScanNode->scan, pTableListInfo);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- if (taosArrayGetSize(pTableListInfo->pTableList) == 0) {
- code = 0;
- qDebug("no table qualified for query, %s", idstr);
- goto _error;
- }
-
- SQueryTableDataCond cond = {0};
- code = initQueryTableDataCond(&cond, pTableScanNode);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- STsdbReader* pReader;
- code = tsdbReaderOpen(pHandle->vnode, &cond, pTableListInfo->pTableList, &pReader, idstr);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- cleanupQueryTableDataCond(&cond);
-
- return pReader;
-
-_error:
- terrno = code;
- return NULL;
-}
-#endif
-
static int32_t extractTbscanInStreamOpTree(SOperatorInfo* pOperator, STableScanInfo** ppInfo) {
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
if (pOperator->numOfDownstream == 0) {
@@ -4611,6 +4250,10 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead
goto _complete;
}
+ if (pHandle && pHandle->pStateBackend) {
+ (*pTaskInfo)->streamInfo.pState = pHandle->pStateBackend;
+ }
+
(*pTaskInfo)->sql = sql;
sql = NULL;
(*pTaskInfo)->pSubplan = pPlan;
@@ -4631,7 +4274,7 @@ _complete:
return code;
}
-static void doDestroyTableList(STableListInfo* pTableqinfoList) {
+void doDestroyTableList(STableListInfo* pTableqinfoList) {
taosArrayDestroy(pTableqinfoList->pTableList);
taosHashCleanup(pTableqinfoList->map);
if (pTableqinfoList->needSortTableByGroupId) {
@@ -4663,27 +4306,6 @@ void doDestroyTask(SExecTaskInfo* pTaskInfo) {
taosMemoryFreeClear(pTaskInfo);
}
-static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes) {
- if (val == NULL) {
- setNull(output, type, bytes);
- return;
- }
-
- if (IS_VAR_DATA_TYPE(type)) {
- // Binary data overflows for sort of unknown reasons. Let trim the overflow data
- if (varDataTLen(val) > bytes) {
- int32_t maxLen = bytes - VARSTR_HEADER_SIZE;
- int32_t len = (varDataLen(val) > maxLen) ? maxLen : varDataLen(val);
- memcpy(varDataVal(output), varDataVal(val), len);
- varDataSetLen(output, len);
- } else {
- varDataCopy(output, val);
- }
- } else {
- memcpy(output, val, bytes);
- }
-}
-
static int64_t getQuerySupportBufSize(size_t numOfTables) {
size_t s1 = sizeof(STableQueryInfo);
// size_t s3 = sizeof(STableCheckInfo); buffer consumption in tsdb
@@ -4756,6 +4378,7 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInf
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput,
int32_t size) {
+ pSup->currentPageId = -1;
pSup->resultRowSize = getResultRowSize(pCtx, numOfOutput);
pSup->keySize = sizeof(int64_t) + sizeof(TSKEY);
pSup->pKeyBuf = taosMemoryCalloc(1, pSup->keySize);
@@ -4783,7 +4406,8 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlF
}
int32_t code = createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, tsTempDir);
for (int32_t i = 0; i < numOfOutput; ++i) {
- pCtx[i].pBuf = pSup->pResultBuf;
+ pCtx[i].saveHandle.pBuf = pSup->pResultBuf;
}
+
return code;
}
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index 05dffc658b29bb5eb6675edae62d04bb6442cc48..599edb07222840d6e3bc74889ad0bd52bba50907 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -36,8 +36,12 @@ static void freeGroupKey(void* param) {
taosMemoryFree(pKey->pData);
}
-static void destroyGroupOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyGroupOperatorInfo(void* param) {
SGroupbyOperatorInfo* pInfo = (SGroupbyOperatorInfo*)param;
+ if (pInfo == NULL) {
+ return;
+ }
+
cleanupBasicInfo(&pInfo->binfo);
taosMemoryFreeClear(pInfo->keyBuf);
taosArrayDestroy(pInfo->pGroupCols);
@@ -247,7 +251,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
if (!pInfo->isInit) {
recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j);
if (terrno != TSDB_CODE_SUCCESS) { // group by json error
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
pInfo->isInit = true;
num++;
@@ -265,7 +269,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
num++;
recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j);
if (terrno != TSDB_CODE_SUCCESS) { // group by json error
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
continue;
}
@@ -273,7 +277,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals);
int32_t ret = setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf, len, pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
}
int32_t rowIndex = j - num;
@@ -291,7 +295,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf, len,
pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup);
if (ret != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
}
int32_t rowIndex = pBlock->info.rows - num;
@@ -350,7 +354,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
int32_t code = getTableScanInfo(pOperator, &order, &scanFlag);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
// the pDataBlock are always the same one, no need to call this again
@@ -360,7 +364,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
if (pInfo->scalarSup.pExprInfo != NULL) {
pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx, pInfo->scalarSup.numOfExprs, NULL);
if (pTaskInfo->code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, pTaskInfo->code);
+ T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
}
}
@@ -413,7 +417,11 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
}
initResultSizeInfo(&pOperator->resultInfo, 4096);
- initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, pInfo->groupKeyLen, pTaskInfo->id.str);
+ code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, pInfo->groupKeyLen, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
initBasicInfo(&pInfo->binfo, pResultBlock);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
@@ -426,11 +434,15 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, hashGroupbyAggregate, NULL, NULL, destroyGroupOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
code = appendDownstream(pOperator, &downstream, 1);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
return pOperator;
_error:
pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
- taosMemoryFreeClear(pInfo);
+ destroyGroupOperatorInfo(pInfo);
taosMemoryFreeClear(pOperator);
return NULL;
}
@@ -535,7 +547,7 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf
p = taosHashGet(pInfo->pGroupSet, pInfo->keyBuf, len);
int32_t pageId = 0;
- pPage = getNewBufPage(pInfo->pBuf, 0, &pageId);
+ pPage = getNewBufPage(pInfo->pBuf, &pageId);
taosArrayPush(p->pPageList, &pageId);
*(int32_t *) pPage = 0;
@@ -550,7 +562,7 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf
// add a new page for current group
int32_t pageId = 0;
- pPage = getNewBufPage(pInfo->pBuf, 0, &pageId);
+ pPage = getNewBufPage(pInfo->pBuf, &pageId);
taosArrayPush(p->pPageList, &pageId);
memset(pPage, 0, getBufPageSize(pInfo->pBuf));
}
@@ -678,20 +690,20 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) {
if (pInfo->scalarSup.pExprInfo != NULL) {
pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx, pInfo->scalarSup.numOfExprs, NULL);
if (pTaskInfo->code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, pTaskInfo->code);
+ T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
}
}
terrno = TSDB_CODE_SUCCESS;
doHashPartition(pOperator, pBlock);
if (terrno != TSDB_CODE_SUCCESS) { // group by json error
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
}
SArray* groupArray = taosArrayInit(taosHashGetSize(pInfo->pGroupSet), sizeof(SDataGroupInfo));
- void* pGroupIter = NULL;
- pGroupIter = taosHashIterate(pInfo->pGroupSet, NULL);
+
+ void* pGroupIter = taosHashIterate(pInfo->pGroupSet, NULL);
while (pGroupIter != NULL) {
SDataGroupInfo* pGroupInfo = pGroupIter;
taosArrayPush(groupArray, pGroupInfo);
@@ -710,7 +722,7 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) {
return buildPartitionResult(pOperator);
}
-static void destroyPartitionOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyPartitionOperatorInfo(void* param) {
SPartitionOperatorInfo* pInfo = (SPartitionOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
taosArrayDestroy(pInfo->pGroupCols);
@@ -818,3 +830,205 @@ int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo,
setResultRowInitCtx(pResultRow, pCtx, numOfCols, pOperator->exprSupp.rowEntryInfoOffset);
return TSDB_CODE_SUCCESS;
}
+
+uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId) {
+ if (pExprSup->pExprInfo != NULL) {
+ int32_t code = projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
+ if (code != TSDB_CODE_SUCCESS) {
+      qError("calculate group id error, code:%d", code);
+ }
+ }
+ recordNewGroupKeys(pParSup->pGroupCols, pParSup->pGroupColVals, pBlock, rowId);
+ int32_t len = buildGroupKeys(pParSup->keyBuf, pParSup->pGroupColVals);
+ uint64_t groupId = calcGroupId(pParSup->keyBuf, len);
+ return groupId;
+}
+
+static bool hasRemainPartion(SStreamPartitionOperatorInfo* pInfo) {
+ return pInfo->parIte != NULL;
+}
+
+static SSDataBlock* buildStreamPartitionResult(SOperatorInfo* pOperator) {
+ SStreamPartitionOperatorInfo* pInfo = pOperator->info;
+ SSDataBlock* pDest = pInfo->binfo.pRes;
+ ASSERT(hasRemainPartion(pInfo));
+ SPartitionDataInfo* pParInfo = (SPartitionDataInfo*)pInfo->parIte;
+ blockDataCleanup(pDest);
+ int32_t rows = taosArrayGetSize(pParInfo->rowIds);
+ SSDataBlock* pSrc = pInfo->pInputDataBlock;
+ for (int32_t i = 0; i < rows; i++) {
+ int32_t rowIndex = *(int32_t*)taosArrayGet(pParInfo->rowIds, i);
+ for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; j++) {
+ int32_t slotId = pOperator->exprSupp.pExprInfo[j].base.pParam[0].pCol->slotId;
+ SColumnInfoData* pSrcCol = taosArrayGet(pSrc->pDataBlock, slotId);
+ SColumnInfoData* pDestCol = taosArrayGet(pDest->pDataBlock, j);
+ bool isNull = colDataIsNull(pSrcCol, pSrc->info.rows, rowIndex, NULL);
+ char* pSrcData = colDataGetData(pSrcCol, rowIndex);
+ colDataAppend(pDestCol, pDest->info.rows, pSrcData, isNull);
+ }
+ pDest->info.rows++;
+ }
+ blockDataUpdateTsWindow(pDest, pInfo->tsColIndex);
+ pDest->info.groupId = pParInfo->groupId;
+ pOperator->resultInfo.totalRows += pDest->info.rows;
+ pInfo->parIte = taosHashIterate(pInfo->pPartitions, pInfo->parIte);
+ ASSERT(pDest->info.rows > 0);
+ printDataBlock(pDest, "stream partitionby");
+ return pDest;
+}
+
+static void doStreamHashPartitionImpl(SStreamPartitionOperatorInfo* pInfo, SSDataBlock* pBlock) {
+ pInfo->pInputDataBlock = pBlock;
+ for (int32_t i = 0; i < pBlock->info.rows; ++i) {
+ recordNewGroupKeys(pInfo->partitionSup.pGroupCols, pInfo->partitionSup.pGroupColVals, pBlock, i);
+ int32_t keyLen = buildGroupKeys(pInfo->partitionSup.keyBuf, pInfo->partitionSup.pGroupColVals);
+ SPartitionDataInfo* pParData =
+ (SPartitionDataInfo*) taosHashGet(pInfo->pPartitions, pInfo->partitionSup.keyBuf, keyLen);
+ if (pParData) {
+ taosArrayPush(pParData->rowIds, &i);
+ } else {
+ SPartitionDataInfo newParData = {0};
+ newParData.groupId = calcGroupId(pInfo->partitionSup.keyBuf, keyLen);
+ newParData.rowIds = taosArrayInit(64, sizeof(int32_t));
+ taosArrayPush(newParData.rowIds, &i);
+ taosHashPut(pInfo->pPartitions, pInfo->partitionSup.keyBuf, keyLen, &newParData,
+ sizeof(SPartitionDataInfo));
+ }
+ }
+}
+
+static SSDataBlock* doStreamHashPartition(SOperatorInfo* pOperator) {
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStreamPartitionOperatorInfo* pInfo = pOperator->info;
+ if (hasRemainPartion(pInfo)) {
+ return buildStreamPartitionResult(pOperator);
+ }
+
+ int64_t st = taosGetTimestampUs();
+ SOperatorInfo* downstream = pOperator->pDownstream[0];
+ {
+ pInfo->pInputDataBlock = NULL;
+ SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
+ if (pBlock == NULL) {
+ doSetOperatorCompleted(pOperator);
+ return NULL;
+ }
+ printDataBlock(pBlock, "stream partitionby recv");
+ switch (pBlock->info.type) {
+ case STREAM_NORMAL:
+ case STREAM_PULL_DATA:
+ case STREAM_INVALID:
+ pInfo->binfo.pRes->info.type = pBlock->info.type;
+ break;
+ default:
+ return pBlock;
+ }
+
+ // there is an scalar expression that needs to be calculated right before apply the group aggregation.
+ if (pInfo->scalarSup.pExprInfo != NULL) {
+ pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock,
+ pInfo->scalarSup.pCtx, pInfo->scalarSup.numOfExprs, NULL);
+ if (pTaskInfo->code != TSDB_CODE_SUCCESS) {
+        T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
+ }
+ }
+ taosHashClear(pInfo->pPartitions);
+ doStreamHashPartitionImpl(pInfo, pBlock);
+ }
+ pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
+
+ pInfo->parIte = taosHashIterate(pInfo->pPartitions, NULL);
+ return buildStreamPartitionResult(pOperator);
+}
+
+static void destroyStreamPartitionOperatorInfo(void* param) {
+ SStreamPartitionOperatorInfo* pInfo = (SStreamPartitionOperatorInfo*)param;
+ cleanupBasicInfo(&pInfo->binfo);
+ taosArrayDestroy(pInfo->partitionSup.pGroupCols);
+
+ for(int i = 0; i < taosArrayGetSize(pInfo->partitionSup.pGroupColVals); i++){
+ SGroupKeys key = *(SGroupKeys*)taosArrayGet(pInfo->partitionSup.pGroupColVals, i);
+ taosMemoryFree(key.pData);
+ }
+ taosArrayDestroy(pInfo->partitionSup.pGroupColVals);
+
+ taosMemoryFree(pInfo->partitionSup.keyBuf);
+ cleanupExprSupp(&pInfo->scalarSup);
+ taosMemoryFreeClear(param);
+}
+
+void initParDownStream(SOperatorInfo* downstream, SPartitionBySupporter* pParSup, SExprSupp* pExpr) {
+ if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
+ return;
+ }
+ SStreamScanInfo* pScanInfo = downstream->info;
+ pScanInfo->partitionSup = *pParSup;
+ pScanInfo->pPartScalarSup = pExpr;
+}
+
+SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStreamPartitionPhysiNode* pPartNode, SExecTaskInfo* pTaskInfo) {
+ SStreamPartitionOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamPartitionOperatorInfo));
+ SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+ if (pInfo == NULL || pOperator == NULL) {
+ goto _error;
+ }
+ int32_t code = TSDB_CODE_SUCCESS;
+ pInfo->partitionSup.pGroupCols = extractPartitionColInfo(pPartNode->pPartitionKeys);
+
+ if (pPartNode->pExprs != NULL) {
+ int32_t num = 0;
+ SExprInfo* pCalExprInfo = createExprInfo(pPartNode->pExprs, NULL, &num);
+ code = initExprSupp(&pInfo->scalarSup, pCalExprInfo, num);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ }
+
+ int32_t keyLen = 0;
+ code = initGroupOptrInfo(&pInfo->partitionSup.pGroupColVals, &keyLen, &pInfo->partitionSup.keyBuf, pInfo->partitionSup.pGroupCols);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ pInfo->partitionSup.needCalc = true;
+
+ SSDataBlock* pResBlock = createResDataBlock(pPartNode->node.pOutputDataBlockDesc);
+ if (!pResBlock) {
+ goto _error;
+ }
+ blockDataEnsureCapacity(pResBlock, 4096);
+ pInfo->binfo.pRes = pResBlock;
+ pInfo->parIte = NULL;
+ pInfo->pInputDataBlock = NULL;
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ pInfo->pPartitions = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
+ pInfo->tsColIndex = 0;
+
+ int32_t numOfCols = 0;
+ SExprInfo* pExprInfo = createExprInfo(pPartNode->pTargets, NULL, &numOfCols);
+
+ pOperator->name = "StreamPartitionOperator";
+ pOperator->blocking = false;
+ pOperator->status = OP_NOT_OPENED;
+ pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION;
+ pOperator->exprSupp.numOfExprs = numOfCols;
+ pOperator->exprSupp.pExprInfo = pExprInfo;
+ pOperator->info = pInfo;
+ pOperator->pTaskInfo = pTaskInfo;
+ pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamHashPartition, NULL, NULL, destroyStreamPartitionOperatorInfo,
+ NULL, NULL, NULL);
+
+ initParDownStream(downstream, &pInfo->partitionSup, &pInfo->scalarSup);
+ code = appendDownstream(pOperator, &downstream, 1);
+ return pOperator;
+
+ _error:
+ pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
+ taosMemoryFreeClear(pInfo);
+ taosMemoryFreeClear(pOperator);
+ return NULL;
+}
+
diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c
index 7d2b84d0f053a7c8c6e3f63db719f67b3d9e99f3..1bc7d458e0ee16decabea988a16713996d2468ce 100644
--- a/source/libs/executor/src/joinoperator.c
+++ b/source/libs/executor/src/joinoperator.c
@@ -25,7 +25,7 @@
static void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode);
static SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator);
-static void destroyMergeJoinOperator(void* param, int32_t numOfOutput);
+static void destroyMergeJoinOperator(void* param);
static void extractTimeCondition(SJoinOperatorInfo* pInfo, SOperatorInfo** pDownstream, int32_t numOfDownstream,
SSortMergeJoinPhysiNode* pJoinNode);
@@ -128,12 +128,11 @@ void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode) {
pColumn->scale = pColumnNode->node.resType.scale;
}
-void destroyMergeJoinOperator(void* param, int32_t numOfOutput) {
+void destroyMergeJoinOperator(void* param) {
SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*)param;
nodesDestroyNode(pJoinOperator->pCondAfterMerge);
pJoinOperator->pRes = blockDataDestroy(pJoinOperator->pRes);
-
taosMemoryFreeClear(param);
}
diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c
index f2b79bf703343a8365b832b23dcd514e5ecc0574..2f12a0d19bdf74e7b0b2ab94c373a31cbe7d8316 100644
--- a/source/libs/executor/src/projectoperator.c
+++ b/source/libs/executor/src/projectoperator.c
@@ -23,7 +23,7 @@ static SArray* setRowTsColumnOutputInfo(SqlFunctionCtx* pCtx, int32_t numOf
static void setFunctionResultOutput(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t stage,
int32_t numOfExprs);
-static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyProjectOperatorInfo(void* param) {
if (NULL == param) {
return;
}
@@ -37,10 +37,13 @@ static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) {
taosMemoryFreeClear(param);
}
-static void destroyIndefinitOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyIndefinitOperatorInfo(void* param) {
SIndefOperatorInfo* pInfo = (SIndefOperatorInfo*)param;
- cleanupBasicInfo(&pInfo->binfo);
+ if (pInfo == NULL) {
+ return;
+ }
+ cleanupBasicInfo(&pInfo->binfo);
taosArrayDestroy(pInfo->pPseudoColInfo);
cleanupAggSup(&pInfo->aggSup);
cleanupExprSupp(&pInfo->scalarSup);
@@ -50,9 +53,11 @@ static void destroyIndefinitOperatorInfo(void* param, int32_t numOfOutput) {
SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode,
SExecTaskInfo* pTaskInfo) {
+ int32_t code = TSDB_CODE_SUCCESS;
SProjectOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SProjectOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
goto _error;
}
@@ -67,12 +72,11 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
pInfo->binfo.pRes = pResBlock;
pInfo->pFinalRes = createOneDataBlock(pResBlock, false);
pInfo->pFilterNode = pProjPhyNode->node.pConditions;
- pInfo->mergeDataBlocks = pProjPhyNode->mergeDataBlock;
-
- // todo remove it soon
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
pInfo->mergeDataBlocks = false;
+ } else {
+ pInfo->mergeDataBlocks = pProjPhyNode->mergeDataBlock;
}
int32_t numOfRows = 4096;
@@ -83,9 +87,13 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
if (numOfRows * pResBlock->info.rowSize > TWOMB) {
numOfRows = TWOMB / pResBlock->info.rowSize;
}
+
initResultSizeInfo(&pOperator->resultInfo, numOfRows);
+ code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
- initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
initBasicInfo(&pInfo->binfo, pResBlock);
setFunctionResultOutput(pOperator, &pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, numOfCols);
@@ -99,7 +107,7 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doProjectOperation, NULL, NULL,
destroyProjectOperatorInfo, NULL, NULL, NULL);
- int32_t code = appendDownstream(pOperator, &downstream, 1);
+ code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -107,7 +115,9 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
return pOperator;
_error:
- pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
+ destroyProjectOperatorInfo(pInfo);
+ taosMemoryFree(pOperator);
+ pTaskInfo->code = code;
return NULL;
}
@@ -175,7 +185,8 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS
int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows);
blockDataKeepFirstNRows(pBlock, keepRows);
//TODO: optimize it later when partition by + limit
- if ((pLimitInfo->slimit.limit == -1 && pLimitInfo->currentGroupId == 0) || pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups) {
+ if ((pLimitInfo->slimit.limit == -1 && pLimitInfo->currentGroupId == 0) ||
+ (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
doSetOperatorCompleted(pOperator);
}
}
@@ -184,16 +195,6 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS
return PROJECT_RETRIEVE_DONE;
}
-void printDataBlock1(SSDataBlock* pBlock, const char* flag) {
- if (!pBlock || pBlock->info.rows == 0) {
- qDebug("===stream===printDataBlock: Block is Null or Empty");
- return;
- }
- char* pBuf = NULL;
- qDebug("%s", dumpBlockData(pBlock, flag, &pBuf));
- taosMemoryFreeClear(pBuf);
-}
-
SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
SProjectOperatorInfo* pProjectInfo = pOperator->info;
SOptrBasicInfo* pInfo = &pProjectInfo->binfo;
@@ -260,7 +261,7 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
// the pDataBlock are always the same one, no need to call this again
int32_t code = getTableScanInfo(downstream, &order, &scanFlag);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, false);
@@ -269,7 +270,7 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
code = projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs,
pProjectInfo->pPseudoColInfo);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
status = doIngroupLimitOffset(pLimitInfo, pBlock->info.groupId, pInfo->pRes, pOperator);
@@ -363,9 +364,12 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy
initResultSizeInfo(&pOperator->resultInfo, numOfRows);
- initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfExpr, keyBufSize, pTaskInfo->id.str);
- initBasicInfo(&pInfo->binfo, pResBlock);
+ int32_t code = initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfExpr, keyBufSize, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ initBasicInfo(&pInfo->binfo, pResBlock);
setFunctionResultOutput(pOperator, &pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, numOfExpr);
pInfo->binfo.pRes = pResBlock;
@@ -381,7 +385,7 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doApplyIndefinitFunction, NULL, NULL,
destroyIndefinitOperatorInfo, NULL, NULL, NULL);
- int32_t code = appendDownstream(pOperator, &downstream, 1);
+ code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -389,7 +393,7 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy
return pOperator;
_error:
- taosMemoryFree(pInfo);
+ destroyIndefinitOperatorInfo(pInfo);
taosMemoryFree(pOperator);
pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
@@ -407,7 +411,7 @@ static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOp
// the pDataBlock are always the same one, no need to call this again
int32_t code = getTableScanInfo(downstream, &order, &scanFlag);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
// there is an scalar expression that needs to be calculated before apply the group aggregation.
@@ -416,7 +420,7 @@ static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOp
code = projectApplyFunctions(pScalarSup->pExprInfo, pBlock, pBlock, pScalarSup->pCtx, pScalarSup->numOfExprs,
pIndefInfo->pPseudoColInfo);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
@@ -426,7 +430,7 @@ static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOp
code = projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs,
pIndefInfo->pPseudoColInfo);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 7b13aa8ad8fe5f33a8427d5a4d1e49e2ef1690ff..b740ec21d3f2a99d811d4bf2fa6142e80fcc7116 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -36,8 +36,8 @@
#define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC))
static int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity);
-static int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size,
- const char* dbName);
+static int32_t buildDbTableInfoBlock(bool sysInfo, const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta,
+ size_t size, const char* dbName);
static bool processBlockWithProbability(const SSampleExecInfo* pInfo);
@@ -178,8 +178,8 @@ static SResultRow* getTableGroupOutputBuf(SOperatorInfo* pOperator, uint64_t gro
STableScanInfo* pTableScanInfo = pOperator->info;
- SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(pTableScanInfo->pdInfo.pAggSup->pResultRowHashTable, buf,
- GET_RES_WINDOW_KEY_LEN(sizeof(groupId)));
+ SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(pTableScanInfo->pdInfo.pAggSup->pResultRowHashTable, buf,
+ GET_RES_WINDOW_KEY_LEN(sizeof(groupId)));
if (p1 == NULL) {
return NULL;
@@ -250,7 +250,7 @@ static bool doLoadBlockSMA(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
int32_t code = tsdbRetrieveDatablockSMA(pTableScanInfo->dataReader, &pColAgg, &allColumnsHaveAgg);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
if (!allColumnsHaveAgg) {
@@ -264,7 +264,7 @@ static bool doLoadBlockSMA(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
if (pBlock->pBlockAgg == NULL) {
pBlock->pBlockAgg = taosMemoryCalloc(numOfCols, POINTER_BYTES);
if (pBlock->pBlockAgg == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
}
}
@@ -374,7 +374,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pBlock,
GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
@@ -495,7 +495,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
while (tsdbNextDataBlock(pTableScanInfo->dataReader)) {
if (isTaskKilled(pTaskInfo)) {
- longjmp(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
}
// process this data block based on the probabilities
@@ -523,7 +523,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
int32_t code = loadDataBlock(pOperator, pTableScanInfo, pBlock, &status);
// int32_t code = loadDataBlockOnDemand(pOperator->pRuntimeEnv, pTableScanInfo, pBlock, &status);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pOperator->pTaskInfo->env, code);
+ T_LONG_JMP(pOperator->pTaskInfo->env, code);
}
// current block is filter out according to filter condition, continue load the next block
@@ -649,7 +649,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
int32_t code = tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, tableList, (STsdbReader**)&pInfo->dataReader,
GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
return NULL;
}
}
@@ -689,7 +689,7 @@ static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptr
return 0;
}
-static void destroyTableScanOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyTableScanOperatorInfo(void* param) {
STableScanInfo* pTableScanInfo = (STableScanInfo*)param;
blockDataDestroy(pTableScanInfo->pResBlock);
cleanupQueryTableDataCond(&pTableScanInfo->cond);
@@ -837,7 +837,7 @@ static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) {
int32_t code = doGetTableRowSize(pBlockScanInfo->readHandle.meta, pBlockScanInfo->uid, &blockDistInfo.rowSize,
GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
tsdbGetFileBlocksDistInfo(pBlockScanInfo->pHandle, &blockDistInfo);
@@ -863,7 +863,7 @@ static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) {
return pBlock;
}
-static void destroyBlockDistScanOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyBlockDistScanOperatorInfo(void* param) {
SBlockDistInfo* pDistInfo = (SBlockDistInfo*)param;
blockDataDestroy(pDistInfo->pResBlock);
tsdbReaderClose(pDistInfo->pHandle);
@@ -920,49 +920,28 @@ static void doClearBufferedBlocks(SStreamScanInfo* pInfo) {
}
static bool isSessionWindow(SStreamScanInfo* pInfo) {
- return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION ||
- pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION;
+ return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION ||
+ pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION;
}
static bool isStateWindow(SStreamScanInfo* pInfo) {
- return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE;
+ return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE;
}
static bool isIntervalWindow(SStreamScanInfo* pInfo) {
- return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL ||
- pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL ||
- pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL;
+ return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL ||
+ pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL ||
+ pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL;
}
static bool isSignleIntervalWindow(SStreamScanInfo* pInfo) {
- return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL;
+ return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL;
}
static bool isSlidingWindow(SStreamScanInfo* pInfo) {
return isIntervalWindow(pInfo) && pInfo->interval.interval != pInfo->interval.sliding;
}
-static uint64_t getGroupId(SOperatorInfo* pOperator, uint64_t uid) {
- uint64_t* groupId = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &uid, sizeof(int64_t));
- if (groupId) {
- return *groupId;
- }
- return 0;
- /* Todo(liuyao) for partition by column
- recordNewGroupKeys(pTableScanInfo->pGroupCols, pTableScanInfo->pGroupColVals, pBlock, rowId);
- int32_t len = buildGroupKeys(pTableScanInfo->keyBuf, pTableScanInfo->pGroupColVals);
- uint64_t resId = 0;
- uint64_t* groupId = taosHashGet(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len);
- if (groupId) {
- return *groupId;
- } else if (len != 0) {
- resId = calcGroupId(pTableScanInfo->keyBuf, len);
- taosHashPut(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len, &resId, sizeof(uint64_t));
- }
- return resId;
- */
-}
-
static void setGroupId(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t groupColIndex, int32_t rowIndex) {
SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, groupColIndex);
uint64_t* groupCol = (uint64_t*)pColInfo->pData;
@@ -976,6 +955,62 @@ void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin) {
pTableScanInfo->currentGroupId = -1;
}
+static void freeArray(void* array) {
+ taosArrayDestroy(array);
+}
+
+static void resetTableScanOperator(SOperatorInfo* pTableScanOp) {
+ STableScanInfo* pTableScanInfo = pTableScanOp->info;
+ pTableScanInfo->cond.startVersion = -1;
+ pTableScanInfo->cond.endVersion = -1;
+ SArray* gpTbls = pTableScanOp->pTaskInfo->tableqinfoList.pGroupList;
+ SArray* allTbls = pTableScanOp->pTaskInfo->tableqinfoList.pTableList;
+ taosArrayClearP(gpTbls, freeArray);
+ taosArrayPush(gpTbls, &allTbls);
+ STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
+ resetTableScanInfo(pTableScanOp->info, &win);
+}
+
+static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbUid, TSKEY startTs, TSKEY endTs, int64_t maxVersion) {
+ SArray* gpTbls = pTableScanOp->pTaskInfo->tableqinfoList.pGroupList;
+ taosArrayClear(gpTbls);
+ STableKeyInfo tblInfo = {.uid = tbUid, .groupId = 0};
+ SArray* tbls = taosArrayInit(1, sizeof(STableKeyInfo));
+ taosArrayPush(tbls, &tblInfo);
+ taosArrayPush(gpTbls, &tbls);
+
+ STimeWindow win = {.skey = startTs, .ekey = endTs};
+ STableScanInfo* pTableScanInfo = pTableScanOp->info;
+ pTableScanInfo->cond.startVersion = -1;
+ pTableScanInfo->cond.endVersion = maxVersion;
+ resetTableScanInfo(pTableScanOp->info, &win);
+ SSDataBlock* pRes = doTableScan(pTableScanOp);
+ resetTableScanOperator(pTableScanOp);
+ return pRes;
+}
+
+static uint64_t getGroupIdByCol(SStreamScanInfo* pInfo, uint64_t uid, TSKEY ts, int64_t maxVersion) {
+ SSDataBlock* pPreRes = readPreVersionData(pInfo->pTableScanOp, uid, ts, ts, maxVersion);
+ if (!pPreRes || pPreRes->info.rows == 0) {
+ return 0;
+ }
+ ASSERT(pPreRes->info.rows == 1);
+ return calGroupIdByData(&pInfo->partitionSup, pInfo->pPartScalarSup, pPreRes, 0);
+}
+
+static uint64_t getGroupIdByData(SStreamScanInfo* pInfo, uint64_t uid, TSKEY ts, int64_t maxVersion) {
+ if (pInfo->partitionSup.needCalc) {
+ return getGroupIdByCol(pInfo, uid, ts, maxVersion);
+ }
+
+ SHashObj* map = pInfo->pTableScanOp->pTaskInfo->tableqinfoList.map;
+ uint64_t* groupId = taosHashGet(map, &uid, sizeof(int64_t));
+ if (groupId) {
+ return *groupId;
+ }
+ return 0;
+}
+
static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t* pRowIndex) {
if ((*pRowIndex) == pBlock->info.rows) {
return false;
@@ -987,6 +1022,9 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
TSKEY* endData = (TSKEY*)pEndTsCol->pData;
STimeWindow win = {.skey = startData[*pRowIndex], .ekey = endData[*pRowIndex]};
+ SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t* gpData = (uint64_t*)pGpCol->pData;
+ uint64_t groupId = gpData[*pRowIndex];
SColumnInfoData* pCalStartTsCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
TSKEY* calStartData = (TSKEY*)pCalStartTsCol->pData;
@@ -1001,11 +1039,11 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
(*pRowIndex)++;
for (; *pRowIndex < pBlock->info.rows; (*pRowIndex)++) {
- if (win.skey == startData[*pRowIndex]) {
+ if (win.skey == startData[*pRowIndex] && groupId == gpData[*pRowIndex]) {
win.ekey = TMAX(win.ekey, endData[*pRowIndex]);
continue;
}
- if (win.skey == endData[*pRowIndex]) {
+ if (win.skey == endData[*pRowIndex] && groupId == gpData[*pRowIndex]) {
win.skey = TMIN(win.skey, startData[*pRowIndex]);
continue;
}
@@ -1020,15 +1058,19 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
}
static STimeWindow getSlidingWindow(TSKEY* tsCol, SInterval* pInterval, SDataBlockInfo* pDataBlockInfo,
- int32_t* pRowIndex) {
+ int32_t* pRowIndex, bool hasGroup) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[*pRowIndex], pInterval, TSDB_ORDER_ASC);
STimeWindow endWin = win;
STimeWindow preWin = win;
while (1) {
- (*pRowIndex) += getNumOfRowsInTimeWindow(pDataBlockInfo, tsCol, *pRowIndex, endWin.ekey, binarySearchForKey, NULL,
- TSDB_ORDER_ASC);
+ if (hasGroup) {
+ (*pRowIndex) += 1;
+ } else {
+ (*pRowIndex) += getNumOfRowsInTimeWindow(pDataBlockInfo, tsCol, *pRowIndex, endWin.ekey,
+ binarySearchForKey, NULL, TSDB_ORDER_ASC);
+ }
do {
preWin = endWin;
getNextTimeWindow(pInterval, &endWin, TSDB_ORDER_ASC);
@@ -1060,7 +1102,26 @@ static SSDataBlock* doRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32
return NULL;
}
- if (pResult->info.groupId == pInfo->groupId) {
+ if (pInfo->partitionSup.needCalc) {
+ SSDataBlock* tmpBlock = createOneDataBlock(pResult, true);
+ blockDataCleanup(pResult);
+ for (int32_t i = 0; i < tmpBlock->info.rows; i++) {
+ if (calGroupIdByData(&pInfo->partitionSup, pInfo->pPartScalarSup, tmpBlock, i) == pInfo->groupId) {
+ for (int32_t j = 0; j < pInfo->pTableScanOp->exprSupp.numOfExprs; j++) {
+ SColumnInfoData* pSrcCol = taosArrayGet(tmpBlock->pDataBlock, j);
+ SColumnInfoData* pDestCol = taosArrayGet(pResult->pDataBlock, j);
+ bool isNull = colDataIsNull(pSrcCol, tmpBlock->info.rows, i, NULL);
+ char* pSrcData = colDataGetData(pSrcCol, i);
+ colDataAppend(pDestCol, pResult->info.rows, pSrcData, isNull);
+ }
+ pResult->info.rows++;
+ }
+ }
+ if (pResult->info.rows > 0) {
+ pResult->info.calWin = pInfo->updateWin;
+ return pResult;
+ }
+ } else if (pResult->info.groupId == pInfo->groupId) {
pResult->info.calWin = pInfo->updateWin;
return pResult;
}
@@ -1086,23 +1147,31 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr
SColumnInfoData* pDestStartCol = taosArrayGet(pDestBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pDestEndCol = taosArrayGet(pDestBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ SColumnInfoData* pDestUidCol = taosArrayGet(pDestBlock->pDataBlock, UID_COLUMN_INDEX);
SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
+ SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
int32_t dummy = 0;
+ int64_t version = pSrcBlock->info.version - 1;
for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
- uint64_t groupId = getGroupId(pInfo->pTableScanOp, uidCol[i]);
+ uint64_t groupId = getGroupIdByData(pInfo, uidCol[i], startData[i], version);
// gap must be 0.
SResultWindowInfo* pStartWin =
- getCurSessionWindow(pInfo->sessionSup.pStreamAggSup, startData[i], endData[i], groupId, 0, &dummy);
+ getCurSessionWindow(pInfo->windowSup.pStreamAggSup, startData[i], endData[i], groupId, 0, &dummy);
if (!pStartWin) {
// window has been closed.
continue;
}
SResultWindowInfo* pEndWin =
- getCurSessionWindow(pInfo->sessionSup.pStreamAggSup, endData[i], endData[i], groupId, 0, &dummy);
+ getCurSessionWindow(pInfo->windowSup.pStreamAggSup, endData[i], endData[i], groupId, 0, &dummy);
ASSERT(pEndWin);
+ TSKEY ts = INT64_MIN;
colDataAppend(pDestStartCol, i, (const char*)&pStartWin->win.skey, false);
colDataAppend(pDestEndCol, i, (const char*)&pEndWin->win.ekey, false);
+ colDataAppendNULL(pDestUidCol, i);
colDataAppend(pDestGpCol, i, (const char*)&groupId, false);
+ colDataAppendNULL(pDestCalStartTsCol, i);
+ colDataAppendNULL(pDestCalEndTsCol, i);
pDestBlock->info.rows++;
}
return TSDB_CODE_SUCCESS;
@@ -1114,34 +1183,49 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS
if (rows == 0) {
return TSDB_CODE_SUCCESS;
}
- int32_t code = blockDataEnsureCapacity(pDestBlock, rows);
+ int32_t code = blockDataEnsureCapacity(pDestBlock, rows * 2);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- SColumnInfoData* pTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX);
- SColumnInfoData* pUidCol = taosArrayGet(pSrcBlock->pDataBlock, UID_COLUMN_INDEX);
- uint64_t* uidCol = (uint64_t*)pUidCol->pData;
- ASSERT(pTsCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
- TSKEY* tsCol = (TSKEY*)pTsCol->pData;
+ SColumnInfoData* pSrcTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX);
+ SColumnInfoData* pSrcUidCol = taosArrayGet(pSrcBlock->pDataBlock, UID_COLUMN_INDEX);
+ uint64_t* srcUidData = (uint64_t*)pSrcUidCol->pData;
+ SColumnInfoData* pSrcGpCol = taosArrayGet(pSrcBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t* srcGp = (uint64_t*)pSrcGpCol->pData;
+ ASSERT(pSrcTsCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
+ TSKEY* tsCol = (TSKEY*)pSrcTsCol->pData;
SColumnInfoData* pStartTsCol = taosArrayGet(pDestBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pEndTsCol = taosArrayGet(pDestBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ SColumnInfoData* pDeUidCol = taosArrayGet(pDestBlock->pDataBlock, UID_COLUMN_INDEX);
SColumnInfoData* pGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
SColumnInfoData* pCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
SColumnInfoData* pCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
- uint64_t groupId = getGroupId(pInfo->pTableScanOp, uidCol[0]);
+ int64_t version = pSrcBlock->info.version - 1;
for (int32_t i = 0; i < rows;) {
- colDataAppend(pCalStartTsCol, pDestBlock->info.rows, (const char*)(tsCol + i), false);
- STimeWindow win = getSlidingWindow(tsCol, &pInfo->interval, &pSrcBlock->info, &i);
- colDataAppend(pCalEndTsCol, pDestBlock->info.rows, (const char*)(tsCol + i - 1), false);
-
+ uint64_t srcUid = srcUidData[i];
+ uint64_t groupId = getGroupIdByData(pInfo, srcUid, tsCol[i], version);
+ uint64_t srcGpId = srcGp[i];
+ TSKEY calStartTs = tsCol[i];
+ colDataAppend(pCalStartTsCol, pDestBlock->info.rows, (const char*)(&calStartTs), false);
+ STimeWindow win = getSlidingWindow(tsCol, &pInfo->interval, &pSrcBlock->info, &i, pInfo->partitionSup.needCalc);
+ TSKEY calEndTs = tsCol[i - 1];
+ colDataAppend(pCalEndTsCol, pDestBlock->info.rows, (const char*)(&calEndTs), false);
+ colDataAppend(pDeUidCol, pDestBlock->info.rows, (const char*)(&srcUid), false);
colDataAppend(pStartTsCol, pDestBlock->info.rows, (const char*)(&win.skey), false);
colDataAppend(pEndTsCol, pDestBlock->info.rows, (const char*)(&win.ekey), false);
colDataAppend(pGpCol, pDestBlock->info.rows, (const char*)(&groupId), false);
pDestBlock->info.rows++;
+ if (pInfo->partitionSup.needCalc && srcGpId != 0 && groupId != srcGpId) {
+ colDataAppend(pCalStartTsCol, pDestBlock->info.rows, (const char*)(&calStartTs), false);
+ colDataAppend(pCalEndTsCol, pDestBlock->info.rows, (const char*)(&calEndTs), false);
+ colDataAppend(pDeUidCol, pDestBlock->info.rows, (const char*)(&srcUid), false);
+ colDataAppend(pStartTsCol, pDestBlock->info.rows, (const char*)(&win.skey), false);
+ colDataAppend(pEndTsCol, pDestBlock->info.rows, (const char*)(&win.ekey), false);
+ colDataAppend(pGpCol, pDestBlock->info.rows, (const char*)(&srcGpId), false);
+ pDestBlock->info.rows++;
+ }
}
- // all rows have same group id
- pDestBlock->info.groupId = groupId;
return TSDB_CODE_SUCCESS;
}
@@ -1153,17 +1237,20 @@ static int32_t generateScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock,
code = generateSessionScanRange(pInfo, pSrcBlock, pDestBlock);
}
pDestBlock->info.type = STREAM_CLEAR;
+ pDestBlock->info.version = pSrcBlock->info.version;
blockDataUpdateTsWindow(pDestBlock, 0);
return code;
}
-void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid) {
+void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid, uint64_t* pGp) {
SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
+ SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
colDataAppend(pStartTsCol, pBlock->info.rows, (const char*)pStartTs, false);
colDataAppend(pEndTsCol, pBlock->info.rows, (const char*)pEndTs, false);
colDataAppend(pUidCol, pBlock->info.rows, (const char*)pUid, false);
+ colDataAppend(pGpCol, pBlock->info.rows, (const char*)pGp, false);
pBlock->info.rows++;
}
@@ -1188,31 +1275,25 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock
// must check update info first.
bool update = updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, tsCol[rowId]);
bool closedWin = isClosed && isSignleIntervalWindow(pInfo) &&
- isDeletedWindow(&win, pBlock->info.groupId, pInfo->sessionSup.pIntervalAggSup);
+ isDeletedWindow(&win, pBlock->info.groupId, pInfo->windowSup.pIntervalAggSup);
if ((update || closedWin) && out) {
- appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid);
+ uint64_t gpId = closedWin&&pInfo->partitionSup.needCalc ?
+ calGroupIdByData(&pInfo->partitionSup, pInfo->pPartScalarSup, pBlock, rowId) : 0;
+ appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid,
+ &gpId);
}
}
- if (out) {
+ if (out && pInfo->pUpdateDataRes->info.rows > 0) {
+ pInfo->pUpdateDataRes->info.version = pBlock->info.version;
blockDataUpdateTsWindow(pInfo->pUpdateDataRes, 0);
- pInfo->pUpdateDataRes->info.type = STREAM_CLEAR;
- }
-}
-
-static void setBlockGroupId(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t uidColIndex) {
- ASSERT(taosArrayGetSize(pBlock->pDataBlock) >= 3);
- SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, uidColIndex);
- uint64_t* uidCol = (uint64_t*)pColDataInfo->pData;
- ASSERT(pBlock->info.rows > 0);
- for (int32_t i = 0; i < pBlock->info.rows; i++) {
- uidCol[i] = getGroupId(pOperator, uidCol[i]);
+ pInfo->pUpdateDataRes->info.type = pInfo->partitionSup.needCalc ? STREAM_DELETE_DATA : STREAM_CLEAR;
}
}
static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock) {
SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
SOperatorInfo* pOperator = pInfo->pStreamScanOp;
- SExecTaskInfo* pTaskInfo = pInfo->pStreamScanOp->pTaskInfo;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows);
@@ -1221,7 +1302,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
pInfo->pRes->info.type = STREAM_NORMAL;
pInfo->pRes->info.version = pBlock->info.version;
- uint64_t* groupIdPre = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
+ uint64_t* groupIdPre = taosHashGet(pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
if (groupIdPre) {
pInfo->pRes->info.groupId = *groupIdPre;
} else {
@@ -1259,7 +1340,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
blockDataFreeRes((SSDataBlock*)pBlock);
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
@@ -1269,12 +1350,34 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
return 0;
}
-static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
- // NOTE: this operator does never check if current status is done or not
+static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStreamScanInfo* pInfo = pOperator->info;
- qDebug("stream scan called");
+ qDebug("queue scan called");
+ if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
+ if (pResult && pResult->info.rows > 0) {
+ qDebug("queue scan tsdb return %d rows", pResult->info.rows);
+ pTaskInfo->streamInfo.returned = 1;
+ return pResult;
+ } else {
+ if (!pTaskInfo->streamInfo.returned) {
+ STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
+ tsdbReaderClose(pTSInfo->dataReader);
+ pTSInfo->dataReader = NULL;
+ tqOffsetResetToLog(&pTaskInfo->streamInfo.prepareStatus, pTaskInfo->streamInfo.snapshotVer);
+ qDebug("queue scan tsdb over, switch to wal ver %d", pTaskInfo->streamInfo.snapshotVer + 1);
+ if (tqSeekVer(pInfo->tqReader, pTaskInfo->streamInfo.snapshotVer + 1) < 0) {
+ return NULL;
+ }
+ ASSERT(pInfo->tqReader->pWalReader->curVersion == pTaskInfo->streamInfo.snapshotVer + 1);
+ } else {
+ return NULL;
+ }
+ }
+ }
+
if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
while (1) {
SFetchRet ret = {0};
@@ -1286,21 +1389,21 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
}
// TODO clean data block
if (pInfo->pRes->info.rows > 0) {
- qDebug("stream scan log return %d rows", pInfo->pRes->info.rows);
+ qDebug("queue scan log return %d rows", pInfo->pRes->info.rows);
return pInfo->pRes;
}
} else if (ret.fetchType == FETCH_TYPE__META) {
ASSERT(0);
- pTaskInfo->streamInfo.lastStatus = ret.offset;
- pTaskInfo->streamInfo.metaBlk = ret.meta;
- return NULL;
+ // pTaskInfo->streamInfo.lastStatus = ret.offset;
+ // pTaskInfo->streamInfo.metaBlk = ret.meta;
+ // return NULL;
} else if (ret.fetchType == FETCH_TYPE__NONE) {
pTaskInfo->streamInfo.lastStatus = ret.offset;
ASSERT(pTaskInfo->streamInfo.lastStatus.version >= pTaskInfo->streamInfo.prepareStatus.version);
ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 == pInfo->tqReader->pWalReader->curVersion);
char formatBuf[80];
tFormatOffset(formatBuf, 80, &ret.offset);
- qDebug("stream scan log return null, offset %s", formatBuf);
+ qDebug("queue scan log return null, offset %s", formatBuf);
return NULL;
} else {
ASSERT(0);
@@ -1314,11 +1417,53 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
}
qDebug("stream scan tsdb return null");
return NULL;
- } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) {
- // TODO scan meta
+ } else {
ASSERT(0);
return NULL;
}
+}
+
+static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
+ // NOTE: this operator does never check if current status is done or not
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStreamScanInfo* pInfo = pOperator->info;
+
+ qDebug("stream scan called");
+#if 0
+ SStreamState* pState = pTaskInfo->streamInfo.pState;
+ if (pState) {
+ printf(">>>>>>>> stream write backend\n");
+ SWinKey key = {
+ .ts = 1,
+ .groupId = 2,
+ };
+ char tmp[100] = "abcdefg1";
+ if (streamStatePut(pState, &key, &tmp, strlen(tmp) + 1) < 0) {
+ ASSERT(0);
+ }
+
+ key.ts = 2;
+ char tmp2[100] = "abcdefg2";
+ if (streamStatePut(pState, &key, &tmp2, strlen(tmp2) + 1) < 0) {
+ ASSERT(0);
+ }
+
+ key.groupId = 5;
+ key.ts = 1;
+ char tmp3[100] = "abcdefg3";
+ if (streamStatePut(pState, &key, &tmp3, strlen(tmp3) + 1) < 0) {
+ ASSERT(0);
+ }
+
+ char* val2 = NULL;
+ int32_t sz;
+ if (streamStateGet(pState, &key, (void**)&val2, &sz) < 0) {
+ ASSERT(0);
+ }
+ printf("stream read %s %d\n", val2, sz);
+ streamFreeVal(val2);
+ }
+#endif
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
@@ -1376,6 +1521,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
default:
break;
}
+ // printDataBlock(pBlock, "stream scan recv");
return pBlock;
} else if (pInfo->blockType == STREAM_INPUT__DATA_SUBMIT) {
qDebug("scan mode %d", pInfo->scanMode);
@@ -1385,6 +1531,14 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
return pInfo->pRes;
} break;
+ case STREAM_SCAN_FROM_DELETERES: {
+ generateScanRange(pInfo, pInfo->pUpdateDataRes, pInfo->pUpdateRes);
+ prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
+ pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
+ copyDataBlock(pInfo->pDeleteDataRes, pInfo->pUpdateRes);
+ pInfo->pDeleteDataRes->info.type = STREAM_DELETE_DATA;
+ return pInfo->pDeleteDataRes;
+ } break;
case STREAM_SCAN_FROM_UPDATERES: {
generateScanRange(pInfo, pInfo->pUpdateDataRes, pInfo->pUpdateRes);
prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
@@ -1400,6 +1554,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->cond.twindows, pInfo->groupId, version);
pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA;
checkUpdateData(pInfo, true, pSDB, false);
+ // printDataBlock(pSDB, "stream scan update");
return pSDB;
}
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
@@ -1408,7 +1563,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
break;
}
- SStreamAggSupporter* pSup = pInfo->sessionSup.pStreamAggSup;
+ SStreamAggSupporter* pSup = pInfo->windowSup.pStreamAggSup;
if (isStateWindow(pInfo) && pSup->pScanBlock->info.rows > 0) {
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
pInfo->updateResIndex = 0;
@@ -1474,7 +1629,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
// record the scan action.
pInfo->numOfExec++;
pOperator->resultInfo.totalRows += pBlockInfo->rows;
- printDataBlock(pInfo->pRes, "stream scan");
+ // printDataBlock(pInfo->pRes, "stream scan");
if (pBlockInfo->rows == 0) {
updateInfoDestoryColseWinSBF(pInfo->pUpdateInfo);
@@ -1483,30 +1638,26 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
checkUpdateData(pInfo, true, pInfo->pRes, true);
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlockInfo->window.ekey);
if (pInfo->pUpdateDataRes->info.rows > 0) {
+ pInfo->updateResIndex = 0;
if (pInfo->pUpdateDataRes->info.type == STREAM_CLEAR) {
- pInfo->updateResIndex = 0;
pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES;
} else if (pInfo->pUpdateDataRes->info.type == STREAM_INVERT) {
pInfo->scanMode = STREAM_SCAN_FROM_RES;
return pInfo->pUpdateDataRes;
+ } else if (pInfo->pUpdateDataRes->info.type == STREAM_DELETE_DATA) {
+ pInfo->scanMode = STREAM_SCAN_FROM_DELETERES;
}
}
}
qDebug("scan rows: %d", pBlockInfo->rows);
return (pBlockInfo->rows == 0) ? NULL : pInfo->pRes;
-
} else {
ASSERT(0);
return NULL;
}
}
-static SSDataBlock* doRawScan(SOperatorInfo* pInfo) {
- //
- return NULL;
-}
-
static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) {
SArray* tableIdList = taosArrayInit(4, sizeof(uint64_t));
@@ -1519,24 +1670,162 @@ static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) {
return tableIdList;
}
+static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
+ // NOTE: this operator does never check if current status is done or not
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStreamRawScanInfo* pInfo = pOperator->info;
+ pTaskInfo->streamInfo.metaRsp.metaRspLen = 0; // use metaRspLen !=0 to judge if data is meta
+ pTaskInfo->streamInfo.metaRsp.metaRsp = NULL;
+
+ qDebug("tmqsnap doRawScan called");
+ if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ SSDataBlock* pBlock = &pInfo->pRes;
+
+ if (pInfo->dataReader && tsdbNextDataBlock(pInfo->dataReader)) {
+ if (isTaskKilled(pTaskInfo)) {
+ longjmp(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
+ }
+
+ tsdbRetrieveDataBlockInfo(pInfo->dataReader, &pBlock->info);
+
+ SArray* pCols = tsdbRetrieveDataBlock(pInfo->dataReader, NULL);
+ pBlock->pDataBlock = pCols;
+ if (pCols == NULL) {
+ longjmp(pTaskInfo->env, terrno);
+ }
+
+ qDebug("tmqsnap doRawScan get data uid:%ld", pBlock->info.uid);
+ pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_DATA;
+ pTaskInfo->streamInfo.lastStatus.uid = pBlock->info.uid;
+ pTaskInfo->streamInfo.lastStatus.ts = pBlock->info.window.ekey;
+ return pBlock;
+ }
+
+ SMetaTableInfo mtInfo = getUidfromSnapShot(pInfo->sContext);
+ if (mtInfo.uid == 0) { // read snapshot done, change to get data from wal
+ qDebug("tmqsnap read snapshot done, change to get data from wal");
+ pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid;
+ pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
+ pTaskInfo->streamInfo.lastStatus.version = pInfo->sContext->snapVersion;
+ } else {
+ pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid;
+ pTaskInfo->streamInfo.prepareStatus.ts = INT64_MIN;
+ qDebug("tmqsnap change get data uid:%ld", mtInfo.uid);
+ qStreamPrepareScan(pTaskInfo, &pTaskInfo->streamInfo.prepareStatus, pInfo->sContext->subType);
+ }
+ qDebug("tmqsnap stream scan tsdb return null");
+ return NULL;
+ } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) {
+ SSnapContext* sContext = pInfo->sContext;
+ void* data = NULL;
+ int32_t dataLen = 0;
+ int16_t type = 0;
+ int64_t uid = 0;
+ if (getMetafromSnapShot(sContext, &data, &dataLen, &type, &uid) < 0) {
+ qError("tmqsnap getMetafromSnapShot error");
+ taosMemoryFreeClear(data);
+ return NULL;
+ }
+
+ if (!sContext->queryMetaOrData) { // change to get data next poll request
+ pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META;
+ pTaskInfo->streamInfo.lastStatus.uid = uid;
+ pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__SNAPSHOT_DATA;
+ pTaskInfo->streamInfo.metaRsp.rspOffset.uid = 0;
+ pTaskInfo->streamInfo.metaRsp.rspOffset.ts = INT64_MIN;
+ } else {
+ pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META;
+ pTaskInfo->streamInfo.lastStatus.uid = uid;
+ pTaskInfo->streamInfo.metaRsp.rspOffset = pTaskInfo->streamInfo.lastStatus;
+ pTaskInfo->streamInfo.metaRsp.resMsgType = type;
+ pTaskInfo->streamInfo.metaRsp.metaRspLen = dataLen;
+ pTaskInfo->streamInfo.metaRsp.metaRsp = data;
+ }
+
+ return NULL;
+ }
+ // else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
+ // int64_t fetchVer = pTaskInfo->streamInfo.prepareStatus.version + 1;
+ //
+ // while(1){
+ // if (tqFetchLog(pInfo->tqReader->pWalReader, pInfo->sContext->withMeta, &fetchVer, &pInfo->pCkHead) < 0) {
+ // qDebug("tmqsnap tmq poll: consumer log end. offset %" PRId64, fetchVer);
+ // pTaskInfo->streamInfo.lastStatus.version = fetchVer;
+ // pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
+ // return NULL;
+ // }
+ // SWalCont* pHead = &pInfo->pCkHead->head;
+ // qDebug("tmqsnap tmq poll: consumer log offset %" PRId64 " msgType %d", fetchVer, pHead->msgType);
+ //
+ // if (pHead->msgType == TDMT_VND_SUBMIT) {
+ // SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
+ // tqReaderSetDataMsg(pInfo->tqReader, pCont, 0);
+ // SSDataBlock* block = tqLogScanExec(pInfo->sContext->subType, pInfo->tqReader, pInfo->pFilterOutTbUid,
+ // &pInfo->pRes); if(block){
+ // pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
+ // pTaskInfo->streamInfo.lastStatus.version = fetchVer;
+ // qDebug("tmqsnap fetch data msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
+ // return block;
+ // }else{
+ // fetchVer++;
+ // }
+ // } else{
+ // ASSERT(pInfo->sContext->withMeta);
+ // ASSERT(IS_META_MSG(pHead->msgType));
+ // qDebug("tmqsnap fetch meta msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
+ // pTaskInfo->streamInfo.metaRsp.rspOffset.version = fetchVer;
+ // pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__LOG;
+ // pTaskInfo->streamInfo.metaRsp.resMsgType = pHead->msgType;
+ // pTaskInfo->streamInfo.metaRsp.metaRspLen = pHead->bodyLen;
+ // pTaskInfo->streamInfo.metaRsp.metaRsp = taosMemoryMalloc(pHead->bodyLen);
+ // memcpy(pTaskInfo->streamInfo.metaRsp.metaRsp, pHead->body, pHead->bodyLen);
+ // return NULL;
+ // }
+ // }
+ return NULL;
+}
+
+static void destroyRawScanOperatorInfo(void* param) {
+ SStreamRawScanInfo* pRawScan = (SStreamRawScanInfo*)param;
+ tsdbReaderClose(pRawScan->dataReader);
+ destroySnapContext(pRawScan->sContext);
+ taosMemoryFree(pRawScan);
+}
+
// for subscribing db or stb (not including column),
// if this scan is used, meta data can be return
// and schemas are decided when scanning
-SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode,
- SExecTaskInfo* pTaskInfo, STimeWindowAggSupp* pTwSup) {
+SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pTaskInfo) {
// create operator
// create tb reader
// create meta reader
// create tq reader
- return NULL;
+ SStreamRawScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamRawScanInfo));
+ SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+ if (pInfo == NULL || pOperator == NULL) {
+ terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
+ return NULL;
+ }
+
+ pInfo->vnode = pHandle->vnode;
+
+ pInfo->sContext = pHandle->sContext;
+ pOperator->name = "RawStreamScanOperator";
+ // pOperator->blocking = false;
+ // pOperator->status = OP_NOT_OPENED;
+ pOperator->info = pInfo;
+ pOperator->pTaskInfo = pTaskInfo;
+
+ pOperator->fpSet = createOperatorFpSet(NULL, doRawScan, NULL, NULL, destroyRawScanOperatorInfo, NULL, NULL, NULL);
+ return pOperator;
}
-static void destroyStreamScanOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyStreamScanOperatorInfo(void* param) {
SStreamScanInfo* pStreamScan = (SStreamScanInfo*)param;
if (pStreamScan->pTableScanOp && pStreamScan->pTableScanOp->info) {
STableScanInfo* pTableScanInfo = pStreamScan->pTableScanOp->info;
- destroyTableScanOperatorInfo(pTableScanInfo, numOfOutput);
+ destroyTableScanOperatorInfo(pTableScanInfo);
taosMemoryFreeClear(pStreamScan->pTableScanOp);
}
if (pStreamScan->tqReader) {
@@ -1547,7 +1836,7 @@ static void destroyStreamScanOperatorInfo(void* param, int32_t numOfOutput) {
}
if (pStreamScan->pPseudoExpr) {
destroyExprInfo(pStreamScan->pPseudoExpr, pStreamScan->numOfPseudoExpr);
- taosMemoryFreeClear(pStreamScan->pPseudoExpr);
+ taosMemoryFree(pStreamScan->pPseudoExpr);
}
updateInfoDestroy(pStreamScan->pUpdateInfo);
@@ -1626,18 +1915,14 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
ASSERT(pHandle->tqReader);
pInfo->tqReader = pHandle->tqReader;
}
-
- if (pTSInfo->pdInfo.interval.interval > 0) {
- pInfo->pUpdateInfo = updateInfoInitP(&pTSInfo->pdInfo.interval, pInfo->twAggSup.waterMark);
- } else {
- pInfo->pUpdateInfo = NULL;
- }
-
+
+ pInfo->pUpdateInfo = NULL;
pInfo->pTableScanOp = pTableScanOp;
pInfo->interval = pTSInfo->pdInfo.interval;
pInfo->readHandle = *pHandle;
pInfo->tableUid = pScanPhyNode->uid;
+ pTaskInfo->streamInfo.snapshotVer = pHandle->version;
// set the extract column id to streamHandle
tqReaderSetColIdList(pInfo->tqReader, pColIds);
@@ -1662,8 +1947,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->pUpdateRes = createSpecialDataBlock(STREAM_CLEAR);
pInfo->pCondition = pScanPhyNode->node.pConditions;
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
- pInfo->sessionSup =
- (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1, .parentType = QUERY_NODE_PHYSICAL_PLAN};
+ pInfo->windowSup =
+ (SWindowSupporter){.pStreamAggSup = NULL, .gap = -1, .parentType = QUERY_NODE_PHYSICAL_PLAN};
pInfo->groupId = 0;
pInfo->pPullDataRes = createSpecialDataBlock(STREAM_RETRIEVE);
pInfo->pStreamScanOp = pOperator;
@@ -1672,6 +1957,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->updateWin = (STimeWindow){.skey = INT64_MAX, .ekey = INT64_MAX};
pInfo->pUpdateDataRes = createSpecialDataBlock(STREAM_CLEAR);
pInfo->assignBlockUid = pTableScanNode->assignBlockUid;
+ pInfo->partitionSup.needCalc = false;
pOperator->name = "StreamScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
@@ -1681,8 +1967,9 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);
pOperator->pTaskInfo = pTaskInfo;
- pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamScan, NULL, NULL, destroyStreamScanOperatorInfo,
- NULL, NULL, NULL);
+ __optr_fn_t nextFn = pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM ? doStreamScan : doQueueScan;
+ pOperator->fpSet =
+ createOperatorFpSet(operatorDummyOpenFn, nextFn, NULL, NULL, destroyStreamScanOperatorInfo, NULL, NULL, NULL);
return pOperator;
@@ -1692,7 +1979,7 @@ _error:
return NULL;
}
-static void destroySysScanOperator(void* param, int32_t numOfOutput) {
+static void destroySysScanOperator(void* param) {
SSysTableScanInfo* pInfo = (SSysTableScanInfo*)param;
tsem_destroy(&pInfo->ready);
blockDataDestroy(pInfo->pRes);
@@ -1950,7 +2237,7 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
metaReaderClear(&smr);
metaCloseTbCursor(pInfo->pCur);
pInfo->pCur = NULL;
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
char stableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
@@ -2153,7 +2440,7 @@ static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) {
metaReaderClear(&mr);
metaCloseTbCursor(pInfo->pCur);
pInfo->pCur = NULL;
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
// number of columns
@@ -2385,10 +2672,10 @@ int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) {
const SSysTableMeta* pSysDbTableMeta = NULL;
getInfosDbMeta(&pSysDbTableMeta, &size);
- p->info.rows = buildDbTableInfoBlock(p, pSysDbTableMeta, size, TSDB_INFORMATION_SCHEMA_DB);
+ p->info.rows = buildDbTableInfoBlock(pInfo->sysInfo, p, pSysDbTableMeta, size, TSDB_INFORMATION_SCHEMA_DB);
getPerfDbMeta(&pSysDbTableMeta, &size);
- p->info.rows = buildDbTableInfoBlock(p, pSysDbTableMeta, size, TSDB_PERFORMANCE_SCHEMA_DB);
+ p->info.rows = buildDbTableInfoBlock(pInfo->sysInfo, p, pSysDbTableMeta, size, TSDB_PERFORMANCE_SCHEMA_DB);
pInfo->pRes->info.rows = p->info.rows;
relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false);
@@ -2397,13 +2684,16 @@ int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) {
return pInfo->pRes->info.rows;
}
-int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size,
+int32_t buildDbTableInfoBlock(bool sysInfo, const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size,
const char* dbName) {
char n[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
int32_t numOfRows = p->info.rows;
for (int32_t i = 0; i < size; ++i) {
const SSysTableMeta* pm = &pSysDbTableMeta[i];
+ if (!sysInfo && pm->sysInfo) {
+ continue;
+ }
SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0);
@@ -2457,6 +2747,7 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan
pInfo->accountId = pScanPhyNode->accountId;
pInfo->pUser = taosMemoryStrDup((void*)pUser);
+ pInfo->sysInfo = pScanPhyNode->sysInfo;
pInfo->showRewrite = pScanPhyNode->showRewrite;
pInfo->pRes = pResBlock;
pInfo->pCondition = pScanNode->node.pConditions;
@@ -2527,7 +2818,7 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", item->uid, tstrerror(terrno),
GET_TASKID(pTaskInfo));
metaReaderClear(&mr);
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) {
@@ -2577,12 +2868,10 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
return (pRes->info.rows == 0) ? NULL : pInfo->pRes;
}
-static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyTagScanOperatorInfo(void* param) {
STagScanInfo* pInfo = (STagScanInfo*)param;
pInfo->pRes = blockDataDestroy(pInfo->pRes);
-
taosArrayDestroy(pInfo->pColMatchInfo);
-
taosMemoryFreeClear(param);
}
@@ -2642,6 +2931,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo);
if (code != TSDB_CODE_SUCCESS) {
+ qError("failed to getTableList, code: %s", tstrerror(code));
return code;
}
@@ -2777,7 +3067,7 @@ static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeSc
int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pseudoSup.pExprInfo,
pTableScanInfo->pseudoSup.numOfExprs, pBlock, GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
@@ -2820,7 +3110,7 @@ static SSDataBlock* getTableDataBlock(void* param) {
STsdbReader* reader = taosArrayGetP(pTableScanInfo->dataReaders, readerIdx);
while (tsdbNextDataBlock(reader)) {
if (isTaskKilled(pOperator->pTaskInfo)) {
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
+ T_LONG_JMP(pOperator->pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
}
// process this data block based on the probabilities
@@ -2843,7 +3133,7 @@ static SSDataBlock* getTableDataBlock(void* param) {
int32_t code = loadDataBlockFromOneTable(pOperator, pTableScanInfo, readerIdx, pBlock, &status);
// int32_t code = loadDataBlockOnDemand(pOperator->pRuntimeEnv, pTableScanInfo, pBlock, &status);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pOperator->pTaskInfo->env, code);
+ T_LONG_JMP(pOperator->pTaskInfo->env, code);
}
// current block is filter out according to filter condition, continue load the next block
@@ -2936,7 +3226,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
int32_t code = tsortOpen(pInfo->pSortHandle);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
return TSDB_CODE_SUCCESS;
@@ -3006,7 +3296,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
int32_t code = pOperator->fpSet._openFn(pOperator);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
size_t tableListSize = taosArrayGetSize(pInfo->tableListInfo->pTableList);
if (!pInfo->hasGroupId) {
@@ -3044,7 +3334,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
return pBlock;
}
-void destroyTableMergeScanOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyTableMergeScanOperatorInfo(void* param) {
STableMergeScanInfo* pTableScanInfo = (STableMergeScanInfo*)param;
cleanupQueryTableDataCond(&pTableScanInfo->cond);
taosArrayDestroy(pTableScanInfo->sortSourceParams);
diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c
index 4dd5e4ec15e9521b6c2cdc39562313592242773c..e2014ec97320c863a6857e94c538bd8d8319c2a1 100644
--- a/source/libs/executor/src/sortoperator.c
+++ b/source/libs/executor/src/sortoperator.c
@@ -20,7 +20,7 @@ static SSDataBlock* doSort(SOperatorInfo* pOperator);
static int32_t doOpenSortOperator(SOperatorInfo* pOperator);
static int32_t getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len);
-static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput);
+static void destroyOrderOperatorInfo(void* param);
// todo add limit/offset impl
SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortNode, SExecTaskInfo* pTaskInfo) {
@@ -156,7 +156,7 @@ void applyScalarFunction(SSDataBlock* pBlock, void* param) {
int32_t code = projectApplyFunctions(pOperator->exprSupp.pExprInfo, pBlock, pBlock, pOperator->exprSupp.pCtx,
pOperator->exprSupp.numOfExprs, NULL);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pOperator->pTaskInfo->env, code);
+ T_LONG_JMP(pOperator->pTaskInfo->env, code);
}
}
}
@@ -184,7 +184,7 @@ int32_t doOpenSortOperator(SOperatorInfo* pOperator) {
taosMemoryFreeClear(ps);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
pOperator->cost.openCost = (taosGetTimestampUs() - pInfo->startTs) / 1000.0;
@@ -204,7 +204,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
int32_t code = pOperator->fpSet._openFn(pOperator);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
SSDataBlock* pBlock = NULL;
@@ -250,7 +250,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
return blockDataGetNumOfRows(pBlock) > 0 ? pBlock : NULL;
}
-void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyOrderOperatorInfo(void* param) {
SSortOperatorInfo* pInfo = (SSortOperatorInfo*)param;
pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes);
@@ -388,7 +388,7 @@ int32_t beginSortGroup(SOperatorInfo* pOperator) {
taosMemoryFreeClear(ps);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
return TSDB_CODE_SUCCESS;
@@ -420,7 +420,7 @@ SSDataBlock* doGroupSort(SOperatorInfo* pOperator) {
int32_t code = pOperator->fpSet._openFn(pOperator);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
if (!pInfo->hasGroupId) {
@@ -468,7 +468,7 @@ int32_t getGroupSortExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, u
return TSDB_CODE_SUCCESS;
}
-void destroyGroupSortOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyGroupSortOperatorInfo(void* param) {
SGroupSortOperatorInfo* pInfo = (SGroupSortOperatorInfo*)param;
pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes);
@@ -575,7 +575,7 @@ int32_t doOpenMultiwayMergeOperator(SOperatorInfo* pOperator) {
int32_t code = tsortOpen(pInfo->pSortHandle);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
pOperator->cost.openCost = (taosGetTimestampUs() - pInfo->startTs) / 1000.0;
@@ -672,7 +672,7 @@ SSDataBlock* doMultiwayMerge(SOperatorInfo* pOperator) {
int32_t code = pOperator->fpSet._openFn(pOperator);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
SSDataBlock* pBlock = getMultiwaySortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes,
@@ -685,7 +685,7 @@ SSDataBlock* doMultiwayMerge(SOperatorInfo* pOperator) {
return pBlock;
}
-void destroyMultiwayMergeOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyMultiwayMergeOperatorInfo(void* param) {
SMultiwayMergeOperatorInfo* pInfo = (SMultiwayMergeOperatorInfo*)param;
pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes);
pInfo->pInputBlock = blockDataDestroy(pInfo->pInputBlock);
diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c
index 59dd58070d1b96fa5003c0162957d6e8adedb061..f23552c5a7b82207ffc368dbae7c1894cb6a8edd 100644
--- a/source/libs/executor/src/tfill.c
+++ b/source/libs/executor/src/tfill.c
@@ -36,6 +36,7 @@
#define GET_DEST_SLOT_ID(_p) ((_p)->pExpr->base.resSchema.slotId)
static void doSetVal(SColumnInfoData* pDstColInfoData, int32_t rowIndex, const SGroupKeys* pKey);
+static bool fillIfWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData, int32_t rowIndex);
static void setNullRow(SSDataBlock* pBlock, SFillInfo* pFillInfo, int32_t rowIndex) {
for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
@@ -43,9 +44,8 @@ static void setNullRow(SSDataBlock* pBlock, SFillInfo* pFillInfo, int32_t rowInd
int32_t dstSlotId = GET_DEST_SLOT_ID(pCol);
SColumnInfoData* pDstColInfo = taosArrayGet(pBlock->pDataBlock, dstSlotId);
if (pCol->notFillCol) {
- if (pDstColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
- colDataAppend(pDstColInfo, rowIndex, (const char*)&pFillInfo->currentKey, false);
- } else {
+ bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfo, rowIndex);
+ if (!filled) {
SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal;
SGroupKeys* pKey = taosArrayGet(p, i);
doSetVal(pDstColInfo, rowIndex, pKey);
@@ -76,6 +76,35 @@ static void doSetUserSpecifiedValue(SColumnInfoData* pDst, SVariant* pVar, int32
}
}
+//fill windows pseudo column, _wstart, _wend, _wduration and return true, otherwise return false
+static bool fillIfWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData, int32_t rowIndex) {
+ if (!pCol->notFillCol) {
+ return false;
+ }
+ if (pCol->pExpr->pExpr->nodeType == QUERY_NODE_COLUMN) {
+ if (pCol->pExpr->base.numOfParams != 1) {
+ return false;
+ }
+ if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_START) {
+ colDataAppend(pDstColInfoData, rowIndex, (const char*)&pFillInfo->currentKey, false);
+ return true;
+ } else if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_END) {
+ //TODO: include endpoint
+ SInterval* pInterval = &pFillInfo->interval;
+ int32_t step = (pFillInfo->order == TSDB_ORDER_ASC) ? 1 : -1;
+ int64_t windowEnd =
+ taosTimeAdd(pFillInfo->currentKey, pInterval->sliding * step, pInterval->slidingUnit, pInterval->precision);
+ colDataAppend(pDstColInfoData, rowIndex, (const char*)&windowEnd, false);
+ return true;
+ } else if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_DURATION) {
+ //TODO: include endpoint
+ colDataAppend(pDstColInfoData, rowIndex, (const char*)&pFillInfo->interval.sliding, false);
+ return true;
+ }
+ }
+ return false;
+}
+
static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* pSrcBlock, int64_t ts,
bool outOfBound) {
SPoint point1, point2, point;
@@ -92,10 +121,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock*
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
SColumnInfoData* pDstColInfoData = taosArrayGet(pBlock->pDataBlock, GET_DEST_SLOT_ID(pCol));
-
- if (pDstColInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
- colDataAppend(pDstColInfoData, index, (const char*)&pFillInfo->currentKey, false);
- } else {
+ bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfoData, index);
+ if (!filled) {
SGroupKeys* pKey = taosArrayGet(p, i);
doSetVal(pDstColInfoData, index, pKey);
}
@@ -106,10 +133,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock*
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
SColumnInfoData* pDstColInfoData = taosArrayGet(pBlock->pDataBlock, GET_DEST_SLOT_ID(pCol));
-
- if (pDstColInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
- colDataAppend(pDstColInfoData, index, (const char*)&pFillInfo->currentKey, false);
- } else {
+ bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfoData, index);
+ if (!filled) {
SGroupKeys* pKey = taosArrayGet(p, i);
doSetVal(pDstColInfoData, index, pKey);
}
@@ -127,9 +152,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock*
int16_t type = pDstCol->info.type;
if (pCol->notFillCol) {
- if (type == TSDB_DATA_TYPE_TIMESTAMP) {
- colDataAppend(pDstCol, index, (const char*)&pFillInfo->currentKey, false);
- } else {
+ bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstCol, index);
+ if (!filled) {
SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal;
SGroupKeys* pKey = taosArrayGet(p, i);
doSetVal(pDstCol, index, pKey);
@@ -170,9 +194,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock*
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, slotId);
if (pCol->notFillCol) {
- if (pDst->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
- colDataAppend(pDst, index, (const char*)&pFillInfo->currentKey, false);
- } else {
+ bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDst, index);
+ if (!filled) {
SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal;
SGroupKeys* pKey = taosArrayGet(p, i);
doSetVal(pDst, index, pKey);
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index 0594a727fcaaf8f41a55703e64d7247a2dca6d15..26667cd64fe2e72effcf73af291d0ff71581ebe4 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -15,6 +15,7 @@
#include "executorimpl.h"
#include "function.h"
#include "functionMgt.h"
+#include "tcommon.h"
#include "tcompare.h"
#include "tdatablock.h"
#include "tfill.h"
@@ -27,21 +28,21 @@ typedef enum SResultTsInterpType {
#define IS_FINAL_OP(op) ((op)->isFinal)
-typedef struct SWinRes {
- TSKEY ts;
- uint64_t groupId;
-} SWinRes;
-
typedef struct SPullWindowInfo {
STimeWindow window;
uint64_t groupId;
} SPullWindowInfo;
+typedef struct SOpenWindowInfo {
+ SResultRowPosition pos;
+ uint64_t groupId;
+} SOpenWindowInfo;
+
static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator);
static int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo);
-static SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult);
+static SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult, uint64_t groupId);
static void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInfo* pInfo, SResultRow* pResult);
///*
@@ -602,14 +603,14 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num
int32_t startPos = 0;
int32_t numOfOutput = pSup->numOfExprs;
- uint64_t groupId = pBlock->info.groupId;
SResultRow* pResult = NULL;
while (1) {
SListNode* pn = tdListGetHead(pResultRowInfo->openWindow);
-
- SResultRowPosition* p1 = (SResultRowPosition*)pn->data;
+ SOpenWindowInfo* pOpenWin = (SOpenWindowInfo *)pn->data;
+ uint64_t groupId = pOpenWin->groupId;
+ SResultRowPosition* p1 = &pOpenWin->pos;
if (p->pageId == p1->pageId && p->offset == p1->offset) {
break;
}
@@ -628,20 +629,24 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num
int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &w, (scanFlag == MAIN_SCAN), &pResult, groupId, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
ASSERT(!isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP));
SGroupKeys* pTsKey = taosArrayGet(pInfo->pPrevValues, 0);
int64_t prevTs = *(int64_t*)pTsKey->pData;
- doTimeWindowInterpolation(pInfo->pPrevValues, pBlock->pDataBlock, prevTs, -1, tsCols[startPos], startPos, w.ekey,
- RESULT_ROW_END_INTERP, pSup);
+ if (groupId == pBlock->info.groupId) {
+ doTimeWindowInterpolation(pInfo->pPrevValues, pBlock->pDataBlock, prevTs, -1, tsCols[startPos], startPos, w.ekey,
+ RESULT_ROW_END_INTERP, pSup);
+ }
setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
setNotInterpoWindowKey(pSup->pCtx, numOfExprs, RESULT_ROW_START_INTERP);
- doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, 0, pBlock->info.rows, numOfExprs);
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &w, true);
+ doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, 0, pBlock->info.rows,
+ numOfExprs);
if (isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)) {
closeResultRow(pr);
@@ -812,7 +817,7 @@ static int32_t savePullWindow(SPullWindowInfo* pPullInfo, SArray* pPullWins) {
int32_t compareResKey(void* pKey, void* data, int32_t index) {
SArray* res = (SArray*)data;
SResKeyPos* pos = taosArrayGetP(res, index);
- SWinRes* pData = (SWinRes*)pKey;
+ SWinKey* pData = (SWinKey*)pKey;
if (pData->ts == *(int64_t*)pos->key) {
if (pData->groupId > pos->groupId) {
return 1;
@@ -828,7 +833,7 @@ int32_t compareResKey(void* pKey, void* data, int32_t index) {
static int32_t saveResult(int64_t ts, int32_t pageId, int32_t offset, uint64_t groupId, SArray* pUpdated) {
int32_t size = taosArrayGetSize(pUpdated);
- SWinRes data = {.ts = ts, .groupId = groupId};
+ SWinKey data = {.ts = ts, .groupId = groupId};
int32_t index = binarySearchCom(pUpdated, size, &data, TSDB_ORDER_DESC, compareResKey);
if (index == -1) {
index = 0;
@@ -861,8 +866,8 @@ static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_
newPos->groupId = groupId;
newPos->pos = (SResultRowPosition){.pageId = pageId, .offset = offset};
*(int64_t*)newPos->key = ts;
- SWinRes key = {.ts = ts, .groupId = groupId};
- if (taosHashPut(pUpdatedMap, &key, sizeof(SWinRes), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) {
+ SWinKey key = {.ts = ts, .groupId = groupId};
+ if (taosHashPut(pUpdatedMap, &key, sizeof(SWinKey), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) {
taosMemoryFree(newPos);
}
return TSDB_CODE_SUCCESS;
@@ -879,20 +884,20 @@ static int32_t saveResultRow(SResultRow* result, uint64_t groupId, SArray* pUpda
static void removeResults(SArray* pWins, SHashObj* pUpdatedMap) {
int32_t size = taosArrayGetSize(pWins);
for (int32_t i = 0; i < size; i++) {
- SWinRes* pW = taosArrayGet(pWins, i);
- taosHashRemove(pUpdatedMap, pW, sizeof(SWinRes));
+ SWinKey* pW = taosArrayGet(pWins, i);
+ taosHashRemove(pUpdatedMap, pW, sizeof(SWinKey));
}
}
int64_t getWinReskey(void* data, int32_t index) {
SArray* res = (SArray*)data;
- SWinRes* pos = taosArrayGet(res, index);
+ SWinKey* pos = taosArrayGet(res, index);
return pos->ts;
}
int32_t compareWinRes(void* pKey, void* data, int32_t index) {
SArray* res = (SArray*)data;
- SWinRes* pos = taosArrayGetP(res, index);
+ SWinKey* pos = taosArrayGet(res, index);
SResKeyPos* pData = (SResKeyPos*)pKey;
if (*(int64_t*)pData->key == pos->ts) {
if (pData->groupId > pos->groupId) {
@@ -914,10 +919,11 @@ static void removeDeleteResults(SHashObj* pUpdatedMap, SArray* pDelWins) {
}
void* pIte = NULL;
while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {
- SResKeyPos* pResKey = (SResKeyPos*)pIte;
+ SResKeyPos* pResKey = *(SResKeyPos**)pIte;
int32_t index = binarySearchCom(pDelWins, delSize, pResKey, TSDB_ORDER_DESC, compareWinRes);
if (index >= 0 && 0 == compareWinRes(pResKey, pDelWins, index)) {
taosArrayRemove(pDelWins, index);
+ delSize = taosArrayGetSize(pDelWins);
}
}
}
@@ -952,7 +958,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
@@ -968,14 +974,14 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
// prev time window not interpolation yet.
if (pInfo->timeWindowInterpo) {
- SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult);
+ SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult, tableGroupId);
doInterpUnclosedTimeWindow(pOperatorInfo, numOfOutput, pResultRowInfo, pBlock, scanFlag, tsCols, &pos);
// restore current time window
ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
// window start key interpolation
@@ -985,8 +991,8 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) &&
inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) {
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true);
- doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows,
- pBlock->info.rows, numOfOutput);
+ doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows,
+ numOfOutput);
}
doCloseWindow(pResultRowInfo, pInfo, pResult);
@@ -1009,7 +1015,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
int32_t code = setTimeWindowOutputBuf(pResultRowInfo, &nextWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
@@ -1020,13 +1026,21 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
ekey = ascScan ? nextWin.ekey : nextWin.skey;
forwardRows =
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->inputOrder);
-
// window start(end) key interpolation
doWindowBorderInterpolation(pInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pSup);
-
+ //TODO: add to open window? how to close the open windows after input blocks exhausted?
+#if 0
+ if ((ascScan && ekey <= pBlock->info.window.ekey) ||
+ (!ascScan && ekey >= pBlock->info.window.skey)) {
+ // window start(end) key interpolation
+ doWindowBorderInterpolation(pInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pSup);
+ } else if (pInfo->timeWindowInterpo) {
+ addToOpenWindowList(pResultRowInfo, pResult, tableGroupId);
+ }
+#endif
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
- doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows,
- pBlock->info.rows, numOfOutput);
+ doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows,
+ numOfOutput);
doCloseWindow(pResultRowInfo, pInfo, pResult);
}
@@ -1043,20 +1057,23 @@ void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInf
}
}
-SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult) {
- SResultRowPosition pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
+SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult, uint64_t groupId) {
+ SOpenWindowInfo openWin = {0};
+ openWin.pos.pageId = pResult->pageId;
+ openWin.pos.offset = pResult->offset;
+ openWin.groupId = groupId;
SListNode* pn = tdListGetTail(pResultRowInfo->openWindow);
if (pn == NULL) {
- tdListAppend(pResultRowInfo->openWindow, &pos);
- return pos;
+ tdListAppend(pResultRowInfo->openWindow, &openWin);
+ return openWin.pos;
}
- SResultRowPosition* px = (SResultRowPosition*)pn->data;
- if (px->pageId != pos.pageId || px->offset != pos.offset) {
- tdListAppend(pResultRowInfo->openWindow, &pos);
+ SOpenWindowInfo * px = (SOpenWindowInfo *)pn->data;
+ if (px->pos.pageId != openWin.pos.pageId || px->pos.offset != openWin.pos.offset || px->groupId != openWin.groupId) {
+ tdListAppend(pResultRowInfo->openWindow, &openWin);
}
- return pos;
+ return openWin.pos;
}
int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo) {
@@ -1185,7 +1202,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &window, masterScan, &pResult, gid, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
}
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &window, false);
@@ -1210,12 +1227,12 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &pRowSup->win, masterScan, &pResult, gid,
pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
}
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false);
- doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex,
- pRowSup->numOfRows, pBlock->info.rows, numOfOutput);
+ doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, pRowSup->numOfRows,
+ pBlock->info.rows, numOfOutput);
}
static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
@@ -1383,7 +1400,7 @@ bool doClearWindow(SAggSupporter* pAggSup, SExprSupp* pSup, char* pData, int16_t
int32_t numOfOutput) {
SET_RES_WINDOW_KEY(pAggSup->keyBuf, pData, bytes, groupId);
SResultRowPosition* p1 =
- (SResultRowPosition*)taosHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ (SResultRowPosition*)tSimpleHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
if (!p1) {
// window has been closed
return false;
@@ -1396,18 +1413,18 @@ bool doDeleteIntervalWindow(SAggSupporter* pAggSup, TSKEY ts, uint64_t groupId)
size_t bytes = sizeof(TSKEY);
SET_RES_WINDOW_KEY(pAggSup->keyBuf, &ts, bytes, groupId);
SResultRowPosition* p1 =
- (SResultRowPosition*)taosHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ (SResultRowPosition*)tSimpleHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
if (!p1) {
// window has been closed
return false;
}
// SFilePage* bufPage = getBufPage(pAggSup->pResultBuf, p1->pageId);
// dBufSetBufPageRecycled(pAggSup->pResultBuf, bufPage);
- taosHashRemove(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ tSimpleHashRemove(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
return true;
}
-void doDeleteSpecifyIntervalWindow(SAggSupporter* pAggSup, SSDataBlock* pBlock, SArray* pUpWins, SInterval* pInterval) {
+void doDeleteSpecifyIntervalWindow(SAggSupporter* pAggSup, SSDataBlock* pBlock, SArray* pDelWins, SInterval* pInterval, SHashObj* pUpdatedMap) {
SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
TSKEY* tsStarts = (TSKEY*)pStartCol->pData;
SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
@@ -1417,9 +1434,12 @@ void doDeleteSpecifyIntervalWindow(SAggSupporter* pAggSup, SSDataBlock* pBlock,
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsStarts[i], pInterval, TSDB_ORDER_ASC);
doDeleteIntervalWindow(pAggSup, win.skey, groupIds[i]);
- if (pUpWins) {
- SWinRes winRes = {.ts = win.skey, .groupId = groupIds[i]};
- taosArrayPush(pUpWins, &winRes);
+ SWinKey winRes = {.ts = win.skey, .groupId = groupIds[i]};
+ if (pDelWins) {
+ taosArrayPush(pDelWins, &winRes);
+ }
+ if (pUpdatedMap) {
+ taosHashRemove(pUpdatedMap, &winRes, sizeof(SWinKey));
}
}
}
@@ -1430,22 +1450,17 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval*
TSKEY* startTsCols = (TSKEY*)pStartTsCol->pData;
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
TSKEY* endTsCols = (TSKEY*)pEndTsCol->pData;
- uint64_t* pGpDatas = NULL;
- if (pBlock->info.type == STREAM_RETRIEVE) {
- SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
- pGpDatas = (uint64_t*)pGpCol->pData;
- }
- int32_t step = 0;
- int32_t startPos = 0;
+ SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t* pGpDatas = (uint64_t*)pGpCol->pData;
for (int32_t i = 0; i < pBlock->info.rows; i++) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, startTsCols[i], pInterval, TSDB_ORDER_ASC);
while (win.ekey <= endTsCols[i]) {
- uint64_t winGpId = pGpDatas ? pGpDatas[startPos] : pBlock->info.groupId;
+ uint64_t winGpId = pGpDatas[i];
bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TSKEY), winGpId, numOfOutput);
if (pUpWins && res) {
- SWinRes winRes = {.ts = win.skey, .groupId = winGpId};
+ SWinKey winRes = {.ts = win.skey, .groupId = winGpId};
taosArrayPush(pUpWins, &winRes);
}
getNextTimeWindow(pInterval, pInterval->precision, TSDB_ORDER_ASC, &win);
@@ -1453,11 +1468,13 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval*
}
}
-static int32_t getAllIntervalWindow(SHashObj* pHashMap, SHashObj* resWins) {
- void* pIte = NULL;
- size_t keyLen = 0;
- while ((pIte = taosHashIterate(pHashMap, pIte)) != NULL) {
- void* key = taosHashGetKey(pIte, &keyLen);
+static int32_t getAllIntervalWindow(SSHashObj* pHashMap, SHashObj* resWins) {
+
+ void* pIte = NULL;
+ size_t keyLen = 0;
+ int32_t iter = 0;
+ while ((pIte = tSimpleHashIterate(pHashMap, pIte, &iter)) != NULL) {
+ void* key = tSimpleHashGetKey(pIte, &keyLen);
uint64_t groupId = *(uint64_t*)key;
ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY)));
TSKEY ts = *(int64_t*)((char*)key + sizeof(uint64_t));
@@ -1470,25 +1487,26 @@ static int32_t getAllIntervalWindow(SHashObj* pHashMap, SHashObj* resWins) {
return TSDB_CODE_SUCCESS;
}
-static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, SInterval* pInterval,
+static int32_t closeIntervalWindow(SSHashObj* pHashMap, STimeWindowAggSupp* pSup, SInterval* pInterval,
SHashObj* pPullDataMap, SHashObj* closeWins, SArray* pRecyPages,
SDiskbasedBuf* pDiscBuf) {
qDebug("===stream===close interval window");
- void* pIte = NULL;
- size_t keyLen = 0;
- while ((pIte = taosHashIterate(pHashMap, pIte)) != NULL) {
- void* key = taosHashGetKey(pIte, &keyLen);
+ void* pIte = NULL;
+ size_t keyLen = 0;
+ int32_t iter = 0;
+ while ((pIte = tSimpleHashIterate(pHashMap, pIte, &iter)) != NULL) {
+ void* key = tSimpleHashGetKey(pIte, &keyLen);
uint64_t groupId = *(uint64_t*)key;
ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY)));
TSKEY ts = *(int64_t*)((char*)key + sizeof(uint64_t));
STimeWindow win;
win.skey = ts;
win.ekey = taosTimeAdd(win.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
- SWinRes winRe = {
+ SWinKey winRe = {
.ts = win.skey,
.groupId = groupId,
};
- void* chIds = taosHashGet(pPullDataMap, &winRe, sizeof(SWinRes));
+ void* chIds = taosHashGet(pPullDataMap, &winRe, sizeof(SWinKey));
if (isCloseWindow(&win, pSup)) {
if (chIds && pPullDataMap) {
SArray* chAy = *(SArray**)chIds;
@@ -1515,7 +1533,7 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup,
}
char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))];
SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId);
- taosHashRemove(pHashMap, keyBuf, keyLen);
+ tSimpleHashIterateRemove(pHashMap, keyBuf, keyLen, &pIte, &iter);
}
}
return TSDB_CODE_SUCCESS;
@@ -1552,13 +1570,10 @@ static void doBuildDeleteResult(SArray* pWins, int32_t* index, SSDataBlock* pBlo
return;
}
blockDataEnsureCapacity(pBlock, size - *index);
- SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
- SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t uid = 0;
for (int32_t i = *index; i < size; i++) {
- SWinRes* pWin = taosArrayGet(pWins, i);
- colDataAppend(pTsCol, pBlock->info.rows, (const char*)&pWin->ts, false);
- colDataAppend(pGroupCol, pBlock->info.rows, (const char*)&pWin->groupId, false);
- pBlock->info.rows++;
+ SWinKey* pWin = taosArrayGet(pWins, i);
+ appendOneRow(pBlock, &pWin->ts, &pWin->ts, &uid, &pWin->groupId);
(*index)++;
}
}
@@ -1577,6 +1592,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
if (pOperator->status == OP_RES_TO_RETURN) {
doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
if (pInfo->pDelRes->info.rows > 0) {
+ printDataBlock(pInfo->pDelRes, "single interval");
return pInfo->pDelRes;
}
@@ -1595,6 +1611,9 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
+
+ SStreamState* pState = pTaskInfo->streamInfo.pState;
+
while (1) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
@@ -1610,7 +1629,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
continue;
}
if (pBlock->info.type == STREAM_DELETE_DATA) {
- doDeleteSpecifyIntervalWindow(&pInfo->aggSup, pBlock, pInfo->pDelWins, &pInfo->interval);
+ doDeleteSpecifyIntervalWindow(&pInfo->aggSup, pBlock, pInfo->pDelWins, &pInfo->interval, pUpdatedMap);
continue;
} else if (pBlock->info.type == STREAM_GET_ALL) {
getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap);
@@ -1639,6 +1658,35 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdatedMap);
}
+#if 0
+ if (pState) {
+ printf(">>>>>>>> stream read backend\n");
+ SWinKey key = {
+ .ts = 1,
+ .groupId = 2,
+ };
+ char* val = NULL;
+ int32_t sz;
+ if (streamStateGet(pState, &key, (void**)&val, &sz) < 0) {
+ ASSERT(0);
+ }
+ printf("stream read %s %d\n", val, sz);
+ streamFreeVal(val);
+
+ SStreamStateCur* pCur = streamStateGetCur(pState, &key);
+ ASSERT(pCur);
+ while (streamStateCurNext(pState, pCur) == 0) {
+ SWinKey key1;
+ const void* val1;
+ if (streamStateGetKVByCur(pCur, &key1, &val1, &sz) < 0) {
+ break;
+ }
+ printf("stream iter key groupId:%d ts:%d, value %s %d\n", key1.groupId, key1.ts, val1, sz);
+ }
+ streamStateFreeCur(pCur);
+ }
+#endif
+
pOperator->status = OP_RES_TO_RETURN;
closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdatedMap,
pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
@@ -1656,6 +1704,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
taosHashCleanup(pUpdatedMap);
doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
if (pInfo->pDelRes->info.rows > 0) {
+ printDataBlock(pInfo->pDelRes, "single interval");
return pInfo->pDelRes;
}
@@ -1664,7 +1713,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes;
}
-static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyStateWindowOperatorInfo(void* param) {
SStateWindowOperatorInfo* pInfo = (SStateWindowOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
taosMemoryFreeClear(pInfo->stateKey.pData);
@@ -1677,7 +1726,7 @@ static void freeItem(void* param) {
taosMemoryFree(pKey->pData);
}
-void destroyIntervalOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyIntervalOperatorInfo(void* param) {
SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
cleanupAggSup(&pInfo->aggSup);
@@ -1694,7 +1743,7 @@ void destroyIntervalOperatorInfo(void* param, int32_t numOfOutput) {
taosMemoryFreeClear(param);
}
-void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyStreamFinalIntervalOperatorInfo(void* param) {
SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
cleanupAggSup(&pInfo->aggSup);
@@ -1711,7 +1760,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) {
int32_t size = taosArrayGetSize(pInfo->pChildren);
for (int32_t i = 0; i < size; i++) {
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, i);
- destroyStreamFinalIntervalOperatorInfo(pChildOp->info, numOfOutput);
+ destroyStreamFinalIntervalOperatorInfo(pChildOp->info);
taosMemoryFree(pChildOp->pDownstream);
cleanupExprSupp(&pChildOp->exprSupp);
taosMemoryFreeClear(pChildOp);
@@ -1777,20 +1826,16 @@ static bool timeWindowinterpNeeded(SqlFunctionCtx* pCtx, int32_t numOfCols, SInt
return needed;
}
-void increaseTs(SqlFunctionCtx* pCtx) {
- if (pCtx[0].pExpr->pExpr->_function.pFunctNode->funcType == FUNCTION_TYPE_WSTART) {
- pCtx[0].increase = true;
- }
-}
-
-void initIntervalDownStream(SOperatorInfo* downstream, uint16_t type, SAggSupporter* pSup) {
+void initIntervalDownStream(SOperatorInfo* downstream, uint16_t type, SAggSupporter* pSup, SInterval* pInterval, int64_t waterMark) {
if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- // Todo(liuyao) support partition by column
+ initIntervalDownStream(downstream->pDownstream[0], type, pSup, pInterval, waterMark);
return;
}
SStreamScanInfo* pScanInfo = downstream->info;
- pScanInfo->sessionSup.parentType = type;
- pScanInfo->sessionSup.pIntervalAggSup = pSup;
+ pScanInfo->windowSup.parentType = type;
+ pScanInfo->windowSup.pIntervalAggSup = pSup;
+ pScanInfo->pUpdateInfo = updateInfoInitP(pInterval, waterMark);
+ pScanInfo->interval = *pInterval;
}
void initStreamFunciton(SqlFunctionCtx* pCtx, int32_t numOfExpr) {
@@ -1836,11 +1881,14 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
initResultSizeInfo(&pOperator->resultInfo, 4096);
int32_t code = initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
initBasicInfo(&pInfo->binfo, pResBlock);
if (isStream) {
ASSERT(numOfCols > 0);
- increaseTs(pSup->pCtx);
initStreamFunciton(pSup->pCtx, pSup->numOfExprs);
}
@@ -1851,13 +1899,14 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
pInfo->timeWindowInterpo = timeWindowinterpNeeded(pSup->pCtx, numOfCols, pInfo);
if (pInfo->timeWindowInterpo) {
- pInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition));
+ pInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SOpenWindowInfo));
if (pInfo->binfo.resultRowInfo.openWindow == NULL) {
goto _error;
}
}
+
pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t));
- pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes));
+ pInfo->pDelWins = taosArrayInit(4, sizeof(SWinKey));
pInfo->delIndex = 0;
pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
@@ -1866,15 +1915,13 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, doStreamIntervalAgg, NULL,
destroyIntervalOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
if (nodeType(pPhyNode) == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL) {
- initIntervalDownStream(downstream, QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, &pInfo->aggSup);
+ initIntervalDownStream(downstream, QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, &pInfo->aggSup, &pInfo->interval, pInfo->twAggSup.waterMark);
}
code = appendDownstream(pOperator, &downstream, 1);
@@ -1885,7 +1932,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
return pOperator;
_error:
- destroyIntervalOperatorInfo(pInfo, numOfCols);
+ destroyIntervalOperatorInfo(pInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
@@ -1918,8 +1965,8 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator
if (gid != pRowSup->groupId || pInfo->winSup.prevTs == INT64_MIN) {
doKeepNewWindowStartInfo(pRowSup, tsList, j, gid);
doKeepTuple(pRowSup, tsList[j], gid);
- } else if ((tsList[j] - pRowSup->prevTs >= 0) && tsList[j] - pRowSup->prevTs <= gap ||
- (pRowSup->prevTs - tsList[j] >= 0) && (pRowSup->prevTs - tsList[j] <= gap)) {
+ } else if (((tsList[j] - pRowSup->prevTs >= 0) && (tsList[j] - pRowSup->prevTs <= gap)) ||
+ ((pRowSup->prevTs - tsList[j] >= 0) && (pRowSup->prevTs - tsList[j] <= gap))) {
// The gap is less than the threshold, so it belongs to current session window that has been opened already.
doKeepTuple(pRowSup, tsList[j], gid);
if (j == 0 && pRowSup->startRowIndex != 0) {
@@ -1935,7 +1982,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator
int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &window, masterScan, &pResult, gid, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
}
// pInfo->numOfRows data belong to the current session window
@@ -1954,12 +2001,12 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator
int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &pRowSup->win, masterScan, &pResult, gid,
pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
}
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false);
- doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex,
- pRowSup->numOfRows, pBlock->info.rows, numOfOutput);
+ doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, pRowSup->numOfRows,
+ pBlock->info.rows, numOfOutput);
}
static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
@@ -2112,6 +2159,7 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
// todo set the correct primary timestamp column
// output the result
+ bool hasInterp = true;
for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) {
SExprInfo* pExprInfo = &pExprSup->pExprInfo[j];
int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
@@ -2123,7 +2171,6 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
switch (pSliceInfo->fillType) {
case TSDB_FILL_NULL: {
colDataAppendNULL(pDst, rows);
- pResBlock->info.rows += 1;
break;
}
@@ -2143,7 +2190,6 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i);
colDataAppend(pDst, rows, (char*)&v, false);
}
- pResBlock->info.rows += 1;
break;
}
@@ -2157,6 +2203,7 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
// before interp range, do not fill
if (start.key == INT64_MIN || end.key == INT64_MAX) {
+ hasInterp = false;
break;
}
@@ -2168,28 +2215,27 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
}
taosMemoryFree(current.val);
- pResBlock->info.rows += 1;
break;
}
case TSDB_FILL_PREV: {
if (!pSliceInfo->isPrevRowSet) {
+ hasInterp = false;
break;
}
SGroupKeys* pkey = taosArrayGet(pSliceInfo->pPrevRow, srcSlot);
colDataAppend(pDst, rows, pkey->pData, false);
- pResBlock->info.rows += 1;
break;
}
case TSDB_FILL_NEXT: {
if (!pSliceInfo->isNextRowSet) {
+ hasInterp = false;
break;
}
SGroupKeys* pkey = taosArrayGet(pSliceInfo->pNextRow, srcSlot);
colDataAppend(pDst, rows, pkey->pData, false);
- pResBlock->info.rows += 1;
break;
}
@@ -2198,6 +2244,11 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
break;
}
}
+
+ if (hasInterp) {
+ pResBlock->info.rows += 1;
+ }
+
}
static int32_t initPrevRowsKeeper(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) {
@@ -2342,7 +2393,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
int32_t code = initKeeperInfo(pSliceInfo, pBlock);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
// the pDataBlock are always the same one, no need to call this again
@@ -2378,6 +2429,11 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);
+ if (colDataIsNull_s(pSrc, i)) {
+ colDataAppendNULL(pDst, pResBlock->info.rows);
+ continue;
+ }
+
char* v = colDataGetData(pSrc, i);
colDataAppend(pDst, pResBlock->info.rows, v, false);
}
@@ -2570,7 +2626,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
return pResBlock->info.rows == 0 ? NULL : pResBlock;
}
-void destroyTimeSliceOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyTimeSliceOperatorInfo(void* param) {
STimeSliceOperatorInfo* pInfo = (STimeSliceOperatorInfo*)param;
pInfo->pRes = blockDataDestroy(pInfo->pRes);
@@ -2657,20 +2713,26 @@ _error:
return NULL;
}
-SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
- SSDataBlock* pResBlock, STimeWindowAggSupp* pTwAggSup, int32_t tsSlotId,
- SColumn* pStateKeyCol, SNode* pCondition, SExecTaskInfo* pTaskInfo) {
+SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhysiNode* pStateNode,
+ SExecTaskInfo* pTaskInfo) {
SStateWindowOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStateWindowOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}
- pInfo->stateCol = *pStateKeyCol;
+ int32_t num = 0;
+ SExprInfo* pExprInfo = createExprInfo(pStateNode->window.pFuncs, NULL, &num);
+ SSDataBlock* pResBlock = createResDataBlock(pStateNode->window.node.pOutputDataBlockDesc);
+ int32_t tsSlotId = ((SColumnNode*)pStateNode->window.pTspk)->slotId;
+
+ SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr;
+
+ pInfo->stateCol = extractColumnFromColumnNode(pColNode);
pInfo->stateKey.type = pInfo->stateCol.type;
pInfo->stateKey.bytes = pInfo->stateCol.bytes;
pInfo->stateKey.pData = taosMemoryCalloc(1, pInfo->stateCol.bytes);
- pInfo->pCondition = pCondition;
+ pInfo->pCondition = pStateNode->window.node.pConditions;
if (pInfo->stateKey.pData == NULL) {
goto _error;
}
@@ -2678,12 +2740,15 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
initResultSizeInfo(&pOperator->resultInfo, 4096);
- initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExpr, numOfCols, keyBufSize, pTaskInfo->id.str);
- initBasicInfo(&pInfo->binfo, pResBlock);
+ int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ initBasicInfo(&pInfo->binfo, pResBlock);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
- pInfo->twAggSup = *pTwAggSup;
+  pInfo->twAggSup = (STimeWindowAggSupp){.waterMark = pStateNode->window.watermark, .calTrigger = pStateNode->window.triggerType};
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
pInfo->tsSlotId = tsSlotId;
@@ -2691,26 +2756,33 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExpr;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->pTaskInfo = pTaskInfo;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStateWindowAgg, NULL, NULL,
destroyStateWindowOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
- int32_t code = appendDownstream(pOperator, &downstream, 1);
+ code = appendDownstream(pOperator, &downstream, 1);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
return pOperator;
_error:
- pTaskInfo->code = TSDB_CODE_SUCCESS;
+ destroyStateWindowOperatorInfo(pInfo);
+ taosMemoryFreeClear(pOperator);
+ pTaskInfo->code = code;
return NULL;
}
-void destroySWindowOperatorInfo(void* param, int32_t numOfOutput) {
+void destroySWindowOperatorInfo(void* param) {
SSessionAggOperatorInfo* pInfo = (SSessionAggOperatorInfo*)param;
- cleanupBasicInfo(&pInfo->binfo);
+ if (pInfo == NULL) {
+ return;
+ }
+ cleanupBasicInfo(&pInfo->binfo);
colDataDestroy(&pInfo->twAggSup.timeWindowData);
cleanupAggSup(&pInfo->aggSup);
@@ -2757,40 +2829,50 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionW
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSessionWindowAgg, NULL, NULL,
destroySWindowOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
pOperator->pTaskInfo = pTaskInfo;
-
code = appendDownstream(pOperator, &downstream, 1);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
return pOperator;
_error:
- if (pInfo != NULL) {
- destroySWindowOperatorInfo(pInfo, numOfCols);
- }
-
+ destroySWindowOperatorInfo(pInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
}
void compactFunctions(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx, int32_t numOfOutput,
- SExecTaskInfo* pTaskInfo) {
+ SExecTaskInfo* pTaskInfo, SColumnInfoData* pTimeWindowData) {
for (int32_t k = 0; k < numOfOutput; ++k) {
if (fmIsWindowPseudoColumnFunc(pDestCtx[k].functionId)) {
- continue;
- }
- int32_t code = TSDB_CODE_SUCCESS;
- if (functionNeedToExecute(&pDestCtx[k]) && pDestCtx[k].fpSet.combine != NULL) {
- code = pDestCtx[k].fpSet.combine(&pDestCtx[k], &pSourceCtx[k]);
+ if (!pTimeWindowData) {
+ continue;
+ }
+
+ SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(&pDestCtx[k]);
+ char* p = GET_ROWCELL_INTERBUF(pEntryInfo);
+ SColumnInfoData idata = {0};
+ idata.info.type = TSDB_DATA_TYPE_BIGINT;
+ idata.info.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
+ idata.pData = p;
+
+ SScalarParam out = {.columnData = &idata};
+ SScalarParam tw = {.numOfRows = 5, .columnData = pTimeWindowData};
+ pDestCtx[k].sfp.process(&tw, 1, &out);
+ pEntryInfo->numOfRes = 1;
+  } else if (functionNeedToExecute(&pDestCtx[k]) && pDestCtx[k].fpSet.combine != NULL) {
+ int32_t code = pDestCtx[k].fpSet.combine(&pDestCtx[k], &pSourceCtx[k]);
if (code != TSDB_CODE_SUCCESS) {
qError("%s apply functions error, code: %s", GET_TASKID(pTaskInfo), tstrerror(code));
pTaskInfo->code = code;
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
}
@@ -2800,24 +2882,33 @@ bool hasIntervalWindow(SAggSupporter* pSup, TSKEY ts, uint64_t groupId) {
int32_t bytes = sizeof(TSKEY);
SET_RES_WINDOW_KEY(pSup->keyBuf, &ts, bytes, groupId);
SResultRowPosition* p1 =
- (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ (SResultRowPosition*)tSimpleHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
return p1 != NULL;
}
+STimeWindow getFinalTimeWindow(int64_t ts, SInterval* pInterval) {
+ STimeWindow w = {.skey = ts, .ekey = INT64_MAX};
+ w.ekey = taosTimeAdd(w.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
+ return w;
+}
+
static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExprSupp* pSup, SArray* pWinArray,
- int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo, SArray* pUpdated) {
+ int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo, SHashObj* pUpdatedMap) {
int32_t size = taosArrayGetSize(pWinArray);
if (!pInfo->pChildren) {
return;
}
for (int32_t i = 0; i < size; i++) {
- SWinRes* pWinRes = taosArrayGet(pWinArray, i);
+ SWinKey* pWinRes = taosArrayGet(pWinArray, i);
SResultRow* pCurResult = NULL;
- STimeWindow ParentWin = {.skey = pWinRes->ts, .ekey = pWinRes->ts + 1};
- setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &ParentWin, true, &pCurResult, pWinRes->groupId, pSup->pCtx,
+ STimeWindow parentWin = getFinalTimeWindow(pWinRes->ts, &pInfo->interval);
+ if (isDeletedWindow(&parentWin, pWinRes->groupId, &pInfo->aggSup) && isCloseWindow(&parentWin, &pInfo->twAggSup)) {
+ continue;
+ }
+ setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &parentWin, true, &pCurResult, pWinRes->groupId, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
int32_t numOfChildren = taosArrayGetSize(pInfo->pChildren);
- bool find = true;
+ int32_t num = 0;
for (int32_t j = 0; j < numOfChildren; j++) {
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, j);
SIntervalAggOperatorInfo* pChInfo = pChildOp->info;
@@ -2825,15 +2916,16 @@ static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExpr
if (!hasIntervalWindow(&pChInfo->aggSup, pWinRes->ts, pWinRes->groupId)) {
continue;
}
- find = true;
+ num++;
SResultRow* pChResult = NULL;
- setTimeWindowOutputBuf(&pChInfo->binfo.resultRowInfo, &ParentWin, true, &pChResult, pWinRes->groupId,
+ setTimeWindowOutputBuf(&pChInfo->binfo.resultRowInfo, &parentWin, true, &pChResult, pWinRes->groupId,
pChildSup->pCtx, pChildSup->numOfExprs, pChildSup->rowEntryInfoOffset, &pChInfo->aggSup,
pTaskInfo);
- compactFunctions(pSup->pCtx, pChildSup->pCtx, numOfOutput, pTaskInfo);
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &parentWin, true);
+ compactFunctions(pSup->pCtx, pChildSup->pCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData);
}
- if (find && pUpdated) {
- saveResultRow(pCurResult, pWinRes->groupId, pUpdated);
+ if (num > 1 && pUpdatedMap) {
+ saveWinResultRow(pCurResult, pWinRes->groupId, pUpdatedMap);
setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pInfo->binfo.resultRowInfo.cur);
}
}
@@ -2841,7 +2933,7 @@ static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExpr
bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup) {
SET_RES_WINDOW_KEY(pSup->keyBuf, &pWin->skey, sizeof(int64_t), groupId);
- SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf,
+ SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(pSup->pResultRowHashTable, pSup->keyBuf,
GET_RES_WINDOW_KEY_LEN(sizeof(int64_t)));
return p1 == NULL;
}
@@ -2854,22 +2946,16 @@ int32_t getNexWindowPos(SInterval* pInterval, SDataBlockInfo* pBlockInfo, TSKEY*
return getNextQualifiedWindow(pInterval, pNextWin, pBlockInfo, tsCols, prevEndPos, TSDB_ORDER_ASC);
}
-void addPullWindow(SHashObj* pMap, SWinRes* pWinRes, int32_t size) {
+void addPullWindow(SHashObj* pMap, SWinKey* pWinRes, int32_t size) {
SArray* childIds = taosArrayInit(8, sizeof(int32_t));
for (int32_t i = 0; i < size; i++) {
taosArrayPush(childIds, &i);
}
- taosHashPut(pMap, pWinRes, sizeof(SWinRes), &childIds, sizeof(void*));
+ taosHashPut(pMap, pWinRes, sizeof(SWinKey), &childIds, sizeof(void*));
}
static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; }
-STimeWindow getFinalTimeWindow(int64_t ts, SInterval* pInterval) {
- STimeWindow w = {.skey = ts, .ekey = INT64_MAX};
- w.ekey = taosTimeAdd(w.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
- return w;
-}
-
static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t tableGroupId,
SHashObj* pUpdatedMap) {
SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)pOperatorInfo->info;
@@ -2906,11 +2992,11 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
}
if (IS_FINAL_OP(pInfo) && isClosed && pInfo->pChildren) {
bool ignore = true;
- SWinRes winRes = {
+ SWinKey winRes = {
.ts = nextWin.skey,
.groupId = tableGroupId,
};
- void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinRes));
+ void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinKey));
if (isDeletedWindow(&nextWin, tableGroupId, &pInfo->aggSup) && !chIds) {
SPullWindowInfo pull = {.window = nextWin, .groupId = tableGroupId};
// add pull data request
@@ -2944,7 +3030,7 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
int32_t code = setTimeWindowOutputBuf(pResultRowInfo, &nextWin, true, &pResult, tableGroupId, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
if (IS_FINAL_OP(pInfo)) {
@@ -2970,10 +3056,10 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
}
static void clearStreamIntervalOperator(SStreamFinalIntervalOperatorInfo* pInfo) {
- taosHashClear(pInfo->aggSup.pResultRowHashTable);
+ tSimpleHashClear(pInfo->aggSup.pResultRowHashTable);
clearDiskbasedBuf(pInfo->aggSup.pResultBuf);
- cleanupResultRowInfo(&pInfo->binfo.resultRowInfo);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
+ pInfo->aggSup.currentPageId = -1;
}
static void clearSpecialDataBlock(SSDataBlock* pBlock) {
@@ -3039,8 +3125,8 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) {
uint64_t* groupIdData = (uint64_t*)pGroupCol->pData;
int32_t chId = getChildIndex(pBlock);
for (int32_t i = 0; i < pBlock->info.rows; i++) {
- SWinRes winRes = {.ts = tsData[i], .groupId = groupIdData[i]};
- void* chIds = taosHashGet(pMap, &winRes, sizeof(SWinRes));
+ SWinKey winRes = {.ts = tsData[i], .groupId = groupIdData[i]};
+ void* chIds = taosHashGet(pMap, &winRes, sizeof(SWinKey));
if (chIds) {
SArray* chArray = *(SArray**)chIds;
int32_t index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ);
@@ -3049,13 +3135,32 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) {
taosArrayRemove(chArray, index);
if (taosArrayGetSize(chArray) == 0) {
// pull data is over
- taosHashRemove(pMap, &winRes, sizeof(SWinRes));
+ taosHashRemove(pMap, &winRes, sizeof(SWinKey));
}
}
}
}
}
+static void addRetriveWindow(SArray* wins, SStreamFinalIntervalOperatorInfo* pInfo) {
+ int32_t size = taosArrayGetSize(wins);
+ for (int32_t i = 0; i < size; i++) {
+ SWinKey* winKey = taosArrayGet(wins, i);
+ STimeWindow nextWin = getFinalTimeWindow(winKey->ts, &pInfo->interval);
+ if (isCloseWindow(&nextWin, &pInfo->twAggSup) && !pInfo->ignoreExpiredData) {
+ void* chIds = taosHashGet(pInfo->pPullDataMap, winKey, sizeof(SWinKey));
+ if (!chIds) {
+ SPullWindowInfo pull = {.window = nextWin, .groupId = winKey->groupId};
+ // add pull data request
+ savePullWindow(&pull, pInfo->pPullWins);
+ int32_t size = taosArrayGetSize(pInfo->pChildren);
+ addPullWindow(pInfo->pPullDataMap, winKey, size);
+        qDebug("===stream===prepare retrieve for delete %" PRId64 ", size:%d", winKey->ts, size);
+ }
+ }
+ }
+}
+
static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info;
@@ -3080,12 +3185,20 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
return pInfo->pPullDataRes;
}
+ doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
+ if (pInfo->pDelRes->info.rows != 0) {
+ // process the rest of the data
+ printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
+ return pInfo->pDelRes;
+ }
+
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
if (pInfo->binfo.pRes->info.rows == 0) {
pOperator->status = OP_EXEC_DONE;
if (!IS_FINAL_OP(pInfo)) {
// semi interval operator clear disk buffer
clearStreamIntervalOperator(pInfo);
+ qDebug("===stream===clear semi operator");
} else {
freeAllPages(pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
}
@@ -3129,11 +3242,11 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
maxTs = TMAX(maxTs, pBlock->info.window.ekey);
maxTs = TMAX(maxTs, pBlock->info.watermark);
- if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA ||
- pBlock->info.type == STREAM_INVALID) {
+ ASSERT(pBlock->info.type != STREAM_INVERT);
+ if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA) {
pInfo->binfo.pRes->info.type = pBlock->info.type;
} else if (pBlock->info.type == STREAM_CLEAR) {
- SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes));
+ SArray* pUpWins = taosArrayInit(8, sizeof(SWinKey));
doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock, pUpWins);
if (IS_FINAL_OP(pInfo)) {
int32_t childIndex = getChildIndex(pBlock);
@@ -3149,29 +3262,34 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
}
removeResults(pUpWins, pUpdatedMap);
copyDataBlock(pInfo->pUpdateRes, pBlock);
- // copyUpdateDataBlock(pInfo->pUpdateRes, pBlock, pInfo->primaryTsIndex);
pInfo->returnUpdate = true;
taosArrayDestroy(pUpWins);
break;
} else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) {
- doDeleteSpecifyIntervalWindow(&pInfo->aggSup, pBlock, pInfo->pDelWins, &pInfo->interval);
+ SArray* delWins = taosArrayInit(8, sizeof(SWinKey));
+ doDeleteSpecifyIntervalWindow(&pInfo->aggSup, pBlock, delWins, &pInfo->interval, pUpdatedMap);
if (IS_FINAL_OP(pInfo)) {
int32_t childIndex = getChildIndex(pBlock);
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex);
SStreamFinalIntervalOperatorInfo* pChildInfo = pChildOp->info;
SExprSupp* pChildSup = &pChildOp->exprSupp;
- doDeleteSpecifyIntervalWindow(&pChildInfo->aggSup, pBlock, NULL, &pChildInfo->interval);
- rebuildIntervalWindow(pInfo, pSup, pInfo->pDelWins, pInfo->binfo.pRes->info.groupId,
- pOperator->exprSupp.numOfExprs, pOperator->pTaskInfo, pUpdated);
+ doDeleteSpecifyIntervalWindow(&pChildInfo->aggSup, pBlock, NULL, &pChildInfo->interval, NULL);
+ rebuildIntervalWindow(pInfo, pSup, delWins, pInfo->binfo.pRes->info.groupId,
+ pOperator->exprSupp.numOfExprs, pOperator->pTaskInfo, pUpdatedMap);
+ addRetriveWindow(delWins, pInfo);
+ taosArrayAddAll(pInfo->pDelWins, delWins);
+ taosArrayDestroy(delWins);
continue;
}
- removeResults(pInfo->pDelWins, pUpdatedMap);
+ removeResults(delWins, pUpdatedMap);
+ taosArrayAddAll(pInfo->pDelWins, delWins);
+ taosArrayDestroy(delWins);
break;
} else if (pBlock->info.type == STREAM_GET_ALL && IS_FINAL_OP(pInfo)) {
getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap);
continue;
} else if (pBlock->info.type == STREAM_RETRIEVE && !IS_FINAL_OP(pInfo)) {
- SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes));
+ SArray* pUpWins = taosArrayInit(8, sizeof(SWinKey));
doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock, pUpWins);
removeResults(pUpWins, pUpdatedMap);
taosArrayDestroy(pUpWins);
@@ -3197,7 +3315,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
for (int32_t i = 0; i < chIndex + 1 - size; i++) {
SOperatorInfo* pChildOp = createStreamFinalIntervalOperatorInfo(NULL, pInfo->pPhyNode, pOperator->pTaskInfo, 0);
if (!pChildOp) {
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
SStreamFinalIntervalOperatorInfo* pTmpInfo = pChildOp->info;
pTmpInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE;
@@ -3239,6 +3357,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
return pInfo->pPullDataRes;
}
+ // we should send result first.
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
if (pInfo->binfo.pRes->info.rows != 0) {
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
@@ -3336,15 +3455,16 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
initStreamFunciton(pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs);
initBasicInfo(&pInfo->binfo, pResBlock);
ASSERT(numOfCols > 0);
- increaseTs(pOperator->exprSupp.pCtx);
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
+
initResultRowInfo(&pInfo->binfo.resultRowInfo);
pInfo->pChildren = NULL;
if (numOfChild > 0) {
@@ -3373,6 +3493,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
// semi interval operator does not catch result
pInfo->isFinal = false;
pOperator->name = "StreamSemiIntervalOperator";
+ ASSERT(pInfo->aggSup.currentPageId == -1);
}
if (!IS_FINAL_OP(pInfo) || numOfChild == 0) {
@@ -3386,21 +3507,19 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired;
pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
pInfo->delIndex = 0;
- pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes));
+ pInfo->pDelWins = taosArrayInit(4, sizeof(SWinKey));
pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t));
pOperator->operatorType = pPhyNode->type;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet =
createOperatorFpSet(NULL, doStreamFinalIntervalAgg, NULL, NULL, destroyStreamFinalIntervalOperatorInfo,
aggEncodeResultRow, aggDecodeResultRow, NULL);
if (pPhyNode->type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL) {
- initIntervalDownStream(downstream, pPhyNode->type, &pInfo->aggSup);
+ initIntervalDownStream(downstream, pPhyNode->type, &pInfo->aggSup, &pInfo->interval, pInfo->twAggSup.waterMark);
}
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
@@ -3410,7 +3529,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
return pOperator;
_error:
- destroyStreamFinalIntervalOperatorInfo(pInfo, numOfCols);
+ destroyStreamFinalIntervalOperatorInfo(pInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
@@ -3448,7 +3567,7 @@ void destroyStateStreamAggSupporter(SStreamAggSupporter* pSup) {
blockDataDestroy(pSup->pScanBlock);
}
-void destroyStreamSessionAggOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyStreamSessionAggOperatorInfo(void* param) {
SStreamSessionAggOperatorInfo* pInfo = (SStreamSessionAggOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
destroyStreamAggSupporter(&pInfo->streamAggSup);
@@ -3458,7 +3577,7 @@ void destroyStreamSessionAggOperatorInfo(void* param, int32_t numOfOutput) {
for (int32_t i = 0; i < size; i++) {
SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i);
SStreamSessionAggOperatorInfo* pChInfo = pChild->info;
- destroyStreamSessionAggOperatorInfo(pChInfo, numOfOutput);
+ destroyStreamSessionAggOperatorInfo(pChInfo);
taosMemoryFreeClear(pChild);
}
}
@@ -3483,11 +3602,10 @@ int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo*
initBasicInfo(pBasicInfo, pResultBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
- pSup->pCtx[i].pBuf = NULL;
+ pSup->pCtx[i].saveHandle.pBuf = NULL;
}
ASSERT(numOfCols > 0);
- increaseTs(pSup->pCtx);
return TSDB_CODE_SUCCESS;
}
@@ -3498,10 +3616,18 @@ void initDummyFunction(SqlFunctionCtx* pDummy, SqlFunctionCtx* pCtx, int32_t num
}
void initDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, int64_t gap, int64_t waterMark,
- uint16_t type) {
- ASSERT(downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN);
+ uint16_t type, int32_t tsColIndex) {
+ if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION) {
+ SStreamPartitionOperatorInfo* pScanInfo = downstream->info;
+ pScanInfo->tsColIndex = tsColIndex;
+ }
+
+ if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
+ initDownStream(downstream->pDownstream[0], pAggSup, gap, waterMark, type, tsColIndex);
+ return;
+ }
SStreamScanInfo* pScanInfo = downstream->info;
- pScanInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = pAggSup, .gap = gap, .parentType = type};
+ pScanInfo->windowSup = (SWindowSupporter){.pStreamAggSup = pAggSup, .gap = gap, .parentType = type};
pScanInfo->pUpdateInfo = updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, waterMark);
}
@@ -3529,7 +3655,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
if (pSessionNode->window.pExprs != NULL) {
int32_t numOfScalar = 0;
SExprInfo* pScalarExprInfo = createExprInfo(pSessionNode->window.pExprs, NULL, &numOfScalar);
- int32_t code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar);
+ code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -3573,27 +3699,24 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
pInfo->isFinal = false;
pInfo->pPhyNode = pPhyNode;
pInfo->ignoreExpiredData = pSessionNode->window.igExpired;
- pInfo->returnDelete = false;
pOperator->name = "StreamSessionWindowAggOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doStreamSessionAgg, NULL, NULL, destroyStreamSessionAggOperatorInfo,
aggEncodeResultRow, aggDecodeResultRow, NULL);
if (downstream) {
- initDownStream(downstream, &pInfo->streamAggSup, pInfo->gap, pInfo->twAggSup.waterMark, pOperator->operatorType);
+ initDownStream(downstream, &pInfo->streamAggSup, pInfo->gap, pInfo->twAggSup.waterMark, pOperator->operatorType, pInfo->primaryTsIndex);
code = appendDownstream(pOperator, &downstream, 1);
}
return pOperator;
_error:
if (pInfo != NULL) {
- destroyStreamSessionAggOperatorInfo(pInfo, numOfCols);
+ destroyStreamSessionAggOperatorInfo(pInfo);
}
taosMemoryFreeClear(pOperator);
@@ -3616,13 +3739,13 @@ bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap) {
bool isInWindow(SResultWindowInfo* pWinInfo, TSKEY ts, int64_t gap) { return isInTimeWindow(&pWinInfo->win, ts, gap); }
-static SResultWindowInfo* insertNewSessionWindow(SArray* pWinInfos, TSKEY ts, int32_t index) {
- SResultWindowInfo win = {.pos.offset = -1, .pos.pageId = -1, .win.skey = ts, .win.ekey = ts, .isOutput = false};
+static SResultWindowInfo* insertNewSessionWindow(SArray* pWinInfos, TSKEY startTs, TSKEY endTs, int32_t index) {
+ SResultWindowInfo win = {.pos.offset = -1, .pos.pageId = -1, .win.skey = startTs, .win.ekey = endTs, .isOutput = false};
return taosArrayInsert(pWinInfos, index, &win);
}
-static SResultWindowInfo* addNewSessionWindow(SArray* pWinInfos, TSKEY ts) {
- SResultWindowInfo win = {.pos.offset = -1, .pos.pageId = -1, .win.skey = ts, .win.ekey = ts, .isOutput = false};
+static SResultWindowInfo* addNewSessionWindow(SArray* pWinInfos, TSKEY startTs, TSKEY endTs) {
+ SResultWindowInfo win = {.pos.offset = -1, .pos.pageId = -1, .win.skey = startTs, .win.ekey = endTs, .isOutput = false};
return taosArrayPush(pWinInfos, &win);
}
@@ -3681,7 +3804,7 @@ SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY star
int32_t size = taosArrayGetSize(pWinInfos);
if (size == 0) {
*pIndex = 0;
- return addNewSessionWindow(pWinInfos, startTs);
+ return addNewSessionWindow(pWinInfos, startTs, endTs);
}
// find the first position which is smaller than the key
int32_t index = binarySearch(pWinInfos, size, startTs, TSDB_ORDER_DESC, getSessionWindowEndkey);
@@ -3707,10 +3830,10 @@ SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY star
if (index == size - 1) {
*pIndex = taosArrayGetSize(pWinInfos);
- return addNewSessionWindow(pWinInfos, startTs);
+ return addNewSessionWindow(pWinInfos, startTs, endTs);
}
*pIndex = index + 1;
- return insertNewSessionWindow(pWinInfos, startTs, index + 1);
+ return insertNewSessionWindow(pWinInfos, startTs, endTs, index + 1);
}
int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t groupId,
@@ -3721,8 +3844,8 @@ int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TS
}
if (pWinInfo->win.skey > pStartTs[i]) {
if (pStDeleted && pWinInfo->isOutput) {
- SWinRes res = {.ts = pWinInfo->win.skey, .groupId = groupId};
- taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes));
+ SWinKey res = {.ts = pWinInfo->win.skey, .groupId = groupId};
+ taosHashPut(pStDeleted, &res, sizeof(SWinKey), &res, sizeof(SWinKey));
pWinInfo->isOutput = false;
}
pWinInfo->win.skey = pStartTs[i];
@@ -3742,15 +3865,14 @@ static int32_t setWindowOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pRes
// too many time window in query
int32_t size = taosArrayGetSize(pAggSup->pCurWins);
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_BATCH && size > MAX_INTERVAL_TIME_WINDOW) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW);
}
if (pWinInfo->pos.pageId == -1) {
- *pResult = getNewResultRow(pAggSup->pResultBuf, groupId, pAggSup->resultRowSize);
+ *pResult = getNewResultRow(pAggSup->pResultBuf, &pAggSup->currentPageId, pAggSup->resultRowSize);
if (*pResult == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
- initResultRow(*pResult);
// add a new result set for a new group
pWinInfo->pos.pageId = (*pResult)->pageId;
@@ -3837,11 +3959,12 @@ void compactTimeWindow(SStreamSessionAggOperatorInfo* pInfo, int32_t startIndex,
setWindowOutputBuf(pWinInfo, &pWinResult, pInfo->pDummyCtx, groupId, numOfOutput, pSup->rowEntryInfoOffset,
&pInfo->streamAggSup, pTaskInfo);
pCurWin->win.ekey = TMAX(pCurWin->win.ekey, pWinInfo->win.ekey);
- compactFunctions(pSup->pCtx, pInfo->pDummyCtx, numOfOutput, pTaskInfo);
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pCurWin->win, true);
+ compactFunctions(pSup->pCtx, pInfo->pDummyCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData);
taosHashRemove(pStUpdated, &pWinInfo->pos, sizeof(SResultRowPosition));
if (pWinInfo->isOutput) {
- SWinRes res = {.ts = pWinInfo->win.skey, .groupId = groupId};
- taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes));
+ SWinKey res = {.ts = pWinInfo->win.skey, .groupId = groupId};
+ taosHashPut(pStDeleted, &res, sizeof(SWinKey), &res, sizeof(SWinKey));
pWinInfo->isOutput = false;
}
taosArrayRemove(pInfo->streamAggSup.pCurWins, i);
@@ -3894,7 +4017,7 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData
pStDeleted);
code = doOneWindowAgg(pInfo, pSDataBlock, pCurWin, &pResult, i, winRows, numOfOutput, pOperator);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
int32_t winNum = getNumCompactWindow(pAggSup->pCurWins, winIndex, gap);
@@ -3903,10 +4026,10 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData
}
pCurWin->isClosed = false;
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pStUpdated) {
- SWinRes value = {.ts = pCurWin->win.skey, .groupId = groupId};
- code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &value, sizeof(SWinRes));
+ SWinKey value = {.ts = pCurWin->win.skey, .groupId = groupId};
+ code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &value, sizeof(SWinKey));
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
pCurWin->isOutput = true;
}
@@ -3938,10 +4061,11 @@ static void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBloc
if (!pCurWin) {
break;
}
+ SResultWindowInfo delWin = *pCurWin;
deleteWindow(pAggSup->pCurWins, winIndex, fp);
if (result) {
- pCurWin->groupId = gpDatas[i];
- taosArrayPush(result, pCurWin);
+ delWin.groupId = gpDatas[i];
+ taosArrayPush(result, &delWin);
}
}
}
@@ -3951,11 +4075,12 @@ static void doClearSessionWindows(SStreamAggSupporter* pAggSup, SExprSupp* pSup,
int32_t numOfOutput, int64_t gap, SArray* result) {
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, tsIndex);
TSKEY* tsCols = (TSKEY*)pColDataInfo->pData;
+ SColumnInfoData* pGpDataInfo = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t* gpCols = (uint64_t*)pGpDataInfo->pData;
int32_t step = 0;
for (int32_t i = 0; i < pBlock->info.rows; i += step) {
int32_t winIndex = 0;
- SResultWindowInfo* pCurWin =
- getCurSessionWindow(pAggSup, tsCols[i], INT64_MIN, pBlock->info.groupId, gap, &winIndex);
+ SResultWindowInfo* pCurWin = getCurSessionWindow(pAggSup, tsCols[i], INT64_MIN, gpCols[i], gap, &winIndex);
if (!pCurWin || pCurWin->pos.pageId == -1) {
// window has been closed.
step = 1;
@@ -3965,6 +4090,7 @@ static void doClearSessionWindows(SStreamAggSupporter* pAggSup, SExprSupp* pSup,
ASSERT(isInWindow(pCurWin, tsCols[i], gap));
doClearWindowImpl(&pCurWin->pos, pAggSup->pResultBuf, pSup, numOfOutput);
if (result) {
+ pCurWin->groupId = gpCols[i];
taosArrayPush(result, pCurWin);
}
}
@@ -3980,9 +4106,9 @@ static int32_t copyUpdateResult(SHashObj* pStUpdated, SArray* pUpdated) {
if (pos == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
- pos->groupId = ((SWinRes*)pData)->groupId;
+ pos->groupId = ((SWinKey*)pData)->groupId;
pos->pos = *(SResultRowPosition*)key;
- *(int64_t*)pos->key = ((SWinRes*)pData)->ts;
+ *(int64_t*)pos->key = ((SWinKey*)pData)->ts;
taosArrayPush(pUpdated, &pos);
}
taosArraySort(pUpdated, resultrowComparAsc);
@@ -3998,11 +4124,19 @@ void doBuildDeleteDataBlock(SHashObj* pStDeleted, SSDataBlock* pBlock, void** It
blockDataEnsureCapacity(pBlock, size);
size_t keyLen = 0;
while (((*Ite) = taosHashIterate(pStDeleted, *Ite)) != NULL) {
- SWinRes* res = *Ite;
- SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
- colDataAppend(pTsCol, pBlock->info.rows, (const char*)&res->ts, false);
+ SWinKey* res = *Ite;
+ SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
+ colDataAppend(pStartTsCol, pBlock->info.rows, (const char*)&res->ts, false);
+ SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ colDataAppend(pEndTsCol, pBlock->info.rows, (const char*)&res->ts, false);
+ SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
+ colDataAppendNULL(pUidCol, pBlock->info.rows);
SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
colDataAppend(pGpCol, pBlock->info.rows, (const char*)&res->groupId, false);
+ SColumnInfoData* pCalStCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
+ colDataAppendNULL(pCalStCol, pBlock->info.rows);
+ SColumnInfoData* pCalEdCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
+ colDataAppendNULL(pCalEdCol, pBlock->info.rows);
pBlock->info.rows += 1;
if (pBlock->info.rows + 1 >= pBlock->info.capacity) {
break;
@@ -4013,8 +4147,8 @@ void doBuildDeleteDataBlock(SHashObj* pStDeleted, SSDataBlock* pBlock, void** It
}
}
-static void rebuildTimeWindow(SStreamSessionAggOperatorInfo* pInfo, SArray* pWinArray, int32_t groupId,
- int32_t numOfOutput, SOperatorInfo* pOperator) {
+static void rebuildTimeWindow(SStreamSessionAggOperatorInfo* pInfo, SArray* pWinArray,
+ int32_t numOfOutput, SOperatorInfo* pOperator, SHashObj* pStUpdated, bool needCreate) {
SExprSupp* pSup = &pOperator->exprSupp;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@@ -4024,9 +4158,15 @@ static void rebuildTimeWindow(SStreamSessionAggOperatorInfo* pInfo, SArray* pWin
for (int32_t i = 0; i < size; i++) {
SResultWindowInfo* pParentWin = taosArrayGet(pWinArray, i);
SResultRow* pCurResult = NULL;
+ uint64_t groupId = pParentWin->groupId;
+ int32_t winIndex = 0;
+ if (needCreate) {
+ pParentWin = getSessionTimeWindow(&pInfo->streamAggSup, pParentWin->win.skey, pParentWin->win.ekey, groupId, 0, &winIndex);
+ }
setWindowOutputBuf(pParentWin, &pCurResult, pSup->pCtx, groupId, numOfOutput, pSup->rowEntryInfoOffset,
&pInfo->streamAggSup, pTaskInfo);
int32_t numOfChildren = taosArrayGetSize(pInfo->pChildren);
+ int32_t num = 0;
for (int32_t j = 0; j < numOfChildren; j++) {
SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, j);
SStreamSessionAggOperatorInfo* pChInfo = pChild->info;
@@ -4042,15 +4182,24 @@ static void rebuildTimeWindow(SStreamSessionAggOperatorInfo* pInfo, SArray* pWin
SResultRow* pChResult = NULL;
setWindowOutputBuf(pChWin, &pChResult, pChild->exprSupp.pCtx, groupId, numOfOutput,
pChild->exprSupp.rowEntryInfoOffset, &pChInfo->streamAggSup, pTaskInfo);
- compactFunctions(pSup->pCtx, pChild->exprSupp.pCtx, numOfOutput, pTaskInfo);
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pChWin->win, true);
+ compactFunctions(pSup->pCtx, pChild->exprSupp.pCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData);
SFilePage* bufPage = getBufPage(pChInfo->streamAggSup.pResultBuf, pChWin->pos.pageId);
releaseBufPage(pChInfo->streamAggSup.pResultBuf, bufPage);
+ num++;
continue;
} else if (!pChWin->isClosed) {
break;
}
}
}
+ if (num == 0 && needCreate) {
+ deleteWindow(pInfo->streamAggSup.pCurWins, winIndex, NULL);
+ }
+ if (pStUpdated && num > 0) {
+ SWinKey value = {.ts = pParentWin->win.skey, .groupId = groupId};
+ taosHashPut(pStUpdated, &pParentWin->pos, sizeof(SResultRowPosition), &value, sizeof(SWinKey));
+ }
SFilePage* bufPage = getBufPage(pInfo->streamAggSup.pResultBuf, pParentWin->pos.pageId);
ASSERT(size > 0);
setBufPageDirty(bufPage, true);
@@ -4129,8 +4278,47 @@ static void copyDeleteWindowInfo(SArray* pResWins, SHashObj* pStDeleted) {
int32_t size = taosArrayGetSize(pResWins);
for (int32_t i = 0; i < size; i++) {
SResultWindowInfo* pWinInfo = taosArrayGet(pResWins, i);
- SWinRes res = {.ts = pWinInfo->win.skey, .groupId = pWinInfo->groupId};
- taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes));
+ SWinKey res = {.ts = pWinInfo->win.skey, .groupId = pWinInfo->groupId};
+ taosHashPut(pStDeleted, &res, sizeof(SWinKey), &res, sizeof(SWinKey));
+ }
+}
+
+// Drop every window listed in pWins from pHashMap (keyed by the window's
+// SResultRowPosition). Used to purge pending update entries for session
+// windows that have just been cleared/deleted, so they are not re-emitted
+// as results.
+static void removeSessionResults(SHashObj* pHashMap, SArray* pWins) {
+  int32_t size = taosArrayGetSize(pWins);
+  for (int32_t i = 0; i < size; i++) {
+    SResultWindowInfo* pWin = taosArrayGet(pWins, i);
+    // key is the row position, matching how entries were inserted into
+    // pStUpdated (taosHashPut with &pos, sizeof(SResultRowPosition))
+    taosHashRemove(pHashMap, &pWin->pos, sizeof(SResultRowPosition));
+  }
+}
+
+// Ordering comparator for searching an SArray of SResKeyPos* by SWinKey.
+// @param pKey   the SWinKey being searched for
+// @param data   an SArray whose elements are SResKeyPos pointers
+// @param index  element of the array to compare against
+// @return 1 / -1 / 0 when pKey is greater / smaller / equal; primary key is
+//         the timestamp, tie broken by groupId.
+int32_t compareWinKey(void* pKey, void* data, int32_t index) {
+  SArray*     res = (SArray*)data;
+  SResKeyPos* pos = taosArrayGetP(res, index);
+  SWinKey*    pData = (SWinKey*)pKey;
+  if (pData->ts == *(int64_t*)pos->key) {
+    // timestamps equal: order by group id
+    if (pData->groupId > pos->groupId) {
+      return 1;
+    } else if (pData->groupId < pos->groupId) {
+      return -1;
+    }
+    return 0;
+  } else if (pData->ts > *(int64_t*)pos->key) {
+    return 1;
+  }
+  return -1;
+}
+
+// For every window in the update list, remove the matching SWinKey entry
+// from pStDeleted: a window that is being re-emitted as an update must not
+// also be reported as deleted in the same pass.
+// @param update     SArray of SResKeyPos* (pending update results)
+// @param pStDeleted hash of SWinKey -> SWinKey (pending delete results)
+static void removeSessionDeleteResults(SArray* update, SHashObj* pStDeleted) {
+  int32_t size = taosHashGetSize(pStDeleted);
+  if (size == 0) {
+    // nothing queued for deletion; skip the scan
+    return;
+  }
+
+  int32_t num = taosArrayGetSize(update);
+  for (int32_t i = 0; i < num; i++) {
+    SResKeyPos* pos = taosArrayGetP(update, i);
+    // rebuild the SWinKey exactly as copyDeleteWindowInfo stored it
+    SWinKey winKey = {.ts = *(int64_t*)pos->key, .groupId = pos->groupId};
+    taosHashRemove(pStDeleted, &winKey, sizeof(SWinKey));
}
}
@@ -4158,7 +4346,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
SHashObj* pStUpdated = taosHashInit(64, hashFn, true, HASH_NO_LOCK);
SOperatorInfo* downstream = pOperator->pDownstream[0];
- SArray* pUpdated = taosArrayInit(16, POINTER_BYTES);
+ SArray* pUpdated = taosArrayInit(16, POINTER_BYTES); // SResKeyPos
while (1) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
@@ -4168,15 +4356,15 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
if (pBlock->info.type == STREAM_CLEAR) {
SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
- doClearSessionWindows(&pInfo->streamAggSup, &pOperator->exprSupp, pBlock, 0, pOperator->exprSupp.numOfExprs, 0,
- pWins);
+ doClearSessionWindows(&pInfo->streamAggSup, &pOperator->exprSupp, pBlock, START_TS_COLUMN_INDEX,
+ pOperator->exprSupp.numOfExprs, 0, pWins);
if (IS_FINAL_OP(pInfo)) {
int32_t childIndex = getChildIndex(pBlock);
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex);
SStreamSessionAggOperatorInfo* pChildInfo = pChildOp->info;
- doClearSessionWindows(&pChildInfo->streamAggSup, &pChildOp->exprSupp, pBlock, 0, pChildOp->exprSupp.numOfExprs,
+ doClearSessionWindows(&pChildInfo->streamAggSup, &pChildOp->exprSupp, pBlock, START_TS_COLUMN_INDEX, pChildOp->exprSupp.numOfExprs,
0, NULL);
- rebuildTimeWindow(pInfo, pWins, pBlock->info.groupId, pOperator->exprSupp.numOfExprs, pOperator);
+ rebuildTimeWindow(pInfo, pWins, pOperator->exprSupp.numOfExprs, pOperator, NULL, false);
}
taosArrayDestroy(pWins);
continue;
@@ -4190,9 +4378,10 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
SStreamSessionAggOperatorInfo* pChildInfo = pChildOp->info;
// gap must be 0
doDeleteTimeWindows(&pChildInfo->streamAggSup, pBlock, 0, NULL, NULL);
- rebuildTimeWindow(pInfo, pWins, pBlock->info.groupId, pOperator->exprSupp.numOfExprs, pOperator);
+ rebuildTimeWindow(pInfo, pWins, pOperator->exprSupp.numOfExprs, pOperator, pStUpdated, true);
}
copyDeleteWindowInfo(pWins, pInfo->pStDeleted);
+ removeSessionResults(pStUpdated, pWins);
taosArrayDestroy(pWins);
continue;
} else if (pBlock->info.type == STREAM_GET_ALL) {
@@ -4215,7 +4404,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
SOperatorInfo* pChildOp =
createStreamFinalSessionAggOperatorInfo(NULL, pInfo->pPhyNode, pOperator->pTaskInfo, 0);
if (!pChildOp) {
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
taosArrayPush(pInfo->pChildren, &pChildOp);
}
@@ -4235,6 +4424,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
pInfo->ignoreExpiredData, NULL);
closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs, pInfo->ignoreExpiredData, NULL);
copyUpdateResult(pStUpdated, pUpdated);
+ removeSessionDeleteResults(pUpdated, pInfo->pStDeleted);
taosHashCleanup(pStUpdated);
finalizeUpdatedResult(pSup->numOfExprs, pInfo->streamAggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset);
@@ -4262,16 +4452,7 @@ static void clearStreamSessionOperator(SStreamSessionAggOperatorInfo* pInfo) {
}
}
clearDiskbasedBuf(pInfo->streamAggSup.pResultBuf);
- cleanupResultRowInfo(&pInfo->binfo.resultRowInfo);
- initResultRowInfo(&pInfo->binfo.resultRowInfo);
-}
-
-static void removeSessionResults(SHashObj* pHashMap, SArray* pWins) {
- int32_t size = taosArrayGetSize(pWins);
- for (int32_t i = 0; i < size; i++) {
- SResultWindowInfo* pWin = taosArrayGet(pWins, i);
- taosHashRemove(pHashMap, &pWin->pos, sizeof(SResultRowPosition));
- }
+ pInfo->streamAggSup.currentPageId = -1;
}
static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
@@ -4282,30 +4463,34 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
- } else if (pOperator->status == OP_RES_TO_RETURN) {
+ }
+
+ {
doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf);
if (pBInfo->pRes->info.rows > 0) {
- printDataBlock(pBInfo->pRes, "sems session");
+ printDataBlock(pBInfo->pRes, "semi session");
return pBInfo->pRes;
}
- // doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
- if (pInfo->pDelRes->info.rows > 0 && !pInfo->returnDelete) {
- pInfo->returnDelete = true;
- printDataBlock(pInfo->pDelRes, "sems session");
+ doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
+ if (pInfo->pDelRes->info.rows > 0) {
+ printDataBlock(pInfo->pDelRes, "semi session");
return pInfo->pDelRes;
}
- if (pInfo->pUpdateRes->info.rows > 0) {
+ if (pInfo->pUpdateRes->info.rows > 0 && pInfo->returnUpdate) {
+ pInfo->returnUpdate = false;
// process the rest of the data
- pOperator->status = OP_OPENED;
- printDataBlock(pInfo->pUpdateRes, "sems session");
+ printDataBlock(pInfo->pUpdateRes, "semi session");
return pInfo->pUpdateRes;
}
- // semi interval operator clear disk buffer
- clearStreamSessionOperator(pInfo);
- pOperator->status = OP_EXEC_DONE;
- return NULL;
+
+ if (pOperator->status == OP_RES_TO_RETURN) {
+ // semi interval operator clear disk buffer
+ clearStreamSessionOperator(pInfo);
+ pOperator->status = OP_EXEC_DONE;
+ return NULL;
+ }
}
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
@@ -4316,21 +4501,26 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
clearSpecialDataBlock(pInfo->pUpdateRes);
+ pOperator->status = OP_RES_TO_RETURN;
break;
}
+ printDataBlock(pBlock, "semi session recv");
if (pBlock->info.type == STREAM_CLEAR) {
SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
- doClearSessionWindows(&pInfo->streamAggSup, pSup, pBlock, 0, pSup->numOfExprs, 0, pWins);
+ doClearSessionWindows(&pInfo->streamAggSup, pSup, pBlock, START_TS_COLUMN_INDEX, pSup->numOfExprs, 0, pWins);
removeSessionResults(pStUpdated, pWins);
taosArrayDestroy(pWins);
- copyUpdateDataBlock(pInfo->pUpdateRes, pBlock, pInfo->primaryTsIndex);
+ copyDataBlock(pInfo->pUpdateRes, pBlock);
+ pInfo->returnUpdate = true;
break;
} else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) {
// gap must be 0
- doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, NULL, NULL);
- copyDataBlock(pInfo->pDelRes, pBlock);
- pInfo->pDelRes->info.type = STREAM_DELETE_RESULT;
+ SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
+ doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, pWins, NULL);
+ copyDeleteWindowInfo(pWins, pInfo->pStDeleted);
+ removeSessionResults(pStUpdated, pWins);
+ taosArrayDestroy(pWins);
break;
} else if (pBlock->info.type == STREAM_GET_ALL) {
getAllSessionWindow(pInfo->streamAggSup.pResultRows, pUpdated, getResWinForSession);
@@ -4343,18 +4533,15 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
}
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pSup->pCtx, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true);
- doStreamSessionAggImpl(pOperator, pBlock, pStUpdated, pInfo->pStDeleted, false);
+ doStreamSessionAggImpl(pOperator, pBlock, pStUpdated, NULL, false);
maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
}
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
pBInfo->pRes->info.watermark = pInfo->twAggSup.maxTs;
- // restore the value
- pOperator->status = OP_RES_TO_RETURN;
- // semi operator
- // closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pUpdated,
- // getResWinForSession);
+
copyUpdateResult(pStUpdated, pUpdated);
+ removeSessionDeleteResults(pUpdated, pInfo->pStDeleted);
taosHashCleanup(pStUpdated);
finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->streamAggSup.pResultBuf, pUpdated,
@@ -4364,21 +4551,20 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf);
if (pBInfo->pRes->info.rows > 0) {
- printDataBlock(pBInfo->pRes, "sems session");
+ printDataBlock(pBInfo->pRes, "semi session");
return pBInfo->pRes;
}
- // doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
- if (pInfo->pDelRes->info.rows > 0 && !pInfo->returnDelete) {
- pInfo->returnDelete = true;
- printDataBlock(pInfo->pDelRes, "sems session");
+ doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
+ if (pInfo->pDelRes->info.rows > 0) {
+ printDataBlock(pInfo->pDelRes, "semi session");
return pInfo->pDelRes;
}
- if (pInfo->pUpdateRes->info.rows > 0) {
+ if (pInfo->pUpdateRes->info.rows > 0 && pInfo->returnUpdate) {
+ pInfo->returnUpdate = false;
// process the rest of the data
- pOperator->status = OP_OPENED;
- printDataBlock(pInfo->pUpdateRes, "sems session");
+ printDataBlock(pInfo->pUpdateRes, "semi session");
return pInfo->pUpdateRes;
}
@@ -4400,8 +4586,7 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream
pOperator->name = "StreamSessionFinalAggOperator";
} else {
pInfo->isFinal = false;
- pInfo->pUpdateRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
- pInfo->pUpdateRes->info.type = STREAM_CLEAR;
+ pInfo->pUpdateRes = createSpecialDataBlock(STREAM_CLEAR);
blockDataEnsureCapacity(pInfo->pUpdateRes, 128);
pOperator->name = "StreamSessionSemiAggOperator";
pOperator->fpSet =
@@ -4423,7 +4608,7 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream
_error:
if (pInfo != NULL) {
- destroyStreamSessionAggOperatorInfo(pInfo, pOperator->exprSupp.numOfExprs);
+ destroyStreamSessionAggOperatorInfo(pInfo);
}
taosMemoryFreeClear(pOperator);
@@ -4431,7 +4616,7 @@ _error:
return NULL;
}
-void destroyStreamStateOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyStreamStateOperatorInfo(void* param) {
SStreamStateAggOperatorInfo* pInfo = (SStreamStateAggOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
destroyStateStreamAggSupporter(&pInfo->streamAggSup);
@@ -4441,7 +4626,7 @@ void destroyStreamStateOperatorInfo(void* param, int32_t numOfOutput) {
for (int32_t i = 0; i < size; i++) {
SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i);
SStreamSessionAggOperatorInfo* pChInfo = pChild->info;
- destroyStreamSessionAggOperatorInfo(pChInfo, numOfOutput);
+ destroyStreamSessionAggOperatorInfo(pChInfo);
taosMemoryFreeClear(pChild);
taosMemoryFreeClear(pChInfo);
}
@@ -4579,7 +4764,8 @@ SStateWindowInfo* getStateWindow(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_
}
int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, uint64_t groupId,
- SColumnInfoData* pKeyCol, int32_t rows, int32_t start, bool* allEqual, SHashObj* pSeDeleted) {
+ SColumnInfoData* pKeyCol, int32_t rows, int32_t start, bool* allEqual,
+ SHashObj* pSeDeleted) {
*allEqual = true;
SStateWindowInfo* pWinInfo = taosArrayGet(pWinInfos, winIndex);
for (int32_t i = start; i < rows; ++i) {
@@ -4600,9 +4786,8 @@ int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, u
}
if (pWinInfo->winInfo.win.skey > pTs[i]) {
if (pSeDeleted && pWinInfo->winInfo.isOutput) {
- SWinRes res = {.ts = pWinInfo->winInfo.win.skey, .groupId = groupId};
- taosHashPut(pSeDeleted, &pWinInfo->winInfo.pos, sizeof(SResultRowPosition), &res,
- sizeof(SWinRes));
+ SWinKey res = {.ts = pWinInfo->winInfo.win.skey, .groupId = groupId};
+ taosHashPut(pSeDeleted, &res, sizeof(SWinKey), &res, sizeof(SWinKey));
pWinInfo->winInfo.isOutput = false;
}
pWinInfo->winInfo.win.skey = pTs[i];
@@ -4615,24 +4800,21 @@ int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, u
return rows - start;
}
-static void doClearStateWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock,
- int32_t tsIndex, SColumn* pCol, int32_t keyIndex, SHashObj* pSeUpdated, SHashObj* pSeDeleted) {
- SColumnInfoData* pTsColInfo = taosArrayGet(pBlock->pDataBlock, tsIndex);
- SColumnInfoData* pKeyColInfo = taosArrayGet(pBlock->pDataBlock, keyIndex);
+static void doClearStateWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SHashObj* pSeUpdated,
+ SHashObj* pSeDeleted) {
+ SColumnInfoData* pTsColInfo = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
+ SColumnInfoData* pGroupColInfo = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
TSKEY* tsCol = (TSKEY*)pTsColInfo->pData;
bool allEqual = false;
int32_t step = 1;
- uint64_t groupId = pBlock->info.groupId;
+ uint64_t* gpCol = (uint64_t*)pGroupColInfo->pData;
for (int32_t i = 0; i < pBlock->info.rows; i += step) {
- char* pKeyData = colDataGetData(pKeyColInfo, i);
int32_t winIndex = 0;
- SStateWindowInfo* pCurWin = getStateWindowByTs(pAggSup, tsCol[i], groupId, &winIndex);
+ SStateWindowInfo* pCurWin = getStateWindowByTs(pAggSup, tsCol[i], gpCol[i], &winIndex);
if (!pCurWin) {
continue;
}
- step = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCol, groupId, pKeyColInfo,
- pBlock->info.rows, i, &allEqual, pSeDeleted);
- ASSERT(isTsInWindow(pCurWin, tsCol[i]) || isEqualStateKey(pCurWin, pKeyData));
+ updateSessionWindowInfo(&pCurWin->winInfo, tsCol, NULL, 0, pBlock->info.rows, i, 0, NULL);
taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition));
deleteWindow(pAggSup->pCurWins, winIndex, destroyStateWinInfo);
}
@@ -4669,27 +4851,27 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
char* pKeyData = colDataGetData(pKeyColInfo, i);
int32_t winIndex = 0;
bool allEqual = true;
- SStateWindowInfo* pCurWin =
- getStateWindow(pAggSup, tsCols[i], groupId, pKeyData, &pInfo->stateCol, &winIndex);
- winRows = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCols, groupId, pKeyColInfo,
- pSDataBlock->info.rows, i, &allEqual, pStDeleted);
+ SStateWindowInfo* pCurWin = getStateWindow(pAggSup, tsCols[i], groupId, pKeyData, &pInfo->stateCol, &winIndex);
+ winRows = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCols, groupId, pKeyColInfo, pSDataBlock->info.rows,
+ i, &allEqual, pStDeleted);
if (!allEqual) {
+ uint64_t uid = 0;
appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey,
- &groupId);
+ &uid, &groupId);
taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition));
deleteWindow(pAggSup->pCurWins, winIndex, destroyStateWinInfo);
continue;
}
code = doOneStateWindowAgg(pInfo, pSDataBlock, &pCurWin->winInfo, &pResult, i, winRows, numOfOutput, pOperator);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
pCurWin->winInfo.isClosed = false;
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
- SWinRes value = {.ts = pCurWin->winInfo.win.skey, .groupId = groupId};
- code = taosHashPut(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition), &value, sizeof(SWinRes));
+ SWinKey value = {.ts = pCurWin->winInfo.win.skey, .groupId = groupId};
+ code = taosHashPut(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition), &value, sizeof(SWinKey));
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
pCurWin->winInfo.isOutput = true;
}
@@ -4704,6 +4886,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
SExprSupp* pSup = &pOperator->exprSupp;
SStreamStateAggOperatorInfo* pInfo = pOperator->info;
SOptrBasicInfo* pBInfo = &pInfo->binfo;
+ int64_t maxTs = INT64_MIN;
if (pOperator->status == OP_RES_TO_RETURN) {
doBuildDeleteDataBlock(pInfo->pSeDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
if (pInfo->pDelRes->info.rows > 0) {
@@ -4730,13 +4913,13 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
printDataBlock(pBlock, "single state recv");
if (pBlock->info.type == STREAM_CLEAR) {
- doClearStateWindows(&pInfo->streamAggSup, pBlock, pInfo->primaryTsIndex, &pInfo->stateCol, pInfo->stateCol.slotId,
- pSeUpdated, pInfo->pSeDeleted);
+ doClearStateWindows(&pInfo->streamAggSup, pBlock, pSeUpdated, pInfo->pSeDeleted);
continue;
} else if (pBlock->info.type == STREAM_DELETE_DATA) {
SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, pWins, destroyStateWinInfo);
copyDeleteWindowInfo(pWins, pInfo->pSeDeleted);
+ removeSessionResults(pSeUpdated, pWins);
taosArrayDestroy(pWins);
continue;
} else if (pBlock->info.type == STREAM_GET_ALL) {
@@ -4751,8 +4934,9 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pSup->pCtx, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true);
doStreamStateAggImpl(pOperator, pBlock, pSeUpdated, pInfo->pSeDeleted);
- pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
+ maxTs = TMAX(maxTs, pBlock->info.window.ekey);
}
+ pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
// restore the value
pOperator->status = OP_RES_TO_RETURN;
@@ -4847,13 +5031,11 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.numOfExprs = numOfCols;
- pOperator->exprSupp.pExprInfo = pExprInfo;
pOperator->pTaskInfo = pTaskInfo;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamStateAgg, NULL, NULL,
destroyStreamStateOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
- initDownStream(downstream, &pInfo->streamAggSup, 0, pInfo->twAggSup.waterMark, pOperator->operatorType);
+ initDownStream(downstream, &pInfo->streamAggSup, 0, pInfo->twAggSup.waterMark, pOperator->operatorType, pInfo->primaryTsIndex);
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
@@ -4861,16 +5043,15 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
return pOperator;
_error:
- destroyStreamStateOperatorInfo(pInfo, numOfCols);
+ destroyStreamStateOperatorInfo(pInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
}
-void destroyMergeAlignedIntervalOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyMergeAlignedIntervalOperatorInfo(void* param) {
SMergeAlignedIntervalAggOperatorInfo* miaInfo = (SMergeAlignedIntervalAggOperatorInfo*)param;
- destroyIntervalOperatorInfo(miaInfo->intervalAggOperatorInfo, numOfOutput);
-
+ destroyIntervalOperatorInfo(miaInfo->intervalAggOperatorInfo);
taosMemoryFreeClear(param);
}
@@ -4883,14 +5064,14 @@ static int32_t outputMergeAlignedIntervalResult(SOperatorInfo* pOperatorInfo, ui
SExprSupp* pSup = &pOperatorInfo->exprSupp;
SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &wstartTs, TSDB_KEYSIZE, tableGroupId);
- SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf,
+ SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf,
GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
ASSERT(p1 != NULL);
finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pSup->pCtx, pSup->pExprInfo, pSup->numOfExprs,
pSup->rowEntryInfoOffset, pResultBlock, pTaskInfo);
- taosHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
- ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0);
+ tSimpleHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
+ ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0);
return TSDB_CODE_SUCCESS;
}
@@ -4913,7 +5094,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
// there is an result exists
if (miaInfo->curTs != INT64_MIN) {
- ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
+ ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
if (ts != miaInfo->curTs) {
outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs);
@@ -4921,7 +5102,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
}
} else {
miaInfo->curTs = ts;
- ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0);
+ ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0);
}
STimeWindow win = {0};
@@ -4933,7 +5114,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
int32_t currPos = startPos;
@@ -4960,7 +5141,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
ret = setTimeWindowOutputBuf(pResultRowInfo, &currWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
miaInfo->curTs = currWin.skey;
@@ -4997,7 +5178,7 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
if (pBlock == NULL) {
// close last unfinalized time window
if (miaInfo->curTs != INT64_MIN) {
- ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
+ ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs);
miaInfo->curTs = INT64_MIN;
}
@@ -5065,9 +5246,7 @@ static SSDataBlock* mergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
return (rows == 0) ? NULL : pRes;
}
-SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo,
- int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval,
- int32_t primaryTsSlotId, SNode* pCondition, bool mergeResultBlock,
+SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SMergeAlignedIntervalPhysiNode* pNode,
SExecTaskInfo* pTaskInfo) {
SMergeAlignedIntervalAggOperatorInfo* miaInfo = taosMemoryCalloc(1, sizeof(SMergeAlignedIntervalAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
@@ -5080,48 +5259,54 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
goto _error;
}
+ int32_t num = 0;
+ SExprInfo* pExprInfo = createExprInfo(pNode->window.pFuncs, NULL, &num);
+ SSDataBlock* pResBlock = createResDataBlock(pNode->window.node.pOutputDataBlockDesc);
+
+ SInterval interval = {.interval = pNode->interval,
+ .sliding = pNode->sliding,
+ .intervalUnit = pNode->intervalUnit,
+ .slidingUnit = pNode->slidingUnit,
+ .offset = pNode->offset,
+ .precision = ((SColumnNode*)pNode->window.pTspk)->node.resType.precision};
+
SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo;
SExprSupp* pSup = &pOperator->exprSupp;
- miaInfo->pCondition = pCondition;
- miaInfo->curTs = INT64_MIN;
-
- iaInfo->win = pTaskInfo->window;
- iaInfo->inputOrder = TSDB_ORDER_ASC;
- iaInfo->interval = *pInterval;
- iaInfo->execModel = pTaskInfo->execModel;
- iaInfo->primaryTsIndex = primaryTsSlotId;
- iaInfo->binfo.mergeResultBlock = mergeResultBlock;
+ miaInfo->pCondition = pNode->window.node.pConditions;
+ miaInfo->curTs = INT64_MIN;
+ iaInfo->win = pTaskInfo->window;
+ iaInfo->inputOrder = TSDB_ORDER_ASC;
+ iaInfo->interval = interval;
+ iaInfo->execModel = pTaskInfo->execModel;
+ iaInfo->primaryTsIndex = ((SColumnNode*)pNode->window.pTspk)->slotId;
+ iaInfo->binfo.mergeResultBlock = pNode->window.mergeDataBlock;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
initResultSizeInfo(&pOperator->resultInfo, 4096);
- int32_t code =
- initAggInfo(&pOperator->exprSupp, &iaInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
- initBasicInfo(&iaInfo->binfo, pResBlock);
+ int32_t code = initAggInfo(&pOperator->exprSupp, &iaInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ initBasicInfo(&iaInfo->binfo, pResBlock);
initExecTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &iaInfo->win);
- iaInfo->timeWindowInterpo = timeWindowinterpNeeded(pSup->pCtx, numOfCols, iaInfo);
+ iaInfo->timeWindowInterpo = timeWindowinterpNeeded(pSup->pCtx, num, iaInfo);
if (iaInfo->timeWindowInterpo) {
- iaInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition));
- }
-
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
+ iaInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SOpenWindowInfo));
}
initResultRowInfo(&iaInfo->binfo.resultRowInfo);
blockDataEnsureCapacity(iaInfo->binfo.pRes, pOperator->resultInfo.capacity);
- pOperator->name = "TimeMergeAlignedIntervalAggOperator";
+ pOperator->name = "TimeMergeAlignedIntervalAggOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL;
- pOperator->blocking = false;
- pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->pTaskInfo = pTaskInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
- pOperator->info = miaInfo;
+ pOperator->blocking = false;
+ pOperator->status = OP_NOT_OPENED;
+ pOperator->pTaskInfo = pTaskInfo;
+ pOperator->info = miaInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, mergeAlignedIntervalAgg, NULL, NULL,
destroyMergeAlignedIntervalOperatorInfo, NULL, NULL, NULL);
@@ -5134,7 +5319,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
return pOperator;
_error:
- destroyMergeAlignedIntervalOperatorInfo(miaInfo, numOfCols);
+ destroyMergeAlignedIntervalOperatorInfo(miaInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
@@ -5157,10 +5342,10 @@ typedef struct SGroupTimeWindow {
STimeWindow window;
} SGroupTimeWindow;
-void destroyMergeIntervalOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyMergeIntervalOperatorInfo(void* param) {
SMergeIntervalAggOperatorInfo* miaInfo = (SMergeIntervalAggOperatorInfo*)param;
tdListFree(miaInfo->groupIntervals);
- destroyIntervalOperatorInfo(&miaInfo->intervalAggOperatorInfo, numOfOutput);
+ destroyIntervalOperatorInfo(&miaInfo->intervalAggOperatorInfo);
taosMemoryFreeClear(param);
}
@@ -5174,12 +5359,12 @@ static int32_t finalizeWindowResult(SOperatorInfo* pOperatorInfo, uint64_t table
SExprSupp* pExprSup = &pOperatorInfo->exprSupp;
SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &win->skey, TSDB_KEYSIZE, tableGroupId);
- SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf,
+ SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf,
GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
ASSERT(p1 != NULL);
finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pExprSup->pCtx, pExprSup->pExprInfo,
pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset, pResultBlock, pTaskInfo);
- taosHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
+ tSimpleHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
return TSDB_CODE_SUCCESS;
}
@@ -5235,7 +5420,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pExprSup->pCtx,
numOfOutput, pExprSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
TSKEY ekey = ascScan ? win.ekey : win.skey;
@@ -5245,14 +5430,14 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
// prev time window not interpolation yet.
if (iaInfo->timeWindowInterpo) {
- SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult);
+ SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult, tableGroupId);
doInterpUnclosedTimeWindow(pOperatorInfo, numOfOutput, pResultRowInfo, pBlock, scanFlag, tsCols, &pos);
// restore current time window
ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pExprSup->pCtx,
numOfOutput, pExprSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
// window start key interpolation
@@ -5281,7 +5466,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
setTimeWindowOutputBuf(pResultRowInfo, &nextWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
pExprSup->pCtx, numOfOutput, pExprSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
ekey = ascScan ? nextWin.ekey : nextWin.skey;
@@ -5378,54 +5563,65 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) {
return (rows == 0) ? NULL : pRes;
}
-SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
- SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- bool mergeBlock, SExecTaskInfo* pTaskInfo) {
- SMergeIntervalAggOperatorInfo* miaInfo = taosMemoryCalloc(1, sizeof(SMergeIntervalAggOperatorInfo));
+SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeIntervalPhysiNode* pIntervalPhyNode,
+ SExecTaskInfo* pTaskInfo) {
+ SMergeIntervalAggOperatorInfo* pMergeIntervalInfo = taosMemoryCalloc(1, sizeof(SMergeIntervalAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
- if (miaInfo == NULL || pOperator == NULL) {
+ if (pMergeIntervalInfo == NULL || pOperator == NULL) {
goto _error;
}
- miaInfo->groupIntervals = tdListNew(sizeof(SGroupTimeWindow));
+ int32_t num = 0;
+ SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &num);
+ SSDataBlock* pResBlock = createResDataBlock(pIntervalPhyNode->window.node.pOutputDataBlockDesc);
- SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo;
- iaInfo->win = pTaskInfo->window;
- iaInfo->inputOrder = TSDB_ORDER_ASC;
- iaInfo->interval = *pInterval;
- iaInfo->execModel = pTaskInfo->execModel;
- iaInfo->binfo.mergeResultBlock = mergeBlock;
+ SInterval interval = {.interval = pIntervalPhyNode->interval,
+ .sliding = pIntervalPhyNode->sliding,
+ .intervalUnit = pIntervalPhyNode->intervalUnit,
+ .slidingUnit = pIntervalPhyNode->slidingUnit,
+ .offset = pIntervalPhyNode->offset,
+ .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision};
- iaInfo->primaryTsIndex = primaryTsSlotId;
+ pMergeIntervalInfo->groupIntervals = tdListNew(sizeof(SGroupTimeWindow));
+
+ SIntervalAggOperatorInfo* pIntervalInfo = &pMergeIntervalInfo->intervalAggOperatorInfo;
+ pIntervalInfo->win = pTaskInfo->window;
+ pIntervalInfo->inputOrder = TSDB_ORDER_ASC;
+ pIntervalInfo->interval = interval;
+ pIntervalInfo->execModel = pTaskInfo->execModel;
+ pIntervalInfo->binfo.mergeResultBlock = pIntervalPhyNode->window.mergeDataBlock;
+ pIntervalInfo->primaryTsIndex = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
SExprSupp* pExprSupp = &pOperator->exprSupp;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
initResultSizeInfo(&pOperator->resultInfo, 4096);
- int32_t code = initAggInfo(pExprSupp, &iaInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
- initBasicInfo(&iaInfo->binfo, pResBlock);
+ int32_t code = initAggInfo(pExprSupp, &pIntervalInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
- initExecTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &iaInfo->win);
+ initBasicInfo(&pIntervalInfo->binfo, pResBlock);
+ initExecTimeWindowInfo(&pIntervalInfo->twAggSup.timeWindowData, &pIntervalInfo->win);
- iaInfo->timeWindowInterpo = timeWindowinterpNeeded(pExprSupp->pCtx, numOfCols, iaInfo);
- if (iaInfo->timeWindowInterpo) {
- iaInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition));
- if (iaInfo->binfo.resultRowInfo.openWindow == NULL) {
+
+ pIntervalInfo->timeWindowInterpo = timeWindowinterpNeeded(pExprSupp->pCtx, num, pIntervalInfo);
+ if (pIntervalInfo->timeWindowInterpo) {
+ pIntervalInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SOpenWindowInfo));
+ if (pIntervalInfo->binfo.resultRowInfo.openWindow == NULL) {
goto _error;
}
}
- initResultRowInfo(&iaInfo->binfo.resultRowInfo);
+ initResultRowInfo(&pIntervalInfo->binfo.resultRowInfo);
- pOperator->name = "TimeMergeIntervalAggOperator";
+ pOperator->name = "TimeMergeIntervalAggOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL;
- pOperator->blocking = false;
- pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->pTaskInfo = pTaskInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
- pOperator->info = miaInfo;
+ pOperator->blocking = false;
+ pOperator->status = OP_NOT_OPENED;
+ pOperator->pTaskInfo = pTaskInfo;
+ pOperator->info = pMergeIntervalInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doMergeIntervalAgg, NULL, NULL,
destroyMergeIntervalOperatorInfo, NULL, NULL, NULL);
@@ -5438,7 +5634,7 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI
return pOperator;
_error:
- destroyMergeIntervalOperatorInfo(miaInfo, numOfCols);
+ destroyMergeIntervalOperatorInfo(pMergeIntervalInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
diff --git a/source/libs/executor/src/tlinearhash.c b/source/libs/executor/src/tlinearhash.c
index ad97d79f7e7ad28da3cf51aab33010303e11f509..cffabcb6aca1f1f5ba457fb765828889bc3c03e6 100644
--- a/source/libs/executor/src/tlinearhash.c
+++ b/source/libs/executor/src/tlinearhash.c
@@ -26,7 +26,7 @@ typedef struct SLHashBucket {
int32_t size; // the number of element in this entry
} SLHashBucket;
-typedef struct SLHashObj {
+struct SLHashObj {
SDiskbasedBuf *pBuf;
_hash_fn_t hashFn;
SLHashBucket **pBucket; // entry list
@@ -35,7 +35,7 @@ typedef struct SLHashObj {
int32_t bits; // the number of bits used in hash
int32_t numOfBuckets; // the number of buckets
int64_t size; // the number of total items
-} SLHashObj;
+};
/**
* the data struct for each hash node
@@ -97,9 +97,9 @@ static int32_t doAddToBucket(SLHashObj* pHashObj, SLHashBucket* pBucket, int32_t
// allocate the overflow buffer page to hold this k/v.
int32_t newPageId = -1;
- SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, 0, &newPageId);
+ SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, &newPageId);
if (pNewPage == NULL) {
- return TSDB_CODE_OUT_OF_MEMORY;
+ return terrno;
}
taosArrayPush(pBucket->pPageIdList, &newPageId);
@@ -138,7 +138,6 @@ static void doRemoveFromBucket(SFilePage* pPage, SLHashNode* pNode, SLHashBucket
}
setBufPageDirty(pPage, true);
-
pBucket->size -= 1;
}
@@ -228,7 +227,11 @@ static int32_t doAddNewBucket(SLHashObj* pHashObj) {
}
int32_t pageId = -1;
- SFilePage* p = getNewBufPage(pHashObj->pBuf, 0, &pageId);
+ SFilePage* p = getNewBufPage(pHashObj->pBuf, &pageId);
+ if (p == NULL) {
+ return terrno;
+ }
+
p->num = sizeof(SFilePage);
setBufPageDirty(p, true);
@@ -252,7 +255,8 @@ SLHashObj* tHashInit(int32_t inMemPages, int32_t pageSize, _hash_fn_t fn, int32_
printf("tHash Init failed since %s", terrstr(terrno));
return NULL;
}
- int32_t code = createDiskbasedBuf(&pHashObj->pBuf, pageSize, inMemPages * pageSize, 0, tsTempDir);
+
+ int32_t code = createDiskbasedBuf(&pHashObj->pBuf, pageSize, inMemPages * pageSize, "", tsTempDir);
if (code != 0) {
terrno = code;
return NULL;
@@ -389,7 +393,9 @@ char* tHashGet(SLHashObj* pHashObj, const void *key, size_t keyLen) {
}
SLHashBucket* pBucket = pHashObj->pBucket[bucketId];
- for (int32_t i = 0; i < taosArrayGetSize(pBucket->pPageIdList); ++i) {
+ int32_t num = taosArrayGetSize(pBucket->pPageIdList);
+
+ for (int32_t i = 0; i < num; ++i) {
int32_t pageId = *(int32_t*)taosArrayGet(pBucket->pPageIdList, i);
SFilePage* p = getBufPage(pHashObj->pBuf, pageId);
diff --git a/source/libs/executor/src/tsimplehash.c b/source/libs/executor/src/tsimplehash.c
index 6b2edf0d5e6e1f41b5d354d110fb23892a864b33..84b615af7a93aef9fbf86190a2544474b7b2c87b 100644
--- a/source/libs/executor/src/tsimplehash.c
+++ b/source/libs/executor/src/tsimplehash.c
@@ -31,21 +31,12 @@
taosMemoryFreeClear(_n); \
} while (0);
-#pragma pack(push, 4)
-typedef struct SHNode {
- struct SHNode *next;
- uint32_t keyLen : 20;
- uint32_t dataLen : 12;
- char data[];
-} SHNode;
-#pragma pack(pop)
-
struct SSHashObj {
SHNode **hashList;
size_t capacity; // number of slots
- int64_t size; // number of elements in hash table
- _hash_fn_t hashFp; // hash function
- _equal_fn_t equalFp; // equal function
+ int64_t size; // number of elements in hash table
+ _hash_fn_t hashFp; // hash function
+ _equal_fn_t equalFp; // equal function
};
static FORCE_INLINE int32_t taosHashCapacity(int32_t length) {
@@ -76,7 +67,6 @@ SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn) {
pHashObj->hashFp = fn;
ASSERT((pHashObj->capacity & (pHashObj->capacity - 1)) == 0);
-
pHashObj->hashList = (SHNode **)taosMemoryCalloc(pHashObj->capacity, sizeof(void *));
if (!pHashObj->hashList) {
taosMemoryFree(pHashObj);
@@ -285,6 +275,40 @@ int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key, size_t keyLen) {
return TSDB_CODE_SUCCESS;
}
+int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t keyLen, void **pIter, int32_t *iter) {
+ if (!pHashObj || !key) {
+ return TSDB_CODE_FAILED;
+ }
+
+ uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen);
+
+ int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity);
+
+ SHNode *pNode = pHashObj->hashList[slot];
+ SHNode *pPrev = NULL;
+ while (pNode) {
+ if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pNode->dataLen), key, keyLen) == 0) {
+ if (!pPrev) {
+ pHashObj->hashList[slot] = pNode->next;
+ } else {
+ pPrev->next = pNode->next;
+ }
+
+ if (*pIter == (void *)GET_SHASH_NODE_DATA(pNode)) {
+ *pIter = pPrev ? GET_SHASH_NODE_DATA(pPrev) : NULL;
+ }
+
+ FREE_HASH_NODE(pNode);
+ atomic_sub_fetch_64(&pHashObj->size, 1);
+ break;
+ }
+ pPrev = pNode;
+ pNode = pNode->next;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
void tSimpleHashClear(SSHashObj *pHashObj) {
if (!pHashObj || taosHashTableEmpty(pHashObj)) {
return;
@@ -302,6 +326,7 @@ void tSimpleHashClear(SSHashObj *pHashObj) {
FREE_HASH_NODE(pNode);
pNode = pNext;
}
+ pHashObj->hashList[i] = NULL;
}
atomic_store_64(&pHashObj->size, 0);
}
@@ -324,15 +349,6 @@ size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj) {
return (pHashObj->capacity * sizeof(void *)) + sizeof(SHNode) * tSimpleHashGetSize(pHashObj) + sizeof(SSHashObj);
}
-void *tSimpleHashGetKey(void *data, size_t *keyLen) {
- SHNode *node = (SHNode *)((char *)data - offsetof(SHNode, data));
- if (keyLen) {
- *keyLen = node->keyLen;
- }
-
- return POINTER_SHIFT(data, node->dataLen);
-}
-
void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter) {
if (!pHashObj) {
return NULL;
@@ -341,53 +357,12 @@ void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter) {
SHNode *pNode = NULL;
if (!data) {
- for (int32_t i = 0; i < pHashObj->capacity; ++i) {
- pNode = pHashObj->hashList[i];
- if (!pNode) {
- continue;
- }
- *iter = i;
- return GET_SHASH_NODE_DATA(pNode);
- }
- return NULL;
- }
-
- pNode = (SHNode *)((char *)data - offsetof(SHNode, data));
-
- if (pNode->next) {
- return GET_SHASH_NODE_DATA(pNode->next);
- }
-
- ++(*iter);
- for (int32_t i = *iter; i < pHashObj->capacity; ++i) {
- pNode = pHashObj->hashList[i];
- if (!pNode) {
- continue;
- }
- *iter = i;
- return GET_SHASH_NODE_DATA(pNode);
- }
-
- return NULL;
-}
-
-void *tSimpleHashIterateKV(const SSHashObj *pHashObj, void *data, void **key, int32_t *iter) {
- if (!pHashObj) {
- return NULL;
- }
-
- SHNode *pNode = NULL;
-
- if (!data) {
- for (int32_t i = 0; i < pHashObj->capacity; ++i) {
+ for (int32_t i = *iter; i < pHashObj->capacity; ++i) {
pNode = pHashObj->hashList[i];
if (!pNode) {
continue;
}
*iter = i;
- if (key) {
- *key = GET_SHASH_NODE_KEY(pNode, pNode->dataLen);
- }
return GET_SHASH_NODE_DATA(pNode);
}
return NULL;
@@ -396,9 +371,6 @@ void *tSimpleHashIterateKV(const SSHashObj *pHashObj, void *data, void **key, in
pNode = (SHNode *)((char *)data - offsetof(SHNode, data));
if (pNode->next) {
- if (key) {
- *key = GET_SHASH_NODE_KEY(pNode->next, pNode->next->dataLen);
- }
return GET_SHASH_NODE_DATA(pNode->next);
}
@@ -409,9 +381,6 @@ void *tSimpleHashIterateKV(const SSHashObj *pHashObj, void *data, void **key, in
continue;
}
*iter = i;
- if (key) {
- *key = GET_SHASH_NODE_KEY(pNode, pNode->dataLen);
- }
return GET_SHASH_NODE_DATA(pNode);
}
diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c
index 48af951773814d9979eb6d349670753ad4b036eb..168cd21c4478d9c1b50053fadf0e9dcdf518d4f4 100644
--- a/source/libs/executor/src/tsort.c
+++ b/source/libs/executor/src/tsort.c
@@ -97,7 +97,7 @@ SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, int32_t type, int32_t page
return pSortHandle;
}
-static int32_t sortComparClearup(SMsortComparParam* cmpParam) {
+static int32_t sortComparCleanup(SMsortComparParam* cmpParam) {
for(int32_t i = 0; i < cmpParam->numOfSources; ++i) {
SSortSource* pSource = cmpParam->pSources[i]; // NOTICE: pSource may be SGenericSource *, if it is SORT_MULTISOURCE_MERGE
blockDataDestroy(pSource->src.pBlock);
@@ -134,15 +134,14 @@ int32_t tsortAddSource(SSortHandle* pSortHandle, void* pSource) {
return TSDB_CODE_SUCCESS;
}
-static int32_t doAddNewExternalMemSource(SDiskbasedBuf *pBuf, SArray* pAllSources, SSDataBlock* pBlock, int32_t* sourceId) {
+static int32_t doAddNewExternalMemSource(SDiskbasedBuf *pBuf, SArray* pAllSources, SSDataBlock* pBlock, int32_t* sourceId, SArray* pPageIdList) {
SSortSource* pSource = taosMemoryCalloc(1, sizeof(SSortSource));
if (pSource == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
- pSource->pageIdList = getDataBufPagesIdList(pBuf, (*sourceId));
pSource->src.pBlock = pBlock;
-
+ pSource->pageIdList = pPageIdList;
taosArrayPush(pAllSources, &pSource);
(*sourceId) += 1;
@@ -171,6 +170,7 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
}
}
+ SArray* pPageIdList = taosArrayInit(4, sizeof(int32_t));
while(start < pDataBlock->info.rows) {
int32_t stop = 0;
blockDataSplitRows(pDataBlock, pDataBlock->info.hasVarCol, start, &stop, pHandle->pageSize);
@@ -180,12 +180,14 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
}
int32_t pageId = -1;
- void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId);
+ void* pPage = getNewBufPage(pHandle->pBuf, &pageId);
if (pPage == NULL) {
blockDataDestroy(p);
return terrno;
}
+ taosArrayPush(pPageIdList, &pageId);
+
int32_t size = blockDataGetSize(p) + sizeof(int32_t) + taosArrayGetSize(p->pDataBlock) * sizeof(int32_t);
assert(size <= getBufPageSize(pHandle->pBuf));
@@ -201,7 +203,7 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
blockDataCleanup(pDataBlock);
SSDataBlock* pBlock = createOneDataBlock(pDataBlock, false);
- return doAddNewExternalMemSource(pHandle->pBuf, pHandle->pOrderedSource, pBlock, &pHandle->sourceId);
+ return doAddNewExternalMemSource(pHandle->pBuf, pHandle->pOrderedSource, pBlock, &pHandle->sourceId, pPageIdList);
}
static void setCurrentSourceIsDone(SSortSource* pSource, SSortHandle* pHandle) {
@@ -502,6 +504,7 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
return code;
}
+ SArray* pPageIdList = taosArrayInit(4, sizeof(int32_t));
while (1) {
SSDataBlock* pDataBlock = getSortedBlockDataInner(pHandle, &pHandle->cmpParam, numOfRows);
if (pDataBlock == NULL) {
@@ -509,11 +512,13 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
}
int32_t pageId = -1;
- void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId);
+ void* pPage = getNewBufPage(pHandle->pBuf, &pageId);
if (pPage == NULL) {
return terrno;
}
+ taosArrayPush(pPageIdList, &pageId);
+
int32_t size = blockDataGetSize(pDataBlock) + sizeof(int32_t) + taosArrayGetSize(pDataBlock->pDataBlock) * sizeof(int32_t);
assert(size <= getBufPageSize(pHandle->pBuf));
@@ -525,12 +530,12 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
blockDataCleanup(pDataBlock);
}
- sortComparClearup(&pHandle->cmpParam);
+ sortComparCleanup(&pHandle->cmpParam);
tMergeTreeDestroy(pHandle->pMergeTree);
pHandle->numOfCompletedSources = 0;
SSDataBlock* pBlock = createOneDataBlock(pHandle->pDataBlock, false);
- code = doAddNewExternalMemSource(pHandle->pBuf, pResList, pBlock, &pHandle->sourceId);
+ code = doAddNewExternalMemSource(pHandle->pBuf, pResList, pBlock, &pHandle->sourceId, pPageIdList);
if (code != 0) {
return code;
}
diff --git a/source/libs/executor/test/executorTests.cpp b/source/libs/executor/test/executorTests.cpp
index bba4b254c5d56f2c72988897273d363a3fec3c0c..1c4216334945c0b682e313a975e558390fbd7049 100644
--- a/source/libs/executor/test/executorTests.cpp
+++ b/source/libs/executor/test/executorTests.cpp
@@ -26,7 +26,6 @@
#include "executor.h"
#include "executorimpl.h"
#include "function.h"
-#include "stub.h"
#include "taos.h"
#include "tdatablock.h"
#include "tdef.h"
diff --git a/source/libs/executor/test/lhashTests.cpp b/source/libs/executor/test/lhashTests.cpp
index 695552faa0f353cc631b87cf03f51003c7b66aed..c9b75395bce345802ff0e563762758601aca0a18 100644
--- a/source/libs/executor/test/lhashTests.cpp
+++ b/source/libs/executor/test/lhashTests.cpp
@@ -26,40 +26,47 @@
TEST(testCase, linear_hash_Tests) {
taosSeedRand(taosGetTimestampSec());
+ strcpy(tsTempDir, "/tmp/");
_hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT);
-#if 0
- SLHashObj* pHashObj = tHashInit(256, 4096, fn, 320);
- for(int32_t i = 0; i < 5000000; ++i) {
+
+ int64_t st = taosGetTimestampUs();
+
+ SLHashObj* pHashObj = tHashInit(4098*4*2, 512, fn, 40);
+ for(int32_t i = 0; i < 1000000; ++i) {
int32_t code = tHashPut(pHashObj, &i, sizeof(i), &i, sizeof(i));
assert(code == 0);
}
// tHashPrint(pHashObj, LINEAR_HASH_STATIS);
+ int64_t et = taosGetTimestampUs();
-// for(int32_t i = 0; i < 10000; ++i) {
-// char* v = tHashGet(pHashObj, &i, sizeof(i));
-// if (v != NULL) {
-//// printf("find value: %d, key:%d\n", *(int32_t*) v, i);
-// } else {
+ for(int32_t i = 0; i < 1000000; ++i) {
+ if (i == 950000) {
+ printf("kf\n");
+ }
+ char* v = tHashGet(pHashObj, &i, sizeof(i));
+ if (v != NULL) {
+// printf("find value: %d, key:%d\n", *(int32_t*) v, i);
+ } else {
// printf("failed to found key:%d in hash\n", i);
-// }
-// }
+ }
+ }
- tHashPrint(pHashObj, LINEAR_HASH_STATIS);
+// tHashPrint(pHashObj, LINEAR_HASH_STATIS);
tHashCleanup(pHashObj);
-#endif
+ int64_t et1 = taosGetTimestampUs();
-#if 0
- SHashObj* pHashObj = taosHashInit(1000, fn, false, HASH_NO_LOCK);
+ SHashObj* pHashObj1 = taosHashInit(1000, fn, false, HASH_NO_LOCK);
for(int32_t i = 0; i < 1000000; ++i) {
- taosHashPut(pHashObj, &i, sizeof(i), &i, sizeof(i));
+ taosHashPut(pHashObj1, &i, sizeof(i), &i, sizeof(i));
}
- for(int32_t i = 0; i < 10000; ++i) {
- void* v = taosHashGet(pHashObj, &i, sizeof(i));
+ for(int32_t i = 0; i < 1000000; ++i) {
+ void* v = taosHashGet(pHashObj1, &i, sizeof(i));
}
- taosHashCleanup(pHashObj);
-#endif
+ taosHashCleanup(pHashObj1);
+ int64_t et2 = taosGetTimestampUs();
+ printf("linear hash time:%.2f ms, buildHash:%.2f ms, hash:%.2f\n", (et1-st)/1000.0, (et-st)/1000.0, (et2-et1)/1000.0);
}
\ No newline at end of file
diff --git a/source/libs/executor/test/sortTests.cpp b/source/libs/executor/test/sortTests.cpp
index 6e244152f20e0d4b914b21fcb871a5bbec871fce..4ac15670ac5dca547572df102f7267de08c0306d 100644
--- a/source/libs/executor/test/sortTests.cpp
+++ b/source/libs/executor/test/sortTests.cpp
@@ -27,7 +27,6 @@
#include "executorimpl.h"
#include "executor.h"
-#include "stub.h"
#include "taos.h"
#include "tdatablock.h"
#include "tdef.h"
@@ -196,7 +195,7 @@ int32_t docomp(const void* p1, const void* p2, void* param) {
}
} // namespace
-#if 1
+#if 0
TEST(testCase, inMem_sort_Test) {
SBlockOrderInfo oi = {0};
oi.order = TSDB_ORDER_ASC;
@@ -382,7 +381,7 @@ TEST(testCase, ordered_merge_sort_Test) {
}
void* v = tsortGetValue(pTupleHandle, 0);
- printf("%d: %d\n", row, *(int32_t*) v);
+// printf("%d: %d\n", row, *(int32_t*) v);
ASSERT_EQ(row++, *(int32_t*) v);
}
diff --git a/source/libs/executor/test/tSimpleHashTests.cpp b/source/libs/executor/test/tSimpleHashTests.cpp
index acb6d434b484057196067954df13eeb4bcd602b3..3bf339ef9040879c0978f9bedffb2b23bd8ec806 100644
--- a/source/libs/executor/test/tSimpleHashTests.cpp
+++ b/source/libs/executor/test/tSimpleHashTests.cpp
@@ -30,7 +30,7 @@
// return RUN_ALL_TESTS();
// }
-TEST(testCase, tSimpleHashTest) {
+TEST(testCase, tSimpleHashTest_intKey) {
SSHashObj *pHashObj =
tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
@@ -57,12 +57,14 @@ TEST(testCase, tSimpleHashTest) {
int32_t iter = 0;
int64_t keySum = 0;
int64_t dataSum = 0;
+ size_t kLen = 0;
while ((data = tSimpleHashIterate(pHashObj, data, &iter))) {
- void *key = tSimpleHashGetKey(data, NULL);
+ void *key = tSimpleHashGetKey(data, &kLen);
+ ASSERT_EQ(keyLen, kLen);
keySum += *(int64_t *)key;
dataSum += *(int64_t *)data;
}
-
+
ASSERT_EQ(keySum, dataSum);
ASSERT_EQ(keySum, originKeySum);
@@ -74,4 +76,69 @@ TEST(testCase, tSimpleHashTest) {
tSimpleHashCleanup(pHashObj);
}
+
+TEST(testCase, tSimpleHashTest_binaryKey) {
+ SSHashObj *pHashObj =
+ tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
+
+ assert(pHashObj != nullptr);
+
+ ASSERT_EQ(0, tSimpleHashGetSize(pHashObj));
+
+ typedef struct {
+ int64_t suid;
+ int64_t uid;
+ } SCombineKey;
+
+ size_t keyLen = sizeof(SCombineKey);
+ size_t dataLen = sizeof(int64_t);
+
+ int64_t originDataSum = 0;
+ SCombineKey combineKey = {0};
+ for (int64_t i = 1; i <= 100; ++i) {
+ combineKey.suid = i;
+ combineKey.uid = i + 1;
+ tSimpleHashPut(pHashObj, (const void *)&combineKey, keyLen, (const void *)&i, dataLen);
+ originDataSum += i;
+ ASSERT_EQ(i, tSimpleHashGetSize(pHashObj));
+ }
+
+ for (int64_t i = 1; i <= 100; ++i) {
+ combineKey.suid = i;
+ combineKey.uid = i + 1;
+ void *data = tSimpleHashGet(pHashObj, (const void *)&combineKey, keyLen);
+ ASSERT_EQ(i, *(int64_t *)data);
+ }
+
+ void *data = NULL;
+ int32_t iter = 0;
+ int64_t keySum = 0;
+ int64_t dataSum = 0;
+ size_t kLen = 0;
+ while ((data = tSimpleHashIterate(pHashObj, data, &iter))) {
+ void *key = tSimpleHashGetKey(data, &kLen);
+ ASSERT_EQ(keyLen, kLen);
+ dataSum += *(int64_t *)data;
+ }
+
+ ASSERT_EQ(originDataSum, dataSum);
+
+ tSimpleHashRemove(pHashObj, (const void *)&combineKey, keyLen);
+
+ while ((data = tSimpleHashIterate(pHashObj, data, &iter))) {
+ void *key = tSimpleHashGetKey(data, &kLen);
+ ASSERT_EQ(keyLen, kLen);
+ }
+
+ for (int64_t i = 1; i <= 99; ++i) {
+ combineKey.suid = i;
+ combineKey.uid = i + 1;
+ tSimpleHashRemove(pHashObj, (const void *)&combineKey, keyLen);
+ ASSERT_EQ(99 - i, tSimpleHashGetSize(pHashObj));
+ }
+
+ tSimpleHashCleanup(pHashObj);
+}
+
+
#pragma GCC diagnostic pop
\ No newline at end of file
diff --git a/source/libs/function/inc/tpercentile.h b/source/libs/function/inc/tpercentile.h
index dfb52f76946c502b38231130858b5694b7171f35..554f9e567f35cc0272a2a9755153de1b54d34392 100644
--- a/source/libs/function/inc/tpercentile.h
+++ b/source/libs/function/inc/tpercentile.h
@@ -51,20 +51,20 @@ struct tMemBucket;
typedef int32_t (*__perc_hash_func_t)(struct tMemBucket *pBucket, const void *value);
typedef struct tMemBucket {
- int16_t numOfSlots;
- int16_t type;
- int16_t bytes;
- int32_t total;
- int32_t elemPerPage; // number of elements for each object
- int32_t maxCapacity; // maximum allowed number of elements that can be sort directly to get the result
- int32_t bufPageSize; // disk page size
- MinMaxEntry range; // value range
- int32_t times; // count that has been checked for deciding the correct data value buckets.
- __compar_fn_t comparFn;
-
- tMemBucketSlot * pSlots;
- SDiskbasedBuf *pBuffer;
- __perc_hash_func_t hashFunc;
+ int16_t numOfSlots;
+ int16_t type;
+ int16_t bytes;
+ int32_t total;
+ int32_t elemPerPage; // number of elements for each object
+ int32_t maxCapacity; // maximum allowed number of elements that can be sort directly to get the result
+ int32_t bufPageSize; // disk page size
+ MinMaxEntry range; // value range
+ int32_t times; // count that has been checked for deciding the correct data value buckets.
+ __compar_fn_t comparFn;
+ tMemBucketSlot* pSlots;
+ SDiskbasedBuf* pBuffer;
+ __perc_hash_func_t hashFunc;
+  SHashObj*          groupPagesMap;     // disk page map for different groups
} tMemBucket;
tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, double maxval);
diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c
index ed82e4cb50cd2ce72ab3e9965b7ef1481fe2ccfa..648ae5a538caed9fdb896bc4f1e6fe7537a7948a 100644
--- a/source/libs/function/src/builtins.c
+++ b/source/libs/function/src/builtins.c
@@ -303,7 +303,7 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le
}
SExprNode* pPara1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
- if (!IS_VAR_DATA_TYPE(pPara1->resType.type)) {
+ if (!IS_STR_DATA_TYPE(pPara1->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -311,13 +311,29 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le
return TSDB_CODE_SUCCESS;
}
+static int32_t translateMinMax(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
+ if (1 != LIST_LENGTH(pFunc->pParameterList)) {
+ return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
+ if (!IS_TIMESTAMP_TYPE(paraType) && !IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ } else if (IS_NULL_TYPE(paraType)) {
+ paraType = TSDB_DATA_TYPE_BIGINT;
+ }
+
+ pFunc->node.resType = (SDataType){.bytes = tDataTypes[paraType].bytes, .type = paraType};
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t translateTrimStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isLtrim) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
SExprNode* pPara1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
- if (!IS_VAR_DATA_TYPE(pPara1->resType.type)) {
+ if (!IS_STR_DATA_TYPE(pPara1->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -546,7 +562,7 @@ static int32_t translateApercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t
// param2
if (3 == numOfParams) {
uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type;
- if (!IS_VAR_DATA_TYPE(para3Type)) {
+ if (!IS_STR_DATA_TYPE(para3Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -593,7 +609,7 @@ static int32_t translateApercentileImpl(SFunctionNode* pFunc, char* pErrBuf, int
// param2
if (3 == numOfParams) {
uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type;
- if (!IS_VAR_DATA_TYPE(para3Type)) {
+ if (!IS_STR_DATA_TYPE(para3Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -698,7 +714,7 @@ static int32_t translateSpread(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
- if (!IS_NUMERIC_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_NUMERIC_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -713,7 +729,7 @@ static int32_t translateSpreadImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (isPartial) {
- if (!IS_NUMERIC_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_NUMERIC_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
pFunc->node.resType = (SDataType){.bytes = getSpreadInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY};
@@ -788,7 +804,7 @@ static int32_t translateElapsedImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
}
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
- if (TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1388,7 +1404,7 @@ static int32_t translateSample(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
// set result type
- if (IS_VAR_DATA_TYPE(colType)) {
+ if (IS_STR_DATA_TYPE(colType)) {
pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType};
} else {
pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType};
@@ -1431,7 +1447,7 @@ static int32_t translateTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
}
// set result type
- if (IS_VAR_DATA_TYPE(colType)) {
+ if (IS_STR_DATA_TYPE(colType)) {
pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType};
} else {
pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType};
@@ -1514,7 +1530,7 @@ static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
for (int32_t i = 1; i < 3; ++i) {
nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, i));
paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
- if (!IS_VAR_DATA_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) {
+ if (!IS_STR_DATA_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1634,7 +1650,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (!IS_SIGNED_NUMERIC_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType &&
- TSDB_DATA_TYPE_TIMESTAMP != colType) {
+ !IS_TIMESTAMP_TYPE(colType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1660,7 +1676,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
}
uint8_t resType;
- if (IS_SIGNED_NUMERIC_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType || TSDB_DATA_TYPE_TIMESTAMP == colType) {
+ if (IS_SIGNED_NUMERIC_TYPE(colType) || IS_TIMESTAMP_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) {
resType = TSDB_DATA_TYPE_BIGINT;
} else {
resType = TSDB_DATA_TYPE_DOUBLE;
@@ -1682,7 +1698,7 @@ static int32_t translateLength(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
- if (!IS_VAR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) {
+ if (!IS_STR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1714,7 +1730,7 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
for (int32_t i = 0; i < numOfParams; ++i) {
SNode* pPara = nodesListGetNode(pFunc->pParameterList, i);
uint8_t paraType = ((SExprNode*)pPara)->resType.type;
- if (!IS_VAR_DATA_TYPE(paraType) && !IS_NULL_TYPE(paraType)) {
+ if (!IS_STR_DATA_TYPE(paraType) && !IS_NULL_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
if (TSDB_DATA_TYPE_NCHAR == paraType) {
@@ -1770,7 +1786,7 @@ static int32_t translateSubstr(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
uint8_t para0Type = pPara0->resType.type;
uint8_t para1Type = pPara1->resType.type;
- if (!IS_VAR_DATA_TYPE(para0Type) || !IS_INTEGER_TYPE(para1Type)) {
+ if (!IS_STR_DATA_TYPE(para0Type) || !IS_INTEGER_TYPE(para1Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1802,7 +1818,7 @@ static int32_t translateCast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
uint8_t para2Type = pFunc->node.resType.type;
int32_t para2Bytes = pFunc->node.resType.bytes;
- if (IS_VAR_DATA_TYPE(para2Type)) {
+ if (IS_STR_DATA_TYPE(para2Type)) {
para2Bytes -= VARSTR_HEADER_SIZE;
}
if (para2Bytes <= 0 || para2Bytes > 4096) { // cast dst var type length limits to 4096 bytes
@@ -1825,7 +1841,7 @@ static int32_t translateToIso8601(SFunctionNode* pFunc, char* pErrBuf, int32_t l
// param0
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
- if (!IS_INTEGER_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_INTEGER_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1859,7 +1875,7 @@ static int32_t translateToUnixtimestamp(SFunctionNode* pFunc, char* pErrBuf, int
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
- if (!IS_VAR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) {
+ if (!IS_STR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1878,7 +1894,7 @@ static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_
uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
- if ((!IS_VAR_DATA_TYPE(para1Type) && !IS_INTEGER_TYPE(para1Type) && TSDB_DATA_TYPE_TIMESTAMP != para1Type) ||
+ if ((!IS_STR_DATA_TYPE(para1Type) && !IS_INTEGER_TYPE(para1Type) && !IS_TIMESTAMP_TYPE(para1Type)) ||
!IS_INTEGER_TYPE(para2Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1911,7 +1927,7 @@ static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t le
for (int32_t i = 0; i < 2; ++i) {
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
- if (!IS_VAR_DATA_TYPE(paraType) && !IS_INTEGER_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_STR_DATA_TYPE(paraType) && !IS_INTEGER_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
}
@@ -2060,7 +2076,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.name = "min",
.type = FUNCTION_TYPE_MIN,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_SELECT_FUNC,
- .translateFunc = translateInOutNum,
+ .translateFunc = translateMinMax,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = minmaxFunctionSetup,
@@ -2075,7 +2091,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.name = "max",
.type = FUNCTION_TYPE_MAX,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_SELECT_FUNC,
- .translateFunc = translateInOutNum,
+ .translateFunc = translateMinMax,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = minmaxFunctionSetup,
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index 013c58cc4501c091cc745330b584174064aff404..7160541c13538eb7f8e2a5f7cd0d00aaa237ad8d 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -1146,8 +1146,9 @@ bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
return true;
}
-static void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
-static void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
+static STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock);
+static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
+static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos);
static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, const char* tval) {
// the data is loaded, not only the block SMA value
@@ -1159,6 +1160,7 @@ static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, c
}
ASSERT(0);
+ return 0;
}
int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
@@ -1199,10 +1201,10 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
pBuf->v = *(int64_t*)tval;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
} else {
- if (IS_SIGNED_NUMERIC_TYPE(type)) {
+ if (IS_SIGNED_NUMERIC_TYPE(type) || IS_TIMESTAMP_TYPE(type)) {
int64_t prev = 0;
GET_TYPED_DATA(prev, int64_t, type, &pBuf->v);
@@ -1211,10 +1213,9 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(int64_t*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
-
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
uint64_t prev = 0;
GET_TYPED_DATA(prev, uint64_t, type, &pBuf->v);
@@ -1224,7 +1225,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(uint64_t*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
} else if (type == TSDB_DATA_TYPE_DOUBLE) {
@@ -1236,7 +1237,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(double*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
} else if (type == TSDB_DATA_TYPE_FLOAT) {
@@ -1250,7 +1251,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
}
@@ -1262,7 +1263,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
int32_t start = pInput->startRowIndex;
int32_t numOfRows = pInput->numOfRows;
- if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
+ if (IS_SIGNED_NUMERIC_TYPE(type) || IS_TIMESTAMP_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
if (type == TSDB_DATA_TYPE_TINYINT || type == TSDB_DATA_TYPE_BOOL) {
int8_t* pData = (int8_t*)pCol->pData;
int8_t* val = (int8_t*)&pBuf->v;
@@ -1275,7 +1276,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1287,7 +1288,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1306,7 +1307,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1318,7 +1319,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1337,7 +1338,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1349,14 +1350,15 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
numOfElems += 1;
}
- } else if (type == TSDB_DATA_TYPE_BIGINT) {
+ } else if (type == TSDB_DATA_TYPE_BIGINT ||
+ type == TSDB_DATA_TYPE_TIMESTAMP) {
int64_t* pData = (int64_t*)pCol->pData;
int64_t* val = (int64_t*)&pBuf->v;
@@ -1368,7 +1370,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1380,7 +1382,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1401,7 +1403,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1413,7 +1415,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1432,7 +1434,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1444,7 +1446,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1463,7 +1465,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1475,7 +1477,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1494,7 +1496,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1506,7 +1508,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1526,7 +1528,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1538,7 +1540,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1557,7 +1559,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1569,7 +1571,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1580,7 +1582,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
_min_max_over:
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved ) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos);
+ pBuf->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pBuf->nullTupleSaved = true;
}
return numOfElems;
@@ -1599,8 +1601,7 @@ int32_t maxFunction(SqlFunctionCtx* pCtx) {
}
static void setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t rowIndex);
-
-static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rIndex);
+static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rowIndex);
int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
@@ -1648,34 +1649,29 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple
return;
}
- int32_t pageId = pTuplePos->pageId;
- int32_t offset = pTuplePos->offset;
+ if (pCtx->saveHandle.pBuf != NULL) {
+ if (pTuplePos->pageId != -1) {
+ int32_t numOfCols = pCtx->subsidiaries.num;
+ const char* p = loadTupleData(pCtx, pTuplePos);
- if (pTuplePos->pageId != -1) {
- int32_t numOfCols = pCtx->subsidiaries.num;
- SFilePage* pPage = getBufPage(pCtx->pBuf, pageId);
-
- bool* nullList = (bool*)((char*)pPage + offset);
- char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
-
- // todo set the offset value to optimize the performance.
- for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
+ bool* nullList = (bool*)p;
+ char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
- SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
- int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
+      // TODO: precompute per-column byte offsets once instead of re-walking pDstCol->info.bytes per row.
+ for (int32_t j = 0; j < numOfCols; ++j) {
+ SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
+ int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
- SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId);
- ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
- if (nullList[j]) {
- colDataAppendNULL(pDstCol, rowIndex);
- } else {
- colDataAppend(pDstCol, rowIndex, pStart, false);
+ SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId);
+ ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
+ if (nullList[j]) {
+ colDataAppendNULL(pDstCol, rowIndex);
+ } else {
+ colDataAppend(pDstCol, rowIndex, pStart, false);
+ }
+ pStart += pDstCol->info.bytes;
}
- pStart += pDstCol->info.bytes;
}
-
- releaseBufPage(pCtx->pBuf, pPage);
}
}
@@ -2756,15 +2752,15 @@ static FORCE_INLINE TSKEY getRowPTs(SColumnInfoData* pTsColInfo, int32_t rowInde
return *(TSKEY*)colDataGetData(pTsColInfo, rowIndex);
}
-static void saveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) {
+static void firstlastSaveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) {
if (pCtx->subsidiaries.num <= 0) {
return;
}
if (!pInfo->hasResult) {
- doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
+ pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock);
} else {
- doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
+ updateTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
}
}
@@ -2778,7 +2774,7 @@ static void doSaveCurrentVal(SqlFunctionCtx* pCtx, int32_t rowIndex, int64_t cur
memcpy(pInfo->buf, pData, pInfo->bytes);
pInfo->ts = currentTs;
- saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
+ firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
pInfo->hasResult = true;
}
@@ -2982,7 +2978,7 @@ static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, S
pOutput->bytes = pInput->bytes;
memcpy(pOutput->buf, pInput->buf, pOutput->bytes);
- saveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput);
+ firstlastSaveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput);
pOutput->hasResult = true;
}
@@ -3087,7 +3083,7 @@ static void doSaveLastrow(SqlFunctionCtx* pCtx, char* pData, int32_t rowIndex, i
}
pInfo->ts = cts;
- saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
+ firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
pInfo->hasResult = true;
}
@@ -3420,7 +3416,7 @@ int32_t topFunction(SqlFunctionCtx* pCtx) {
}
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos);
+ pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pRes->nullTupleSaved = true;
}
return TSDB_CODE_SUCCESS;
@@ -3448,7 +3444,7 @@ int32_t bottomFunction(SqlFunctionCtx* pCtx) {
}
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos);
+ pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pRes->nullTupleSaved = true;
}
@@ -3500,7 +3496,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
// save the data of this tuple
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
+ pItem->tuplePos = saveTupleData(pCtx, rowIndex, pSrcBlock);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_saveTuple i:%d, item:%p,pageId:%d, offset:%d\n", pEntryInfo->numOfRes, pItem, pItem->tuplePos.pageId,
@@ -3524,7 +3520,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
// save the data of this tuple by over writing the old data
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
+ updateTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_copyTuple pageId:%d, offset:%d", pItem->tuplePos.pageId, pItem->tuplePos.offset);
@@ -3541,38 +3537,13 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
* |(n columns, one bit for each column)| src column #1| src column #2|
* +------------------------------------+--------------+--------------+
*/
-void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
- SFilePage* pPage = NULL;
-
- // todo refactor: move away
- int32_t completeRowSize = pCtx->subsidiaries.num * sizeof(bool);
- for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
- completeRowSize += pc->pExpr->base.resSchema.bytes;
- }
+void* serializeTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SSubsidiaryResInfo* pSubsidiaryies, char* buf) {
+ char* nullList = buf;
+ char* pStart = (char*)(nullList + sizeof(bool) * pSubsidiaryies->num);
- if (pCtx->curBufPage == -1) {
- pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage);
- pPage->num = sizeof(SFilePage);
- } else {
- pPage = getBufPage(pCtx->pBuf, pCtx->curBufPage);
- if (pPage->num + completeRowSize > getBufPageSize(pCtx->pBuf)) {
- // current page is all used, let's prepare a new buffer page
- releaseBufPage(pCtx->pBuf, pPage);
- pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage);
- pPage->num = sizeof(SFilePage);
- }
- }
-
- pPos->pageId = pCtx->curBufPage;
- pPos->offset = pPage->num;
-
- // keep the current row data, extract method
int32_t offset = 0;
- bool* nullList = (bool*)((char*)pPage + pPage->num);
- char* pStart = (char*)(nullList + sizeof(bool) * pCtx->subsidiaries.num);
- for (int32_t i = 0; i < pCtx->subsidiaries.num; ++i) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
+ for (int32_t i = 0; i < pSubsidiaryies->num; ++i) {
+ SqlFunctionCtx* pc = pSubsidiaryies->pCtx[i];
SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
int32_t srcSlotId = pFuncParam->pCol->slotId;
@@ -3593,57 +3564,95 @@ void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock*
offset += pCol->info.bytes;
}
- pPage->num += completeRowSize;
-
- setBufPageDirty(pPage, true);
- releaseBufPage(pCtx->pBuf, pPage);
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_saveTuple pos:%p,pageId:%d, offset:%d\n", pPos, pPos->pageId, pPos->offset);
-#endif
+ return buf;
}
-void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
- SFilePage* pPage = getBufPage(pCtx->pBuf, pPos->pageId);
+static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length) {
+ STuplePos p = {0};
+ if (pHandle->pBuf != NULL) {
+ SFilePage* pPage = NULL;
- int32_t numOfCols = pCtx->subsidiaries.num;
+ if (pHandle->currentPage == -1) {
+ pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage);
+ pPage->num = sizeof(SFilePage);
+ } else {
+ pPage = getBufPage(pHandle->pBuf, pHandle->currentPage);
+ if (pPage->num + length > getBufPageSize(pHandle->pBuf)) {
+ // current page is all used, let's prepare a new buffer page
+ releaseBufPage(pHandle->pBuf, pPage);
+ pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage);
+ pPage->num = sizeof(SFilePage);
+ }
+ }
- bool* nullList = (bool*)((char*)pPage + pPos->offset);
- char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
+ p = (STuplePos) {.pageId = pHandle->currentPage, .offset = pPage->num};
+ memcpy(pPage->data + pPage->num, pBuf, length);
- int32_t offset = 0;
- for (int32_t i = 0; i < numOfCols; ++i) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
- SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
- int32_t srcSlotId = pFuncParam->pCol->slotId;
+ pPage->num += length;
+ setBufPageDirty(pPage, true);
+ releaseBufPage(pHandle->pBuf, pPage);
+ } else {
+    // TODO: non-disk (pBuf == NULL) tuple save policy is not implemented yet; returns zeroed STuplePos.
+ }
- SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId);
- if ((nullList[i] = colDataIsNull_s(pCol, rowIndex)) == true) {
- offset += pCol->info.bytes;
- continue;
- }
+ return p;
+}
- char* p = colDataGetData(pCol, rowIndex);
- if (IS_VAR_DATA_TYPE(pCol->info.type)) {
- memcpy(pStart + offset, p, (pCol->info.type == TSDB_DATA_TYPE_JSON) ? getJsonValueLen(p) : varDataTLen(p));
- } else {
- memcpy(pStart + offset, p, pCol->info.bytes);
+STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock) {
+ if (pCtx->subsidiaries.rowLen == 0) {
+ int32_t rowLen = 0;
+ for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
+ SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
+ rowLen += pc->pExpr->base.resSchema.bytes;
}
- offset += pCol->info.bytes;
+ pCtx->subsidiaries.rowLen = rowLen + pCtx->subsidiaries.num * sizeof(bool);
+ pCtx->subsidiaries.buf = taosMemoryMalloc(pCtx->subsidiaries.rowLen);
}
- setBufPageDirty(pPage, true);
- releaseBufPage(pCtx->pBuf, pPage);
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_copyTuple pos:%p, pageId:%d, offset:%d", pPos, pPos->pageId, pPos->offset);
-#endif
+ char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf);
+ return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen);
+}
+
+static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STuplePos* pPos) {
+ if (pHandle->pBuf != NULL) {
+ SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId);
+ memcpy(pPage->data + pPos->offset, pBuf, length);
+ setBufPageDirty(pPage, true);
+ releaseBufPage(pHandle->pBuf, pPage);
+ } else {
+    // TODO: non-disk (pBuf == NULL) tuple update policy is not implemented yet; silently a no-op.
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
+ char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf);
+ doUpdateTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pPos);
+ return TSDB_CODE_SUCCESS;
+}
+
+static char* doLoadTupleData(SSerializeDataHandle* pHandle, const STuplePos* pPos) {
+ if (pHandle->pBuf != NULL) {
+ SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId);
+ char* p = pPage->data + pPos->offset;
+ releaseBufPage(pHandle->pBuf, pPage);
+ return p;
+ } else {
+ return NULL;
+ }
+}
+
+static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos) {
+ return doLoadTupleData(&pCtx->saveHandle, pPos);
}
int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
STopBotRes* pRes = getTopBotOutputInfo(pCtx);
- int16_t type = pCtx->input.pData[0]->info.type;
+ int16_t type = pCtx->pExpr->base.resSchema.type;
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
@@ -3788,8 +3797,6 @@ int32_t spreadFunction(SqlFunctionCtx* pCtx) {
SColumnInfoData* pCol = pInput->pData[0];
int32_t start = pInput->startRowIndex;
- int32_t numOfRows = pInput->numOfRows;
-
// check the valid data one by one
for (int32_t i = start; i < pInput->numOfRows + start; ++i) {
if (colDataIsNull_f(pCol->nullbitmap, i)) {
@@ -4964,7 +4971,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
if (pInfo->numSampled < pInfo->samples) {
sampleAssignResult(pInfo, data, pInfo->numSampled);
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[pInfo->numSampled]);
+ pInfo->tuplePos[pInfo->numSampled] = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
pInfo->numSampled++;
} else {
@@ -4972,7 +4979,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
if (j < pInfo->samples) {
sampleAssignResult(pInfo, data, j);
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]);
+ updateTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]);
}
}
}
@@ -4995,7 +5002,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) {
}
if (pInfo->numSampled == 0 && pCtx->subsidiaries.num > 0 && !pInfo->nullTupleSaved) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pInfo->nullTuplePos);
+ pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pInfo->nullTupleSaved = true;
}
diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c
index 152a970c48eb5fb374f8806062d264e53b88f664..26735fa263cfed15ead940493b3c1eadf0e29c70 100644
--- a/source/libs/function/src/functionMgt.c
+++ b/source/libs/function/src/functionMgt.c
@@ -101,6 +101,14 @@ bool fmIsBuiltinFunc(const char* pFunc) {
return NULL != taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc, strlen(pFunc));
}
+EFunctionType fmGetFuncType(const char* pFunc) {
+ void* pVal = taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc, strlen(pFunc));
+ if (NULL != pVal) {
+ return funcMgtBuiltins[*(int32_t*)pVal].type;
+ }
+ return FUNCTION_TYPE_UDF;
+}
+
EFuncDataRequired fmFuncDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow) {
if (fmIsUserDefinedFunc(pFunc->funcId) || pFunc->funcId < 0 || pFunc->funcId >= funcMgtBuiltinsNum) {
return FUNC_DATA_REQUIRED_DATA_LOAD;
diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c
index 517253dc01691754425bd93c40bfef2a2750eed5..4c58c0abe50e5784314445934618265231d4805a 100644
--- a/source/libs/function/src/tpercentile.c
+++ b/source/libs/function/src/tpercentile.c
@@ -33,13 +33,13 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx)
SFilePage *buffer = (SFilePage *)taosMemoryCalloc(1, pMemBucket->bytes * pMemBucket->pSlots[slotIdx].info.size + sizeof(SFilePage));
int32_t groupId = getGroupId(pMemBucket->numOfSlots, slotIdx, pMemBucket->times);
- SIDList list = getDataBufPagesIdList(pMemBucket->pBuffer, groupId);
+ SArray* pIdList = *(SArray**)taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId));
int32_t offset = 0;
- for(int32_t i = 0; i < list->size; ++i) {
- struct SPageInfo* pgInfo = *(struct SPageInfo**) taosArrayGet(list, i);
+ for(int32_t i = 0; i < taosArrayGetSize(pIdList); ++i) {
+ int32_t* pageId = taosArrayGet(pIdList, i);
- SFilePage* pg = getBufPage(pMemBucket->pBuffer, getPageId(pgInfo));
+ SFilePage* pg = getBufPage(pMemBucket->pBuffer, *pageId);
memcpy(buffer->data + offset, pg->data, (size_t)(pg->num * pMemBucket->bytes));
offset += (int32_t)(pg->num * pMemBucket->bytes);
@@ -97,11 +97,11 @@ double findOnlyResult(tMemBucket *pMemBucket) {
}
int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times);
- SIDList list = getDataBufPagesIdList(pMemBucket->pBuffer, groupId);
+ SArray* list = *(SArray**)taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId));
assert(list->size == 1);
- struct SPageInfo* pgInfo = (struct SPageInfo*) taosArrayGetP(list, 0);
- SFilePage* pPage = getBufPage(pMemBucket->pBuffer, getPageId(pgInfo));
+ int32_t* pageId = taosArrayGet(list, 0);
+ SFilePage* pPage = getBufPage(pMemBucket->pBuffer, *pageId);
assert(pPage->num == 1);
double v = 0;
@@ -233,7 +233,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval,
pBucket->times = 1;
pBucket->maxCapacity = 200000;
-
+ pBucket->groupPagesMap = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
if (setBoundingBox(&pBucket->range, pBucket->type, minval, maxval) != 0) {
// qError("MemBucket:%p, invalid value range: %f-%f", pBucket, minval, maxval);
taosMemoryFree(pBucket);
@@ -280,8 +280,16 @@ void tMemBucketDestroy(tMemBucket *pBucket) {
return;
}
+ void* p = taosHashIterate(pBucket->groupPagesMap, NULL);
+ while(p) {
+ SArray** p1 = p;
+ p = taosHashIterate(pBucket->groupPagesMap, p);
+ taosArrayDestroy(*p1);
+ }
+
destroyDiskbasedBuf(pBucket->pBuffer);
taosMemoryFreeClear(pBucket->pSlots);
+ taosHashCleanup(pBucket->groupPagesMap);
taosMemoryFreeClear(pBucket);
}
@@ -357,8 +365,16 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
pSlot->info.data = NULL;
}
- pSlot->info.data = getNewBufPage(pBucket->pBuffer, groupId, &pageId);
+    SArray** ppList = taosHashGet(pBucket->groupPagesMap, &groupId, sizeof(groupId));
+    SArray* pPageIdList = (NULL != ppList) ? *ppList : NULL;
+    if (pPageIdList == NULL) {
+      pPageIdList = taosArrayInit(4, sizeof(int32_t));
+      taosHashPut(pBucket->groupPagesMap, &groupId, sizeof(groupId), &pPageIdList, POINTER_BYTES);
+    }
+
+ pSlot->info.data = getNewBufPage(pBucket->pBuffer, &pageId);
pSlot->info.pageId = pageId;
+ taosArrayPush(pPageIdList, &pageId);
}
memcpy(pSlot->info.data->data + pSlot->info.data->num * pBucket->bytes, d, pBucket->bytes);
@@ -476,7 +492,7 @@ double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction)
resetSlotInfo(pMemBucket);
int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times - 1);
- SIDList list = getDataBufPagesIdList(pMemBucket->pBuffer, groupId);
+    SIDList list = *(SArray**)taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId));
assert(list->size > 0);
for (int32_t f = 0; f < list->size; ++f) {
diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c
index d5a3e91eeaa63723029617dfc7be1f72679508bd..5de9c52cc1d0f85c268d86a8e54d184a37c55df3 100644
--- a/source/libs/function/src/tudf.c
+++ b/source/libs/function/src/tudf.c
@@ -81,7 +81,7 @@ static int32_t udfSpawnUdfd(SUdfdData* pData) {
taosDirName(path);
#endif
} else {
- strncpy(path, tsProcPath, strlen(tsProcPath));
+ strncpy(path, tsProcPath, PATH_MAX);
taosDirName(path);
}
#ifdef WINDOWS
diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c
index 1cbc78df48b1cbeb5d1645dcd945168f21d25ba6..a45e4585e8655d318a440e3357d50df2d2513e2c 100644
--- a/source/libs/function/src/udfd.c
+++ b/source/libs/function/src/udfd.c
@@ -84,6 +84,7 @@ typedef struct SUdf {
TUdfAggStartFunc aggStartFunc;
TUdfAggProcessFunc aggProcFunc;
TUdfAggFinishFunc aggFinishFunc;
+ TUdfAggMergeFunc aggMergeFunc;
TUdfInitFunc initFunc;
TUdfDestroyFunc destroyFunc;
@@ -271,6 +272,15 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
break;
}
+ case TSDB_UDF_CALL_AGG_MERGE: {
+ SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0};
+ code = udf->aggMergeFunc(&call->interBuf, &call->interBuf2, &outBuf);
+ freeUdfInterBuf(&call->interBuf);
+ freeUdfInterBuf(&call->interBuf2);
+ subRsp->resultBuf = outBuf;
+
+ break;
+ }
case TSDB_UDF_CALL_AGG_FIN: {
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0};
code = udf->aggFinishFunc(&call->interBuf, &outBuf);
@@ -309,6 +319,10 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
freeUdfInterBuf(&subRsp->resultBuf);
break;
}
+ case TSDB_UDF_CALL_AGG_MERGE: {
+ freeUdfInterBuf(&subRsp->resultBuf);
+ break;
+ }
case TSDB_UDF_CALL_AGG_FIN: {
freeUdfInterBuf(&subRsp->resultBuf);
break;
@@ -439,7 +453,7 @@ void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
goto _return;
}
taosCloseFile(&file);
- strncpy(udf->path, path, strlen(path));
+ strncpy(udf->path, path, PATH_MAX);
tFreeSFuncInfo(pFuncInfo);
taosArrayDestroy(retrieveRsp.pFuncInfos);
msgInfo->code = 0;
@@ -552,15 +566,19 @@ int32_t udfdLoadUdf(char *udfName, SUdf *udf) {
uv_dlsym(&udf->lib, processFuncName, (void **)(&udf->aggProcFunc));
char startFuncName[TSDB_FUNC_NAME_LEN + 6] = {0};
char *startSuffix = "_start";
- strncpy(startFuncName, processFuncName, strlen(processFuncName));
+ strncpy(startFuncName, processFuncName, sizeof(startFuncName));
strncat(startFuncName, startSuffix, strlen(startSuffix));
uv_dlsym(&udf->lib, startFuncName, (void **)(&udf->aggStartFunc));
char finishFuncName[TSDB_FUNC_NAME_LEN + 7] = {0};
char *finishSuffix = "_finish";
- strncpy(finishFuncName, processFuncName, strlen(processFuncName));
+ strncpy(finishFuncName, processFuncName, sizeof(finishFuncName));
strncat(finishFuncName, finishSuffix, strlen(finishSuffix));
uv_dlsym(&udf->lib, finishFuncName, (void **)(&udf->aggFinishFunc));
- // TODO: merge
+  char mergeFuncName[TSDB_FUNC_NAME_LEN + 6] = {0};
+  char *mergeSuffix = "_merge";
+  strncpy(mergeFuncName, processFuncName, sizeof(mergeFuncName));
+  strncat(mergeFuncName, mergeSuffix, strlen(mergeSuffix));
+  uv_dlsym(&udf->lib, mergeFuncName, (void **)(&udf->aggMergeFunc));
}
return 0;
}
diff --git a/source/libs/index/src/indexComm.c b/source/libs/index/src/indexComm.c
index 4f33d98f9e4f7e5b210922b0dd6da0b5448d4472..cd52d122f781e3210448904af7b9ac0d3b4f9046 100644
--- a/source/libs/index/src/indexComm.c
+++ b/source/libs/index/src/indexComm.c
@@ -171,6 +171,7 @@ TExeCond tCompare(__compar_fn_t func, int8_t cmptype, void* a, void* b, int8_t d
return tDoCompare(func, cmptype, &va, &vb);
}
assert(0);
+ return BREAK;
#endif
}
TExeCond tDoCompare(__compar_fn_t func, int8_t comparType, void* a, void* b) {
diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c
index 21aeaba70bb02f6f44c2fc6d40d07515201ee25a..75844ce76f1cb50d6847709309dae1ed3f77bf70 100644
--- a/source/libs/index/src/indexFilter.c
+++ b/source/libs/index/src/indexFilter.c
@@ -255,6 +255,13 @@ static int32_t sifInitOperParams(SIFParam **params, SOperatorNode *node, SIFCtx
if (node->opType == OP_TYPE_JSON_GET_VALUE) {
return code;
}
+ if ((node->pLeft != NULL && nodeType(node->pLeft) == QUERY_NODE_COLUMN) &&
+ (node->pRight != NULL && nodeType(node->pRight) == QUERY_NODE_VALUE)) {
+ SColumnNode *cn = (SColumnNode *)(node->pLeft);
+ if (cn->node.resType.type == TSDB_DATA_TYPE_JSON) {
+ SIF_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
+ }
+ }
SIFParam *paramList = taosMemoryCalloc(nParam, sizeof(SIFParam));
if (NULL == paramList) {
diff --git a/source/libs/index/src/indexFstFile.c b/source/libs/index/src/indexFstFile.c
index 4f278c7af6adfa8ed4e890b06944d5d5c9560f43..1900e50973ade5278482162df05d1cb528365238 100644
--- a/source/libs/index/src/indexFstFile.c
+++ b/source/libs/index/src/indexFstFile.c
@@ -39,7 +39,8 @@ static void idxGenLRUKey(char* buf, const char* path, int32_t blockId) {
}
static int idxFileCtxDoWrite(IFileCtx* ctx, uint8_t* buf, int len) {
if (ctx->type == TFILE) {
- assert(len == taosWriteFile(ctx->file.pFile, buf, len));
+ int nwr = taosWriteFile(ctx->file.pFile, buf, len);
+ assert(nwr == len);
} else {
memcpy(ctx->mem.buf + ctx->offset, buf, len);
}
diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c
index 9390d129df4f536070320246555d690ab8b4972c..eb0b604d37459342a403ab5120c22e2bf5dc4b13 100644
--- a/source/libs/nodes/src/nodesCloneFuncs.c
+++ b/source/libs/nodes/src/nodesCloneFuncs.c
@@ -545,6 +545,7 @@ static int32_t physiSysTableScanCopy(const SSystemTableScanPhysiNode* pSrc, SSys
COPY_OBJECT_FIELD(mgmtEpSet, sizeof(SEpSet));
COPY_SCALAR_FIELD(showRewrite);
COPY_SCALAR_FIELD(accountId);
+ COPY_SCALAR_FIELD(sysInfo);
return TSDB_CODE_SUCCESS;
}
@@ -776,6 +777,7 @@ SNode* nodesCloneNode(const SNode* pNode) {
code = physiSessionCopy((const SSessionWinodwPhysiNode*)pNode, (SSessionWinodwPhysiNode*)pDst);
break;
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
code = physiPartitionCopy((const SPartitionPhysiNode*)pNode, (SPartitionPhysiNode*)pDst);
break;
default:
diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c
index 0f32001c476c2e98465a0ce3f07fb705c3f53244..8976daadbdada2365dbb2e3bf2af4f807c4ac26f 100644
--- a/source/libs/nodes/src/nodesCodeFuncs.c
+++ b/source/libs/nodes/src/nodesCodeFuncs.c
@@ -265,6 +265,8 @@ const char* nodesNodeName(ENodeType type) {
return "PhysiStreamStateWindow";
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
return "PhysiPartition";
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
+ return "PhysiStreamPartition";
case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
return "PhysiIndefRowsFunc";
case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC:
@@ -1654,6 +1656,7 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) {
static const char* jkSysTableScanPhysiPlanMnodeEpSet = "MnodeEpSet";
static const char* jkSysTableScanPhysiPlanShowRewrite = "ShowRewrite";
static const char* jkSysTableScanPhysiPlanAccountId = "AccountId";
+static const char* jkSysTableScanPhysiPlanSysInfo = "SysInfo";
static int32_t physiSysTableScanNodeToJson(const void* pObj, SJson* pJson) {
const SSystemTableScanPhysiNode* pNode = (const SSystemTableScanPhysiNode*)pObj;
@@ -1668,6 +1671,9 @@ static int32_t physiSysTableScanNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkSysTableScanPhysiPlanAccountId, pNode->accountId);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddBoolToObject(pJson, jkSysTableScanPhysiPlanSysInfo, pNode->sysInfo);
+ }
return code;
}
@@ -1684,7 +1690,9 @@ static int32_t jsonToPhysiSysTableScanNode(const SJson* pJson, void* pObj) {
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkSysTableScanPhysiPlanAccountId, pNode->accountId, code);
- ;
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetBoolValue(pJson, jkSysTableScanPhysiPlanSysInfo, &pNode->sysInfo);
}
return code;
@@ -4479,6 +4487,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
return physiStateWindowNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
return physiPartitionNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
return physiIndefRowsFuncNodeToJson(pObj, pJson);
@@ -4626,6 +4635,7 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
return jsonToPhysiStateWindowNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
return jsonToPhysiPartitionNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
return jsonToPhysiIndefRowsFuncNode(pJson, pObj);
diff --git a/source/libs/nodes/src/nodesToSQLFuncs.c b/source/libs/nodes/src/nodesToSQLFuncs.c
index e521c57c3d80eac9455ab9affa5a4b053983ef84..9325d0288636ca7e22fe4fdd3a8e50ff90cdf0de 100644
--- a/source/libs/nodes/src/nodesToSQLFuncs.c
+++ b/source/libs/nodes/src/nodesToSQLFuncs.c
@@ -135,7 +135,12 @@ int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) {
NODES_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
- *len += snprintf(buf + *len, bufSize - *len, "%s", t);
+ int32_t tlen = strlen(t);
+ if (tlen > 32) {
+ *len += snprintf(buf + *len, bufSize - *len, "%.*s...%s", 32, t, t + tlen - 1);
+ } else {
+ *len += snprintf(buf + *len, bufSize - *len, "%s", t);
+ }
taosMemoryFree(t);
return TSDB_CODE_SUCCESS;
@@ -199,12 +204,17 @@ int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) {
SNodeListNode *pListNode = (SNodeListNode *)pNode;
SNode *node = NULL;
bool first = true;
+ int32_t num = 0;
*len += snprintf(buf + *len, bufSize - *len, "(");
FOREACH(node, pListNode->pNodeList) {
if (!first) {
*len += snprintf(buf + *len, bufSize - *len, ", ");
+ if (++num >= 10) {
+ *len += snprintf(buf + *len, bufSize - *len, "...");
+ break;
+ }
}
NODES_ERR_RET(nodesNodeToSQL(node, buf, bufSize, len));
first = false;
diff --git a/source/libs/nodes/src/nodesTraverseFuncs.c b/source/libs/nodes/src/nodesTraverseFuncs.c
index 2e23998aad9463fb7a4a9b6834ceab2f7ea51e55..728e173ff85e87d553d118f0baf0022a99c58f5d 100644
--- a/source/libs/nodes/src/nodesTraverseFuncs.c
+++ b/source/libs/nodes/src/nodesTraverseFuncs.c
@@ -537,7 +537,8 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk
}
break;
}
- case QUERY_NODE_PHYSICAL_PLAN_PARTITION: {
+ case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION: {
SPartitionPhysiNode* pPart = (SPartitionPhysiNode*)pNode;
res = walkPhysiNode((SPhysiNode*)pNode, order, walker, pContext);
if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c
index d13057a93e824c2b94d94a006664b4cbc4c2f870..61b2ad954f3c91ed8d3bc8f7b9fb76c3c49cda9c 100644
--- a/source/libs/nodes/src/nodesUtilFuncs.c
+++ b/source/libs/nodes/src/nodesUtilFuncs.c
@@ -322,6 +322,8 @@ SNode* nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(SStreamStateWinodwPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
return makeNode(type, sizeof(SPartitionPhysiNode));
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
+ return makeNode(type, sizeof(SStreamPartitionPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
return makeNode(type, sizeof(SIndefRowsFuncPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC:
@@ -951,7 +953,8 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pPhyNode->pStateKey);
break;
}
- case QUERY_NODE_PHYSICAL_PLAN_PARTITION: {
+ case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION: {
SPartitionPhysiNode* pPhyNode = (SPartitionPhysiNode*)pNode;
destroyPhysiNode((SPhysiNode*)pPhyNode);
nodesDestroyList(pPhyNode->pExprs);
diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y
index 56e68d8374518ab7494371151513c099bc37ab80..9bff061d02fbfa8d5795dff82c9ec93b7093f96d 100644
--- a/source/libs/parser/inc/sql.y
+++ b/source/libs/parser/inc/sql.y
@@ -495,12 +495,9 @@ bufsize_opt(A) ::= BUFSIZE NK_INTEGER(B).
/************************************************ create/drop stream **************************************************/
cmd ::= CREATE STREAM not_exists_opt(E) stream_name(A)
- stream_options(B) into_opt(C) AS query_expression(D). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, D); }
+ stream_options(B) INTO full_table_name(C) AS query_expression(D). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, D); }
cmd ::= DROP STREAM exists_opt(A) stream_name(B). { pCxt->pRootNode = createDropStreamStmt(pCxt, A, &B); }
-into_opt(A) ::= . { A = NULL; }
-into_opt(A) ::= INTO full_table_name(B). { A = B; }
-
stream_options(A) ::= . { A = createStreamOptions(pCxt); }
stream_options(A) ::= stream_options(B) TRIGGER AT_ONCE. { ((SStreamOptions*)B)->triggerType = STREAM_TRIGGER_AT_ONCE; A = B; }
stream_options(A) ::= stream_options(B) TRIGGER WINDOW_CLOSE. { ((SStreamOptions*)B)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; A = B; }
diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c
index 4d0b0bbb2533111fe31d4810c58270bea5b22314..f39fed467764966092eb55313155903fdf7f8a54 100644
--- a/source/libs/parser/src/parAstCreater.c
+++ b/source/libs/parser/src/parAstCreater.c
@@ -795,6 +795,20 @@ SNode* createSetOperator(SAstCreateContext* pCxt, ESetOperatorType type, SNode*
return (SNode*)setOp;
}
+static void updateWalOptionsDefault(SDatabaseOptions* pOptions) {
+ if (!pOptions->walRetentionPeriodIsSet) {
+ pOptions->walRetentionPeriod =
+ pOptions->replica > 1 ? TSDB_REPS_DEF_DB_WAL_RET_PERIOD : TSDB_REP_DEF_DB_WAL_RET_PERIOD;
+ }
+ if (!pOptions->walRetentionSizeIsSet) {
+ pOptions->walRetentionSize = pOptions->replica > 1 ? TSDB_REPS_DEF_DB_WAL_RET_SIZE : TSDB_REP_DEF_DB_WAL_RET_SIZE;
+ }
+ if (!pOptions->walRollPeriodIsSet) {
+ pOptions->walRollPeriod =
+ pOptions->replica > 1 ? TSDB_REPS_DEF_DB_WAL_ROLL_PERIOD : TSDB_REP_DEF_DB_WAL_ROLL_PERIOD;
+ }
+}
+
SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt) {
CHECK_PARSER_STATUS(pCxt);
SDatabaseOptions* pOptions = (SDatabaseOptions*)nodesMakeNode(QUERY_NODE_DATABASE_OPTIONS);
@@ -819,9 +833,7 @@ SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt) {
pOptions->numOfVgroups = TSDB_DEFAULT_VN_PER_DB;
pOptions->singleStable = TSDB_DEFAULT_DB_SINGLE_STABLE;
pOptions->schemaless = TSDB_DEFAULT_DB_SCHEMALESS;
- pOptions->walRetentionPeriod = TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD;
- pOptions->walRetentionSize = TSDB_DEFAULT_DB_WAL_RETENTION_SIZE;
- pOptions->walRollPeriod = TSDB_DEFAULT_DB_WAL_ROLL_PERIOD;
+ updateWalOptionsDefault(pOptions);
pOptions->walSegmentSize = TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE;
return (SNode*)pOptions;
}
@@ -859,78 +871,83 @@ SNode* createAlterDatabaseOptions(SAstCreateContext* pCxt) {
SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOptionType type, void* pVal) {
CHECK_PARSER_STATUS(pCxt);
+ SDatabaseOptions* pDbOptions = (SDatabaseOptions*)pOptions;
switch (type) {
case DB_OPTION_BUFFER:
- ((SDatabaseOptions*)pOptions)->buffer = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->buffer = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_CACHEMODEL:
- COPY_STRING_FORM_STR_TOKEN(((SDatabaseOptions*)pOptions)->cacheModelStr, (SToken*)pVal);
+ COPY_STRING_FORM_STR_TOKEN(pDbOptions->cacheModelStr, (SToken*)pVal);
break;
case DB_OPTION_CACHESIZE:
- ((SDatabaseOptions*)pOptions)->cacheLastSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->cacheLastSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_COMP:
- ((SDatabaseOptions*)pOptions)->compressionLevel = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->compressionLevel = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_DAYS: {
SToken* pToken = pVal;
if (TK_NK_INTEGER == pToken->type) {
- ((SDatabaseOptions*)pOptions)->daysPerFile = taosStr2Int32(pToken->z, NULL, 10) * 1440;
+ pDbOptions->daysPerFile = taosStr2Int32(pToken->z, NULL, 10) * 1440;
} else {
- ((SDatabaseOptions*)pOptions)->pDaysPerFile = (SValueNode*)createDurationValueNode(pCxt, pToken);
+ pDbOptions->pDaysPerFile = (SValueNode*)createDurationValueNode(pCxt, pToken);
}
break;
}
case DB_OPTION_FSYNC:
- ((SDatabaseOptions*)pOptions)->fsyncPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->fsyncPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_MAXROWS:
- ((SDatabaseOptions*)pOptions)->maxRowsPerBlock = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->maxRowsPerBlock = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_MINROWS:
- ((SDatabaseOptions*)pOptions)->minRowsPerBlock = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->minRowsPerBlock = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_KEEP:
- ((SDatabaseOptions*)pOptions)->pKeep = pVal;
+ pDbOptions->pKeep = pVal;
break;
case DB_OPTION_PAGES:
- ((SDatabaseOptions*)pOptions)->pages = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->pages = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_PAGESIZE:
- ((SDatabaseOptions*)pOptions)->pagesize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->pagesize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_PRECISION:
- COPY_STRING_FORM_STR_TOKEN(((SDatabaseOptions*)pOptions)->precisionStr, (SToken*)pVal);
+ COPY_STRING_FORM_STR_TOKEN(pDbOptions->precisionStr, (SToken*)pVal);
break;
case DB_OPTION_REPLICA:
- ((SDatabaseOptions*)pOptions)->replica = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->replica = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ updateWalOptionsDefault(pDbOptions);
break;
case DB_OPTION_STRICT:
- COPY_STRING_FORM_STR_TOKEN(((SDatabaseOptions*)pOptions)->strictStr, (SToken*)pVal);
+ COPY_STRING_FORM_STR_TOKEN(pDbOptions->strictStr, (SToken*)pVal);
break;
case DB_OPTION_WAL:
- ((SDatabaseOptions*)pOptions)->walLevel = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walLevel = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_VGROUPS:
- ((SDatabaseOptions*)pOptions)->numOfVgroups = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->numOfVgroups = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_SINGLE_STABLE:
- ((SDatabaseOptions*)pOptions)->singleStable = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->singleStable = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_RETENTIONS:
- ((SDatabaseOptions*)pOptions)->pRetentions = pVal;
+ pDbOptions->pRetentions = pVal;
break;
case DB_OPTION_WAL_RETENTION_PERIOD:
- ((SDatabaseOptions*)pOptions)->walRetentionPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRetentionPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRetentionPeriodIsSet = true;
break;
case DB_OPTION_WAL_RETENTION_SIZE:
- ((SDatabaseOptions*)pOptions)->walRetentionSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRetentionSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRetentionSizeIsSet = true;
break;
case DB_OPTION_WAL_ROLL_PERIOD:
- ((SDatabaseOptions*)pOptions)->walRollPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRollPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRollPeriodIsSet = true;
break;
case DB_OPTION_WAL_SEGMENT_SIZE:
- ((SDatabaseOptions*)pOptions)->walSegmentSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walSegmentSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
default:
break;
@@ -1251,7 +1268,8 @@ SNode* createUseDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName) {
static bool needDbShowStmt(ENodeType type) {
return QUERY_NODE_SHOW_TABLES_STMT == type || QUERY_NODE_SHOW_STABLES_STMT == type ||
- QUERY_NODE_SHOW_VGROUPS_STMT == type;
+ QUERY_NODE_SHOW_VGROUPS_STMT == type || QUERY_NODE_SHOW_INDEXES_STMT == type ||
+ QUERY_NODE_SHOW_TAGS_STMT == type;
}
SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type) {
@@ -1264,7 +1282,7 @@ SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type) {
SNode* createShowStmtWithCond(SAstCreateContext* pCxt, ENodeType type, SNode* pDbName, SNode* pTbName,
EOperatorType tableCondType) {
CHECK_PARSER_STATUS(pCxt);
- if (needDbShowStmt(type) && NULL == pDbName && NULL == pCxt->pQueryCxt->db) {
+ if (needDbShowStmt(type) && NULL == pDbName) {
snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "db not specified");
pCxt->errCode = TSDB_CODE_PAR_SYNTAX_ERROR;
return NULL;
diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c
index ffa7729745021be10cfc22aa66dab7f7b3abccb3..aa9ead2e278c96daeff7cff8a7f44ba9fa5aa2a6 100644
--- a/source/libs/parser/src/parAstParser.c
+++ b/source/libs/parser/src/parAstParser.c
@@ -97,16 +97,23 @@ typedef struct SCollectMetaKeyCxt {
typedef struct SCollectMetaKeyFromExprCxt {
SCollectMetaKeyCxt* pComCxt;
+ bool hasLastRow;
int32_t errCode;
} SCollectMetaKeyFromExprCxt;
static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt);
static EDealRes collectMetaKeyFromFunction(SCollectMetaKeyFromExprCxt* pCxt, SFunctionNode* pFunc) {
- if (fmIsBuiltinFunc(pFunc->functionName)) {
- return DEAL_RES_CONTINUE;
+ switch (fmGetFuncType(pFunc->functionName)) {
+ case FUNCTION_TYPE_LAST_ROW:
+ pCxt->hasLastRow = true;
+ break;
+ case FUNCTION_TYPE_UDF:
+ pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache);
+ break;
+ default:
+ break;
}
- pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache);
return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
}
@@ -136,9 +143,6 @@ static int32_t collectMetaKeyFromRealTableImpl(SCollectMetaKeyCxt* pCxt, const c
if (TSDB_CODE_SUCCESS == code && (0 == strcmp(pTable, TSDB_INS_TABLE_DNODE_VARIABLES))) {
code = reserveDnodeRequiredInCache(pCxt->pMetaCache);
}
- if (TSDB_CODE_SUCCESS == code) {
- code = reserveDbCfgInCache(pCxt->pParseCxt->acctId, pDb, pCxt->pMetaCache);
- }
return code;
}
@@ -185,9 +189,19 @@ static int32_t collectMetaKeyFromSetOperator(SCollectMetaKeyCxt* pCxt, SSetOpera
return code;
}
+static int32_t reserveDbCfgForLastRow(SCollectMetaKeyCxt* pCxt, SNode* pTable) {
+ if (NULL == pTable || QUERY_NODE_REAL_TABLE != nodeType(pTable)) {
+ return TSDB_CODE_SUCCESS;
+ }
+ return reserveDbCfgInCache(pCxt->pParseCxt->acctId, ((SRealTableNode*)pTable)->table.dbName, pCxt->pMetaCache);
+}
+
static int32_t collectMetaKeyFromSelect(SCollectMetaKeyCxt* pCxt, SSelectStmt* pStmt) {
- SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS};
+ SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .hasLastRow = false, .errCode = TSDB_CODE_SUCCESS};
nodesWalkSelectStmt(pStmt, SQL_CLAUSE_FROM, collectMetaKeyFromExprImpl, &cxt);
+ if (TSDB_CODE_SUCCESS == cxt.errCode && cxt.hasLastRow) {
+ cxt.errCode = reserveDbCfgForLastRow(pCxt, pStmt->pFromTable);
+ }
return cxt.errCode;
}
@@ -360,12 +374,17 @@ static int32_t collectMetaKeyFromShowIndexes(SCollectMetaKeyCxt* pCxt, SShowStmt
}
static int32_t collectMetaKeyFromShowStables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
- return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STABLES,
- pCxt->pMetaCache);
+ int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STABLES,
+ pCxt->pMetaCache);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = reserveUserAuthInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->pUser,
+ ((SValueNode*)pStmt->pDbName)->literal, AUTH_TYPE_READ, pCxt->pMetaCache);
+ }
+ return code;
}
static int32_t collectMetaKeyFromShowStreams(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
- return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS,
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STREAMS,
pCxt->pMetaCache);
}
@@ -373,11 +392,11 @@ static int32_t collectMetaKeyFromShowTables(SCollectMetaKeyCxt* pCxt, SShowStmt*
int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TABLES,
pCxt->pMetaCache);
if (TSDB_CODE_SUCCESS == code) {
- if (NULL != pStmt->pDbName) {
- code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
- } else {
- code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, pCxt->pMetaCache);
- }
+ code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = reserveUserAuthInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->pUser,
+ ((SValueNode*)pStmt->pDbName)->literal, AUTH_TYPE_READ, pCxt->pMetaCache);
}
return code;
}
@@ -386,11 +405,7 @@ static int32_t collectMetaKeyFromShowTags(SCollectMetaKeyCxt* pCxt, SShowStmt* p
int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TAGS,
pCxt->pMetaCache);
if (TSDB_CODE_SUCCESS == code) {
- if (NULL != pStmt->pDbName) {
- code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
- } else {
- code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, pCxt->pMetaCache);
- }
+ code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
}
return code;
}
@@ -411,7 +426,7 @@ static int32_t collectMetaKeyFromShowVgroups(SCollectMetaKeyCxt* pCxt, SShowStmt
}
static int32_t collectMetaKeyFromShowTopics(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
- return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TOPICS,
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TOPICS,
pCxt->pMetaCache);
}
@@ -464,6 +479,9 @@ static int32_t collectMetaKeyFromShowCreateTable(SCollectMetaKeyCxt* pCxt, SShow
if (TSDB_CODE_SUCCESS == code) {
code = reserveDbCfgInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = reserveUserAuthInCacheExt(pCxt->pParseCxt->pUser, &name, AUTH_TYPE_READ, pCxt->pMetaCache);
+ }
return code;
}
@@ -503,7 +521,7 @@ static int32_t collectMetaKeyFromShowBlockDist(SCollectMetaKeyCxt* pCxt, SShowTa
}
static int32_t collectMetaKeyFromShowSubscriptions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
- return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_SUBSCRIPTIONS,
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SUBSCRIPTIONS,
pCxt->pMetaCache);
}
diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c
index befc822808c7b50eeaea5753a61bb10ffef81523..9d73be745468c33f6041f5f6bb2a9cd9bfb51b52 100644
--- a/source/libs/parser/src/parAuthenticator.c
+++ b/source/libs/parser/src/parAuthenticator.c
@@ -96,6 +96,14 @@ static int32_t authInsert(SAuthCxt* pCxt, SInsertStmt* pInsert) {
return code;
}
+static int32_t authShowTables(SAuthCxt* pCxt, SShowStmt* pStmt) {
+ return checkAuth(pCxt, ((SValueNode*)pStmt->pDbName)->literal, AUTH_TYPE_READ);
+}
+
+static int32_t authShowCreateTable(SAuthCxt* pCxt, SShowCreateTableStmt* pStmt) {
+ return checkAuth(pCxt, pStmt->dbName, AUTH_TYPE_READ);
+}
+
static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
switch (nodeType(pStmt)) {
case QUERY_NODE_SET_OPERATOR:
@@ -108,6 +116,27 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
return authDelete(pCxt, (SDeleteStmt*)pStmt);
case QUERY_NODE_INSERT_STMT:
return authInsert(pCxt, (SInsertStmt*)pStmt);
+ case QUERY_NODE_SHOW_DNODES_STMT:
+ case QUERY_NODE_SHOW_MNODES_STMT:
+ case QUERY_NODE_SHOW_MODULES_STMT:
+ case QUERY_NODE_SHOW_QNODES_STMT:
+ case QUERY_NODE_SHOW_SNODES_STMT:
+ case QUERY_NODE_SHOW_BNODES_STMT:
+ case QUERY_NODE_SHOW_CLUSTER_STMT:
+ case QUERY_NODE_SHOW_LICENCES_STMT:
+ case QUERY_NODE_SHOW_VGROUPS_STMT:
+ case QUERY_NODE_SHOW_VARIABLES_STMT:
+ case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
+ case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT:
+ case QUERY_NODE_SHOW_VNODES_STMT:
+ case QUERY_NODE_SHOW_SCORES_STMT:
+ return !pCxt->pParseCxt->enableSysInfo ? TSDB_CODE_PAR_PERMISSION_DENIED : TSDB_CODE_SUCCESS;
+ case QUERY_NODE_SHOW_TABLES_STMT:
+ case QUERY_NODE_SHOW_STABLES_STMT:
+ return authShowTables(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
+ case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
+ return authShowCreateTable(pCxt, (SShowCreateTableStmt*)pStmt);
default:
break;
}
diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c
index de9f8156187514fe27020beec1e641e132d3cfff..162161b67a185bea356d00b426664efe4ced5ec3 100644
--- a/source/libs/parser/src/parInsert.c
+++ b/source/libs/parser/src/parInsert.c
@@ -125,6 +125,37 @@ static int32_t skipInsertInto(char** pSql, SMsgBuf* pMsg) {
return TSDB_CODE_SUCCESS;
}
+static char* tableNameGetPosition(SToken* pToken, char target) {
+ bool inEscape = false;
+ bool inQuote = false;
+ char quotaStr = 0;
+
+ for (uint32_t i = 0; i < pToken->n; ++i) {
+ if (*(pToken->z + i) == target && (!inEscape) && (!inQuote)) {
+ return pToken->z + i;
+ }
+
+ if (*(pToken->z + i) == TS_ESCAPE_CHAR) {
+ if (!inQuote) {
+ inEscape = !inEscape;
+ }
+ }
+
+ if (*(pToken->z + i) == '\'' || *(pToken->z + i) == '"') {
+ if (!inEscape) {
+ if (!inQuote) {
+ quotaStr = *(pToken->z + i);
+ inQuote = !inQuote;
+ } else if (quotaStr == *(pToken->z + i)) {
+ inQuote = !inQuote;
+ }
+ }
+ }
+ }
+
+ return NULL;
+}
+
static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, const char* dbName, SMsgBuf* pMsgBuf) {
const char* msg1 = "name too long";
const char* msg2 = "invalid database name";
@@ -132,7 +163,7 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con
const char* msg4 = "invalid table name";
int32_t code = TSDB_CODE_SUCCESS;
- char* p = strnchr(pTableName->z, TS_PATH_DELIMITER[0], pTableName->n, true);
+ char* p = tableNameGetPosition(pTableName, TS_PATH_DELIMITER[0]);
if (p != NULL) { // db has been specified in sql string so we ignore current db path
assert(*p == TS_PATH_DELIMITER[0]);
@@ -471,6 +502,10 @@ static int32_t parseValueToken(char** end, SToken* pToken, SSchema* pSchema, int
return func(pMsgBuf, NULL, 0, param);
}
+ if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) {
+ return buildSyntaxErrMsg(pMsgBuf, "invalid numeric data", pToken->z);
+ }
+
switch (pSchema->type) {
case TSDB_DATA_TYPE_BOOL: {
if ((pToken->type == TK_NK_BOOL || pToken->type == TK_NK_STRING) && (pToken->n != 0)) {
@@ -751,6 +786,7 @@ static void buildCreateTbReq(SVCreateTbReq* pTbReq, const char* tname, STag* pTa
if (sname) pTbReq->ctb.name = strdup(sname);
pTbReq->ctb.pTag = (uint8_t*)pTag;
pTbReq->ctb.tagName = taosArrayDup(tagName);
+ pTbReq->ttl = TSDB_DEFAULT_TABLE_TTL;
pTbReq->commentLen = -1;
return;
@@ -1086,6 +1122,40 @@ static int32_t ignoreAutoCreateTableClause(SInsertParseContext* pCxt) {
return code;
}
+static int32_t parseTableOptions(SInsertParseContext* pCxt) {
+ do {
+ int32_t index = 0;
+ SToken sToken;
+ NEXT_TOKEN_KEEP_SQL(pCxt->pSql, sToken, index);
+ if (TK_TTL == sToken.type) {
+ pCxt->pSql += index;
+ NEXT_TOKEN(pCxt->pSql, sToken);
+ if (TK_NK_INTEGER != sToken.type) {
+ return buildSyntaxErrMsg(&pCxt->msg, "Invalid option ttl", sToken.z);
+ }
+ pCxt->createTblReq.ttl = taosStr2Int32(sToken.z, NULL, 10);
+ } else if (TK_COMMENT == sToken.type) {
+ pCxt->pSql += index;
+ NEXT_TOKEN(pCxt->pSql, sToken);
+ if (TK_NK_STRING != sToken.type) {
+ return buildSyntaxErrMsg(&pCxt->msg, "Invalid option comment", sToken.z);
+ }
+ if (sToken.n >= TSDB_TB_COMMENT_LEN) {
+ return buildSyntaxErrMsg(&pCxt->msg, "comment too long", sToken.z);
+ }
+ int32_t len = trimString(sToken.z, sToken.n, pCxt->tmpTokenBuf, TSDB_TB_COMMENT_LEN);
+ pCxt->createTblReq.comment = strndup(pCxt->tmpTokenBuf, len);
+ if (NULL == pCxt->createTblReq.comment) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pCxt->createTblReq.commentLen = len;
+ } else {
+ break;
+ }
+ } while (1);
+ return TSDB_CODE_SUCCESS;
+}
+
// pSql -> stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)
static int32_t parseUsingClause(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* tbFName) {
int32_t len = strlen(tbFName);
@@ -1137,7 +1207,7 @@ static int32_t parseUsingClause(SInsertParseContext* pCxt, int32_t tbNo, SName*
return buildSyntaxErrMsg(&pCxt->msg, ") is expected", sToken.z);
}
- return TSDB_CODE_SUCCESS;
+ return parseTableOptions(pCxt);
}
static int parseOneRow(SInsertParseContext* pCxt, STableDataBlocks* pDataBlocks, int16_t timePrec, bool* gotRow,
@@ -1634,6 +1704,10 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
pDb = taosHashIterate(context.pDbFNameHashObj, pDb);
}
}
+ if (pContext->pStmtCb) {
+ context.pVgroupsHashObj = NULL;
+ context.pTableBlockHashObj = NULL;
+ }
destroyInsertParseContext(&context);
return code;
}
@@ -1661,6 +1735,21 @@ static int32_t skipValuesClause(SInsertParseSyntaxCxt* pCxt) {
static int32_t skipTagsClause(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); }
+static int32_t skipTableOptions(SInsertParseSyntaxCxt* pCxt) {
+ do {
+ int32_t index = 0;
+ SToken sToken;
+ NEXT_TOKEN_KEEP_SQL(pCxt->pSql, sToken, index);
+ if (TK_TTL == sToken.type || TK_COMMENT == sToken.type) {
+ pCxt->pSql += index;
+ NEXT_TOKEN(pCxt->pSql, sToken);
+ } else {
+ break;
+ }
+ } while (1);
+ return TSDB_CODE_SUCCESS;
+}
+
// pSql -> [(tag1_name, ...)] TAGS (tag1_value, ...)
static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) {
SToken sToken;
@@ -1679,21 +1768,30 @@ static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) {
return buildSyntaxErrMsg(&pCxt->msg, "( is expected", sToken.z);
}
CHECK_CODE(skipTagsClause(pCxt));
+ CHECK_CODE(skipTableOptions(pCxt));
return TSDB_CODE_SUCCESS;
}
static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, bool isStable, int32_t tableNo, SToken* pTbToken) {
- SName name;
+ SName name = {0};
CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
CHECK_CODE(reserveTableMetaInCacheForInsert(&name, isStable ? CATALOG_REQ_TYPE_META : CATALOG_REQ_TYPE_BOTH, tableNo,
pCxt->pMetaCache));
return TSDB_CODE_SUCCESS;
}
+static int32_t checkTableName(const char* pTableName, SMsgBuf* pMsgBuf) {
+ if (NULL != strchr(pTableName, '.')) {
+ return generateSyntaxErrMsgExt(pMsgBuf, TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME, "The table name cannot contain '.'");
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t collectAutoCreateTableMetaKey(SInsertParseSyntaxCxt* pCxt, int32_t tableNo, SToken* pTbToken) {
- SName name;
+ SName name = {0};
CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
+ CHECK_CODE(checkTableName(name.tname, &pCxt->msg));
CHECK_CODE(reserveTableMetaInCacheForInsert(&name, CATALOG_REQ_TYPE_VGROUP, tableNo, pCxt->pMetaCache));
return TSDB_CODE_SUCCESS;
}
@@ -2214,7 +2312,7 @@ static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SS
SToken sToken = {.n = kv->keyLen, .z = (char*)kv->key};
col_id_t t = lastColIdx + 1;
col_id_t index = ((t == 0 && !isTag) ? 0 : findCol(&sToken, t, nCols, pSchema));
- uDebug("SML, index:%d, t:%d, ncols:%d, kv->name:%s", index, t, nCols, kv->key);
+ uDebug("SML, index:%d, t:%d, ncols:%d", index, t, nCols);
if (index < 0 && t > 0) {
index = findCol(&sToken, 0, t, pSchema);
isOrdered = false;
@@ -2435,9 +2533,7 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols
if (p) kv = *p;
}
- if (!kv || kv->length == 0) {
- MemRowAppend(&pBuf, NULL, 0, ¶m);
- } else {
+ if (kv){
int32_t colLen = kv->length;
if (pColSchema->type == TSDB_DATA_TYPE_TIMESTAMP) {
// uError("SML:data before:%" PRId64 ", precision:%d", kv->i, pTableMeta->tableInfo.precision);
@@ -2450,6 +2546,8 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols
} else {
MemRowAppend(&pBuf, &(kv->value), colLen, ¶m);
}
+ }else{
+ pBuilder->hasNone = true;
}
if (PRIMARYKEY_TIMESTAMP_COL_ID == pColSchema->colId) {
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 09847feb4db8842a908a536b404371a76a0858eb..a88b7b417fc3d7ce807ae6f263db52cab9bcf918 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -142,8 +142,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
},
{
.showType = QUERY_NODE_SHOW_STREAMS_STMT,
- .pDbName = TSDB_PERFORMANCE_SCHEMA_DB,
- .pTableName = TSDB_PERFS_TABLE_STREAMS,
+ .pDbName = TSDB_INFORMATION_SCHEMA_DB,
+ .pTableName = TSDB_INS_TABLE_STREAMS,
.numOfShowCols = 1,
.pShowCols = {"stream_name"}
},
@@ -184,8 +184,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
},
{
.showType = QUERY_NODE_SHOW_TOPICS_STMT,
- .pDbName = TSDB_PERFORMANCE_SCHEMA_DB,
- .pTableName = TSDB_PERFS_TABLE_TOPICS,
+ .pDbName = TSDB_INFORMATION_SCHEMA_DB,
+ .pTableName = TSDB_INS_TABLE_TOPICS,
.numOfShowCols = 1,
.pShowCols = {"topic_name"}
},
@@ -240,8 +240,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
},
{
.showType = QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
- .pDbName = TSDB_PERFORMANCE_SCHEMA_DB,
- .pTableName = TSDB_PERFS_TABLE_SUBSCRIPTIONS,
+ .pDbName = TSDB_INFORMATION_SCHEMA_DB,
+ .pTableName = TSDB_INS_TABLE_SUBSCRIPTIONS,
.numOfShowCols = 1,
.pShowCols = {"*"}
},
@@ -784,6 +784,9 @@ static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* p
int32_t nums = pMeta->tableInfo.numOfColumns +
(igTags ? 0 : ((TSDB_SUPER_TABLE == pMeta->tableType) ? pMeta->tableInfo.numOfTags : 0));
for (int32_t i = 0; i < nums; ++i) {
+ if (invisibleColumn(pCxt->pParseCxt->enableSysInfo, pMeta->tableType, pMeta->schema[i].flags)) {
+ continue;
+ }
SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
if (NULL == pCol) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_OUT_OF_MEMORY);
@@ -826,7 +829,8 @@ static int32_t findAndSetColumn(STranslateContext* pCxt, SColumnNode** pColRef,
}
int32_t nums = pMeta->tableInfo.numOfTags + pMeta->tableInfo.numOfColumns;
for (int32_t i = 0; i < nums; ++i) {
- if (0 == strcmp(pCol->colName, pMeta->schema[i].name)) {
+ if (0 == strcmp(pCol->colName, pMeta->schema[i].name) &&
+ !invisibleColumn(pCxt->pParseCxt->enableSysInfo, pMeta->tableType, pMeta->schema[i].flags)) {
setColumnInfoBySchema((SRealTableNode*)pTable, pMeta->schema + i, (i - pMeta->tableInfo.numOfColumns), pCol);
*pFound = true;
break;
@@ -2156,15 +2160,16 @@ static int32_t setTableIndex(STranslateContext* pCxt, SName* pName, SRealTableNo
return TSDB_CODE_SUCCESS;
}
-static int32_t setTableCacheLastMode(STranslateContext* pCxt, SName* pName, SRealTableNode* pRealTable) {
- if (TSDB_SYSTEM_TABLE == pRealTable->pMeta->tableType) {
+static int32_t setTableCacheLastMode(STranslateContext* pCxt, SSelectStmt* pSelect) {
+ if (!pSelect->hasLastRowFunc || QUERY_NODE_REAL_TABLE != nodeType(pSelect->pFromTable)) {
return TSDB_CODE_SUCCESS;
}
- SDbCfgInfo dbCfg = {0};
- int32_t code = getDBCfg(pCxt, pRealTable->table.dbName, &dbCfg);
+ SRealTableNode* pTable = (SRealTableNode*)pSelect->pFromTable;
+ SDbCfgInfo dbCfg = {0};
+ int32_t code = getDBCfg(pCxt, pTable->table.dbName, &dbCfg);
if (TSDB_CODE_SUCCESS == code) {
- pRealTable->cacheLastMode = dbCfg.cacheLast;
+ pTable->cacheLastMode = dbCfg.cacheLast;
}
return code;
}
@@ -2188,18 +2193,15 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
if (TSDB_CODE_SUCCESS == code) {
code = setTableIndex(pCxt, &name, pRealTable);
}
- if (TSDB_CODE_SUCCESS == code) {
- code = setTableCacheLastMode(pCxt, &name, pRealTable);
- }
}
- pRealTable->table.precision = pRealTable->pMeta->tableInfo.precision;
- pRealTable->table.singleTable = isSingleTable(pRealTable);
if (TSDB_CODE_SUCCESS == code) {
+ pRealTable->table.precision = pRealTable->pMeta->tableInfo.precision;
+ pRealTable->table.singleTable = isSingleTable(pRealTable);
+ if (TSDB_SUPER_TABLE == pRealTable->pMeta->tableType) {
+ pCxt->stableQuery = true;
+ }
code = addNamespace(pCxt, pRealTable);
}
- if (TSDB_SUPER_TABLE == pRealTable->pMeta->tableType) {
- pCxt->stableQuery = true;
- }
break;
}
case QUERY_NODE_TEMP_TABLE: {
@@ -2269,10 +2271,14 @@ static SNode* createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr) {
if (QUERY_NODE_COLUMN == nodeType(pExpr)) {
SColumnNode* pCol = (SColumnNode*)pExpr;
len = snprintf(buf, sizeof(buf), "%s(%s.%s)", pSrcFunc->functionName, pCol->tableAlias, pCol->colName);
+ strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
+ len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pCol->colName);
+ strncpy(pFunc->node.userAlias, buf, TMIN(len, sizeof(pFunc->node.userAlias) - 1));
} else {
len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pExpr->aliasName);
+ strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
+ strncpy(pFunc->node.userAlias, buf, TMIN(len, sizeof(pFunc->node.userAlias) - 1));
}
- strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
return (SNode*)pFunc;
}
@@ -2471,13 +2477,65 @@ static int32_t translateOrderBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
return code;
}
+static EDealRes needFillImpl(SNode* pNode, void* pContext) {
+ if (isAggFunc(pNode) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType) {
+ *(bool*)pContext = true;
+ return DEAL_RES_END;
+ }
+ return DEAL_RES_CONTINUE;
+}
+
+static bool needFill(SNode* pNode) {
+ bool hasFillFunc = false;
+ nodesWalkExpr(pNode, needFillImpl, &hasFillFunc);
+ return hasFillFunc;
+}
+
+static bool mismatchFillDataType(SDataType origDt, SDataType fillDt) {
+ if (TSDB_DATA_TYPE_NULL == fillDt.type) {
+ return false;
+ }
+ if (IS_NUMERIC_TYPE(origDt.type) && !IS_NUMERIC_TYPE(fillDt.type)) {
+ return true;
+ }
+ if (IS_VAR_DATA_TYPE(origDt.type) && !IS_VAR_DATA_TYPE(fillDt.type)) {
+ return true;
+ }
+ return false;
+}
+
+static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeList* pProjectionList) {
+ if (FILL_MODE_VALUE != pFill->mode) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int32_t fillNo = 0;
+ SNodeListNode* pFillValues = (SNodeListNode*)pFill->pValues;
+ SNode* pProject = NULL;
+ FOREACH(pProject, pProjectionList) {
+ if (needFill(pProject)) {
+ if (fillNo >= LIST_LENGTH(pFillValues->pNodeList)) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch");
+ }
+ if (mismatchFillDataType(((SExprNode*)pProject)->resType,
+ ((SExprNode*)nodesListGetNode(pFillValues->pNodeList, fillNo))->resType)) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch");
+ }
+ ++fillNo;
+ }
+ }
+ if (fillNo != LIST_LENGTH(pFillValues->pNodeList)) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch");
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t translateFillValues(STranslateContext* pCxt, SSelectStmt* pSelect) {
if (NULL == pSelect->pWindow || QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow) ||
NULL == ((SIntervalWindowNode*)pSelect->pWindow)->pFill) {
return TSDB_CODE_SUCCESS;
}
- SFillNode* pFill = (SFillNode*)((SIntervalWindowNode*)pSelect->pWindow)->pFill;
- return TSDB_CODE_SUCCESS;
+ return checkFillValues(pCxt, (SFillNode*)((SIntervalWindowNode*)pSelect->pWindow)->pFill, pSelect->pProjectionList);
}
static int32_t rewriteProjectAlias(SNodeList* pProjectionList) {
@@ -2594,8 +2652,12 @@ static int32_t getQueryTimeRange(STranslateContext* pCxt, SNode* pWhere, STimeWi
return code;
}
-static int32_t checkFill(STranslateContext* pCxt, SFillNode* pFill, SValueNode* pInterval) {
+static int32_t checkFill(STranslateContext* pCxt, SFillNode* pFill, SValueNode* pInterval, bool isInterpFill) {
if (FILL_MODE_NONE == pFill->mode) {
+ if (isInterpFill) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Unsupported fill type");
+ }
+
return TSDB_CODE_SUCCESS;
}
@@ -2635,7 +2697,7 @@ static int32_t translateFill(STranslateContext* pCxt, SSelectStmt* pSelect, SInt
}
((SFillNode*)pInterval->pFill)->timeRange = pSelect->timeRange;
- return checkFill(pCxt, (SFillNode*)pInterval->pFill, (SValueNode*)pInterval->pInterval);
+ return checkFill(pCxt, (SFillNode*)pInterval->pFill, (SValueNode*)pInterval->pInterval, false);
}
static int64_t getMonthsFromTimeVal(int64_t val, int32_t fromPrecision, char unit) {
@@ -2864,7 +2926,7 @@ static int32_t translateInterpFill(STranslateContext* pCxt, SSelectStmt* pSelect
code = getQueryTimeRange(pCxt, pSelect->pRange, &(((SFillNode*)pSelect->pFill)->timeRange));
}
if (TSDB_CODE_SUCCESS == code) {
- code = checkFill(pCxt, (SFillNode*)pSelect->pFill, (SValueNode*)pSelect->pEvery);
+ code = checkFill(pCxt, (SFillNode*)pSelect->pFill, (SValueNode*)pSelect->pEvery, true);
}
return code;
@@ -3080,6 +3142,9 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) {
code = replaceOrderByAliasForSelect(pCxt, pSelect);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = setTableCacheLastMode(pCxt, pSelect);
+ }
return code;
}
@@ -4993,7 +5058,7 @@ static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pSt
return TSDB_CODE_SUCCESS;
}
- if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery) ||
+ if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery) || NULL == ((SSelectStmt*)pStmt->pQuery)->pFromTable ||
QUERY_NODE_REAL_TABLE != nodeType(((SSelectStmt*)pStmt->pQuery)->pFromTable)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query");
}
@@ -6343,8 +6408,8 @@ typedef struct SVgroupDropTableBatch {
char dbName[TSDB_DB_NAME_LEN];
} SVgroupDropTableBatch;
-static void addDropTbReqIntoVgroup(SHashObj* pVgroupHashmap, SDropTableClause* pClause, SVgroupInfo* pVgInfo) {
- SVDropTbReq req = {.name = pClause->tableName, .igNotExists = pClause->ignoreNotExists};
+static void addDropTbReqIntoVgroup(SHashObj* pVgroupHashmap, SDropTableClause* pClause, SVgroupInfo* pVgInfo, uint64_t suid) {
+ SVDropTbReq req = {.name = pClause->tableName, .suid = suid, .igNotExists = pClause->ignoreNotExists};
SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pVgInfo->vgId, sizeof(pVgInfo->vgId));
if (NULL == pTableBatch) {
SVgroupDropTableBatch tBatch = {0};
@@ -6385,7 +6450,7 @@ static int32_t buildDropTableVgroupHashmap(STranslateContext* pCxt, SDropTableCl
code = getTableHashVgroup(pCxt, pClause->dbName, pClause->tableName, &info);
}
if (TSDB_CODE_SUCCESS == code) {
- addDropTbReqIntoVgroup(pVgroupHashmap, pClause, &info);
+ addDropTbReqIntoVgroup(pVgroupHashmap, pClause, &info, pTableMeta->suid);
}
over:
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index 7e27132f3cbc453a5cf09bd487acc75fa546ff7e..7ee6a5b2236b24a676214c3538ed182aa52f427a 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -136,8 +136,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
}
static EDealRes rewriteQueryExprAliasImpl(SNode* pNode, void* pContext) {
- if (nodesIsExprNode(pNode) && QUERY_NODE_COLUMN != nodeType(pNode) && '\0' == ((SExprNode*)pNode)->userAlias[0]) {
- strcpy(((SExprNode*)pNode)->userAlias, ((SExprNode*)pNode)->aliasName);
+ if (nodesIsExprNode(pNode) && QUERY_NODE_COLUMN != nodeType(pNode)) {
sprintf(((SExprNode*)pNode)->aliasName, "#%d", *(int32_t*)pContext);
++(*(int32_t*)pContext);
}
diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c
index c820e955d78dc9439499d21645c2456884edb318..c4bd1aff044a491edede232eff74b8dea1feeadb 100644
--- a/source/libs/parser/src/sql.c
+++ b/source/libs/parser/src/sql.c
@@ -104,26 +104,26 @@
#endif
/************* Begin control #defines *****************************************/
#define YYCODETYPE unsigned short int
-#define YYNOCODE 427
+#define YYNOCODE 426
#define YYACTIONTYPE unsigned short int
#define ParseTOKENTYPE SToken
typedef union {
int yyinit;
ParseTOKENTYPE yy0;
- SAlterOption yy95;
- EOperatorType yy198;
- EOrder yy204;
- int8_t yy215;
- ENullOrder yy277;
- bool yy313;
- int64_t yy473;
- SNodeList* yy544;
- SToken yy617;
- EJoinType yy708;
- SDataType yy784;
- EFillMode yy816;
- SNode* yy840;
- int32_t yy844;
+ SAlterOption yy5;
+ int8_t yy59;
+ int64_t yy69;
+ EJoinType yy156;
+ SNodeList* yy172;
+ EFillMode yy186;
+ SToken yy209;
+ int32_t yy232;
+ SNode* yy272;
+ bool yy293;
+ EOperatorType yy392;
+ ENullOrder yy493;
+ SDataType yy616;
+ EOrder yy818;
} YYMINORTYPE;
#ifndef YYSTACKDEPTH
#define YYSTACKDEPTH 100
@@ -140,16 +140,16 @@ typedef union {
#define ParseCTX_STORE
#define YYFALLBACK 1
#define YYNSTATE 667
-#define YYNRULE 491
+#define YYNRULE 489
#define YYNTOKEN 305
#define YY_MAX_SHIFT 666
-#define YY_MIN_SHIFTREDUCE 973
-#define YY_MAX_SHIFTREDUCE 1463
-#define YY_ERROR_ACTION 1464
-#define YY_ACCEPT_ACTION 1465
-#define YY_NO_ACTION 1466
-#define YY_MIN_REDUCE 1467
-#define YY_MAX_REDUCE 1957
+#define YY_MIN_SHIFTREDUCE 972
+#define YY_MAX_SHIFTREDUCE 1460
+#define YY_ERROR_ACTION 1461
+#define YY_ACCEPT_ACTION 1462
+#define YY_NO_ACTION 1463
+#define YY_MIN_REDUCE 1464
+#define YY_MAX_REDUCE 1952
/************* End control #defines *******************************************/
#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
@@ -216,694 +216,650 @@ typedef union {
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
-#define YY_ACTTAB_COUNT (2548)
+#define YY_ACTTAB_COUNT (2259)
static const YYACTIONTYPE yy_action[] = {
- /* 0 */ 526, 30, 261, 526, 549, 433, 526, 434, 1502, 11,
- /* 10 */ 10, 117, 39, 37, 55, 1653, 1654, 117, 471, 378,
- /* 20 */ 339, 1468, 1264, 1006, 476, 1023, 1290, 1022, 1607, 1791,
- /* 30 */ 1598, 1607, 127, 1340, 1607, 1262, 441, 552, 434, 1502,
- /* 40 */ 469, 1775, 107, 1779, 1290, 106, 105, 104, 103, 102,
- /* 50 */ 101, 100, 99, 98, 1775, 1024, 1335, 1809, 150, 64,
- /* 60 */ 1935, 14, 1567, 1010, 1011, 553, 1771, 1777, 1270, 450,
- /* 70 */ 1761, 125, 577, 165, 39, 37, 1403, 1932, 571, 1771,
- /* 80 */ 1777, 328, 339, 1529, 1264, 551, 161, 1877, 1878, 1,
- /* 90 */ 1882, 571, 1659, 479, 478, 1340, 1823, 1262, 1376, 327,
- /* 100 */ 95, 1792, 580, 1794, 1795, 576, 496, 571, 1657, 344,
- /* 110 */ 1869, 663, 1652, 1654, 330, 1865, 160, 513, 1335, 494,
- /* 120 */ 1935, 492, 1289, 14, 325, 1342, 1343, 1705, 164, 543,
- /* 130 */ 1270, 1161, 1162, 1934, 33, 32, 1895, 1932, 40, 38,
- /* 140 */ 36, 35, 34, 148, 63, 1479, 640, 639, 638, 637,
+ /* 0 */ 433, 1930, 434, 1499, 1593, 441, 526, 434, 1499, 513,
+ /* 10 */ 30, 260, 39, 37, 1929, 326, 325, 117, 1927, 1702,
+ /* 20 */ 339, 1465, 1261, 146, 471, 40, 38, 36, 35, 34,
+ /* 30 */ 1786, 552, 1606, 1337, 1604, 1259, 344, 552, 1287, 1649,
+ /* 40 */ 1651, 378, 107, 1774, 526, 106, 105, 104, 103, 102,
+ /* 50 */ 101, 100, 99, 98, 1770, 117, 1332, 432, 1804, 64,
+ /* 60 */ 436, 14, 476, 36, 35, 34, 553, 148, 1267, 1476,
+ /* 70 */ 450, 1756, 1604, 577, 39, 37, 1400, 1595, 1766, 1772,
+ /* 80 */ 328, 1930, 339, 1526, 1261, 1804, 217, 1286, 1770, 1,
+ /* 90 */ 571, 1005, 1656, 542, 164, 1337, 1818, 1259, 1927, 327,
+ /* 100 */ 95, 1787, 580, 1789, 1790, 576, 43, 571, 1654, 158,
+ /* 110 */ 1864, 663, 1766, 1772, 330, 1860, 159, 513, 1332, 63,
+ /* 120 */ 1930, 78, 1643, 14, 571, 1339, 1340, 1703, 163, 541,
+ /* 130 */ 1267, 1009, 1010, 165, 33, 32, 1890, 1927, 40, 38,
+ /* 140 */ 36, 35, 34, 543, 63, 63, 640, 639, 638, 637,
/* 150 */ 349, 2, 636, 635, 128, 630, 629, 628, 627, 626,
/* 160 */ 625, 624, 139, 620, 619, 618, 348, 347, 615, 614,
- /* 170 */ 1265, 107, 1263, 663, 106, 105, 104, 103, 102, 101,
- /* 180 */ 100, 99, 98, 1809, 36, 35, 34, 1342, 1343, 224,
- /* 190 */ 225, 542, 384, 1268, 1269, 613, 1317, 1318, 1320, 1321,
- /* 200 */ 1322, 1323, 1324, 1325, 573, 569, 1333, 1334, 1336, 1337,
- /* 210 */ 1338, 1339, 1341, 1344, 1467, 1288, 1434, 33, 32, 482,
- /* 220 */ 481, 40, 38, 36, 35, 34, 123, 168, 541, 303,
- /* 230 */ 1465, 223, 1265, 84, 1263, 1264, 477, 480, 116, 115,
- /* 240 */ 114, 113, 112, 111, 110, 109, 108, 305, 1262, 1023,
- /* 250 */ 516, 1022, 22, 174, 1600, 1268, 1269, 1490, 1317, 1318,
- /* 260 */ 1320, 1321, 1322, 1323, 1324, 1325, 573, 569, 1333, 1334,
- /* 270 */ 1336, 1337, 1338, 1339, 1341, 1344, 39, 37, 1489, 1024,
- /* 280 */ 538, 1270, 168, 526, 339, 71, 1264, 1488, 70, 354,
- /* 290 */ 1244, 1245, 1708, 1791, 170, 211, 512, 1340, 1761, 1262,
- /* 300 */ 1119, 602, 601, 600, 1123, 599, 1125, 1126, 598, 1128,
- /* 310 */ 595, 1607, 1134, 592, 1136, 1137, 589, 586, 1935, 1761,
- /* 320 */ 1335, 1809, 1584, 1270, 663, 14, 1659, 1935, 1761, 553,
- /* 330 */ 1935, 166, 1270, 343, 1761, 1932, 577, 1935, 39, 37,
- /* 340 */ 1933, 487, 1657, 165, 1932, 552, 339, 1932, 1264, 549,
- /* 350 */ 165, 76, 305, 2, 1932, 516, 497, 544, 539, 1340,
- /* 360 */ 1823, 1262, 1698, 159, 95, 1792, 580, 1794, 1795, 576,
- /* 370 */ 210, 571, 63, 173, 1869, 663, 1646, 127, 330, 1865,
- /* 380 */ 160, 552, 1335, 1265, 490, 1263, 419, 605, 484, 1342,
- /* 390 */ 1343, 33, 32, 209, 1270, 40, 38, 36, 35, 34,
- /* 400 */ 1896, 634, 632, 39, 37, 1345, 1268, 1269, 1487, 91,
- /* 410 */ 622, 339, 1791, 1264, 42, 8, 125, 40, 38, 36,
- /* 420 */ 35, 34, 124, 611, 1340, 58, 1262, 1596, 57, 49,
- /* 430 */ 1599, 162, 1877, 1878, 1265, 1882, 1263, 663, 178, 177,
- /* 440 */ 1809, 352, 137, 136, 608, 607, 606, 1335, 575, 1761,
- /* 450 */ 43, 1342, 1343, 1761, 316, 577, 1486, 1268, 1269, 1270,
- /* 460 */ 1317, 1318, 1320, 1321, 1322, 1323, 1324, 1325, 573, 569,
- /* 470 */ 1333, 1334, 1336, 1337, 1338, 1339, 1341, 1344, 63, 1823,
- /* 480 */ 9, 74, 1935, 294, 1792, 580, 1794, 1795, 576, 574,
- /* 490 */ 571, 568, 1841, 1289, 122, 165, 1265, 1761, 1263, 1932,
- /* 500 */ 33, 32, 663, 1602, 40, 38, 36, 35, 34, 317,
- /* 510 */ 168, 315, 314, 1485, 473, 351, 1342, 1343, 475, 1268,
- /* 520 */ 1269, 1291, 1317, 1318, 1320, 1321, 1322, 1323, 1324, 1325,
- /* 530 */ 573, 569, 1333, 1334, 1336, 1337, 1338, 1339, 1341, 1344,
- /* 540 */ 474, 1010, 1011, 33, 32, 1460, 1364, 40, 38, 36,
- /* 550 */ 35, 34, 168, 168, 1761, 526, 1935, 1592, 377, 146,
- /* 560 */ 376, 1265, 63, 1263, 26, 1532, 382, 168, 1610, 165,
- /* 570 */ 33, 32, 217, 1932, 40, 38, 36, 35, 34, 218,
- /* 580 */ 1484, 1791, 1414, 1607, 1268, 1269, 1594, 1317, 1318, 1320,
- /* 590 */ 1321, 1322, 1323, 1324, 1325, 573, 569, 1333, 1334, 1336,
- /* 600 */ 1337, 1338, 1339, 1341, 1344, 39, 37, 77, 27, 1809,
- /* 610 */ 498, 1884, 63, 339, 78, 1264, 168, 578, 1369, 1483,
- /* 620 */ 505, 1761, 1761, 373, 577, 1302, 1340, 28, 1262, 482,
- /* 630 */ 481, 1482, 1459, 33, 32, 1881, 123, 40, 38, 36,
- /* 640 */ 35, 34, 375, 371, 438, 1590, 477, 480, 1823, 1335,
- /* 650 */ 1287, 1935, 96, 1792, 580, 1794, 1795, 576, 253, 571,
- /* 660 */ 1761, 1270, 1869, 513, 165, 1481, 1868, 1865, 1932, 1081,
- /* 670 */ 33, 32, 1761, 1706, 40, 38, 36, 35, 34, 666,
- /* 680 */ 33, 32, 9, 526, 40, 38, 36, 35, 34, 1478,
- /* 690 */ 1477, 33, 32, 268, 383, 40, 38, 36, 35, 34,
- /* 700 */ 168, 1704, 1083, 300, 663, 432, 1761, 157, 436, 1698,
- /* 710 */ 214, 1607, 656, 652, 648, 644, 266, 1582, 1342, 1343,
- /* 720 */ 176, 33, 32, 307, 572, 40, 38, 36, 35, 34,
- /* 730 */ 1761, 1761, 39, 37, 526, 604, 526, 302, 1476, 1287,
- /* 740 */ 339, 549, 1264, 526, 307, 389, 412, 404, 92, 424,
- /* 750 */ 168, 231, 1302, 1340, 405, 1262, 440, 1585, 74, 436,
- /* 760 */ 1362, 1407, 1607, 1265, 1607, 1263, 397, 1289, 425, 127,
- /* 770 */ 399, 1607, 1475, 1703, 1779, 300, 1335, 1889, 1396, 1761,
- /* 780 */ 1603, 1362, 44, 4, 523, 1775, 1268, 1269, 1270, 1317,
- /* 790 */ 1318, 1320, 1321, 1322, 1323, 1324, 1325, 573, 569, 1333,
- /* 800 */ 1334, 1336, 1337, 1338, 1339, 1341, 1344, 390, 125, 2,
- /* 810 */ 1771, 1777, 334, 1761, 1363, 7, 220, 450, 611, 386,
- /* 820 */ 90, 526, 571, 163, 1877, 1878, 1659, 1882, 1424, 145,
- /* 830 */ 87, 663, 448, 312, 1236, 1363, 213, 137, 136, 608,
- /* 840 */ 607, 606, 1657, 1480, 1884, 1342, 1343, 423, 1474, 1607,
+ /* 170 */ 1262, 107, 1260, 663, 106, 105, 104, 103, 102, 101,
+ /* 180 */ 100, 99, 98, 440, 1775, 1287, 436, 1339, 1340, 223,
+ /* 190 */ 224, 11, 10, 1265, 1266, 1770, 1314, 1315, 1317, 1318,
+ /* 200 */ 1319, 1320, 1321, 1322, 573, 569, 1330, 1331, 1333, 1334,
+ /* 210 */ 1335, 1336, 1338, 1341, 1464, 496, 1431, 33, 32, 1766,
+ /* 220 */ 1772, 40, 38, 36, 35, 34, 526, 167, 494, 150,
+ /* 230 */ 492, 571, 1262, 1564, 1260, 1261, 210, 55, 116, 115,
+ /* 240 */ 114, 113, 112, 111, 110, 109, 108, 305, 1259, 63,
+ /* 250 */ 516, 1701, 22, 300, 1604, 1265, 1266, 167, 1314, 1315,
+ /* 260 */ 1317, 1318, 1319, 1320, 1321, 1322, 573, 569, 1330, 1331,
+ /* 270 */ 1333, 1334, 1335, 1336, 1338, 1341, 39, 37, 1650, 1651,
+ /* 280 */ 1373, 1267, 167, 167, 339, 552, 1261, 613, 1286, 49,
+ /* 290 */ 1160, 1161, 76, 305, 1786, 1487, 516, 1337, 1421, 1259,
+ /* 300 */ 1118, 602, 601, 600, 1122, 599, 1124, 1125, 598, 1127,
+ /* 310 */ 595, 342, 1133, 592, 1135, 1136, 589, 586, 1285, 146,
+ /* 320 */ 1332, 1581, 1804, 173, 663, 14, 479, 478, 1606, 377,
+ /* 330 */ 553, 376, 1267, 1656, 1930, 1756, 1756, 577, 39, 37,
+ /* 340 */ 312, 535, 1419, 1420, 1422, 1423, 339, 1928, 1261, 1654,
+ /* 350 */ 1705, 1927, 84, 2, 42, 71, 1656, 63, 70, 1337,
+ /* 360 */ 1818, 1259, 1267, 343, 95, 1787, 580, 1789, 1790, 576,
+ /* 370 */ 605, 571, 1654, 1597, 1864, 663, 345, 1589, 330, 1860,
+ /* 380 */ 159, 1288, 1332, 1262, 146, 1260, 1022, 167, 1021, 1339,
+ /* 390 */ 1340, 33, 32, 1606, 1267, 40, 38, 36, 35, 34,
+ /* 400 */ 1891, 1930, 384, 39, 37, 1342, 1265, 1266, 1486, 1485,
+ /* 410 */ 611, 339, 1786, 1261, 165, 8, 1023, 438, 1927, 634,
+ /* 420 */ 632, 1080, 611, 1284, 1337, 622, 1259, 167, 549, 137,
+ /* 430 */ 136, 608, 607, 606, 1262, 1695, 1260, 663, 1484, 303,
+ /* 440 */ 1804, 137, 136, 608, 607, 606, 172, 1332, 575, 1756,
+ /* 450 */ 1756, 1339, 1340, 1756, 1082, 577, 127, 1265, 1266, 1267,
+ /* 460 */ 1314, 1315, 1317, 1318, 1319, 1320, 1321, 1322, 573, 569,
+ /* 470 */ 1330, 1331, 1333, 1334, 1335, 1336, 1338, 1341, 1818, 1756,
+ /* 480 */ 9, 1591, 293, 1787, 580, 1789, 1790, 576, 574, 571,
+ /* 490 */ 568, 1836, 167, 74, 125, 167, 1262, 222, 1260, 33,
+ /* 500 */ 32, 1529, 663, 40, 38, 36, 35, 34, 551, 160,
+ /* 510 */ 1872, 1873, 1587, 1877, 1483, 1600, 1339, 1340, 252, 1265,
+ /* 520 */ 1266, 1579, 1314, 1315, 1317, 1318, 1319, 1320, 1321, 1322,
+ /* 530 */ 573, 569, 1330, 1331, 1333, 1334, 1335, 1336, 1338, 1341,
+ /* 540 */ 1700, 526, 300, 33, 32, 1457, 91, 40, 38, 36,
+ /* 550 */ 35, 34, 169, 167, 316, 1756, 1241, 1242, 623, 124,
+ /* 560 */ 1576, 1262, 1879, 1260, 26, 482, 481, 1596, 1462, 1604,
+ /* 570 */ 33, 32, 123, 1582, 40, 38, 36, 35, 34, 213,
+ /* 580 */ 1786, 1411, 477, 480, 1265, 1266, 1876, 1314, 1315, 1317,
+ /* 590 */ 1318, 1319, 1320, 1321, 1322, 573, 569, 1330, 1331, 1333,
+ /* 600 */ 1334, 1335, 1336, 1338, 1341, 39, 37, 475, 1804, 317,
+ /* 610 */ 146, 315, 314, 339, 473, 1261, 578, 1361, 475, 1607,
+ /* 620 */ 549, 1756, 611, 577, 28, 1299, 1337, 354, 1259, 474,
+ /* 630 */ 33, 32, 1456, 450, 40, 38, 36, 35, 34, 538,
+ /* 640 */ 474, 137, 136, 608, 607, 606, 1818, 1695, 127, 1332,
+ /* 650 */ 96, 1787, 580, 1789, 1790, 576, 572, 571, 175, 74,
+ /* 660 */ 1864, 1267, 526, 609, 1863, 1860, 1647, 1930, 554, 512,
+ /* 670 */ 33, 32, 122, 382, 40, 38, 36, 35, 34, 27,
+ /* 680 */ 164, 1599, 9, 1482, 1927, 1022, 125, 1021, 7, 1366,
+ /* 690 */ 1604, 33, 32, 1481, 1565, 40, 38, 36, 35, 34,
+ /* 700 */ 469, 250, 1872, 548, 663, 547, 33, 32, 1930, 1930,
+ /* 710 */ 40, 38, 36, 35, 34, 1023, 544, 539, 1339, 1340,
+ /* 720 */ 526, 166, 164, 307, 1756, 1927, 1927, 135, 487, 1404,
+ /* 730 */ 526, 383, 39, 37, 1756, 1286, 1480, 302, 1879, 1284,
+ /* 740 */ 339, 389, 1261, 497, 307, 526, 412, 604, 1604, 424,
+ /* 750 */ 526, 549, 1299, 1337, 1477, 1259, 404, 209, 1604, 61,
+ /* 760 */ 1359, 405, 1875, 1262, 373, 1260, 397, 1479, 425, 1478,
+ /* 770 */ 399, 490, 255, 1604, 54, 484, 1332, 1756, 1604, 127,
+ /* 780 */ 208, 1359, 1475, 375, 371, 419, 1265, 1266, 1267, 1314,
+ /* 790 */ 1315, 1317, 1318, 1319, 1320, 1321, 1322, 573, 569, 1330,
+ /* 800 */ 1331, 1333, 1334, 1335, 1336, 1338, 1341, 390, 1756, 2,
+ /* 810 */ 1756, 1397, 58, 526, 1360, 57, 1879, 125, 505, 386,
+ /* 820 */ 1289, 33, 32, 1756, 448, 40, 38, 36, 35, 34,
+ /* 830 */ 1474, 663, 161, 1872, 1873, 1360, 1877, 177, 176, 1505,
+ /* 840 */ 1874, 1604, 1347, 1009, 1010, 1339, 1340, 423, 1286, 1580,
/* 850 */ 418, 417, 416, 415, 414, 411, 410, 409, 408, 407,
- /* 860 */ 403, 402, 401, 400, 394, 393, 392, 391, 1880, 388,
- /* 870 */ 387, 535, 1422, 1423, 1425, 1426, 29, 337, 1357, 1358,
- /* 880 */ 1359, 1360, 1361, 1365, 1366, 1367, 1368, 1350, 61, 1761,
- /* 890 */ 1265, 609, 1263, 1289, 1650, 1935, 1400, 29, 337, 1357,
- /* 900 */ 1358, 1359, 1360, 1361, 1365, 1366, 1367, 1368, 166, 1583,
- /* 910 */ 1791, 1473, 1932, 1268, 1269, 1472, 1317, 1318, 1320, 1321,
- /* 920 */ 1322, 1323, 1324, 1325, 573, 569, 1333, 1334, 1336, 1337,
- /* 930 */ 1338, 1339, 1341, 1344, 623, 147, 1579, 1791, 1809, 526,
- /* 940 */ 279, 611, 610, 256, 1319, 1650, 578, 1884, 1471, 1470,
- /* 950 */ 449, 1761, 1761, 577, 277, 60, 1761, 475, 59, 1292,
- /* 960 */ 137, 136, 608, 607, 606, 1809, 554, 1607, 1289, 613,
- /* 970 */ 1568, 1879, 135, 578, 181, 429, 427, 1823, 1761, 474,
- /* 980 */ 577, 94, 1792, 580, 1794, 1795, 576, 536, 571, 1761,
- /* 990 */ 1761, 1869, 1780, 554, 468, 306, 1865, 273, 53, 509,
- /* 1000 */ 1637, 1659, 1396, 1775, 1823, 526, 63, 1935, 94, 1792,
- /* 1010 */ 580, 1794, 1795, 576, 526, 571, 1604, 1658, 1869, 54,
- /* 1020 */ 167, 1748, 306, 1865, 1932, 1736, 1519, 202, 1771, 1777,
- /* 1030 */ 200, 336, 335, 1607, 1935, 1462, 1463, 558, 526, 526,
- /* 1040 */ 571, 1278, 1607, 1273, 93, 526, 526, 165, 483, 506,
- /* 1050 */ 510, 1932, 1340, 561, 1271, 326, 228, 522, 526, 204,
- /* 1060 */ 526, 1791, 203, 146, 499, 526, 1607, 1607, 361, 524,
- /* 1070 */ 1319, 525, 1609, 1607, 1607, 1335, 262, 41, 222, 68,
- /* 1080 */ 67, 381, 342, 526, 172, 1272, 1607, 1270, 1607, 1809,
- /* 1090 */ 146, 131, 245, 1607, 346, 206, 233, 578, 205, 1609,
- /* 1100 */ 301, 567, 1761, 369, 577, 367, 363, 359, 356, 353,
- /* 1110 */ 345, 1607, 1782, 208, 134, 135, 207, 1810, 146, 1514,
- /* 1120 */ 1399, 1512, 51, 1791, 1213, 226, 237, 1609, 1823, 556,
- /* 1130 */ 566, 51, 95, 1792, 580, 1794, 1795, 576, 519, 571,
- /* 1140 */ 41, 485, 1869, 488, 168, 1319, 330, 1865, 1948, 11,
- /* 1150 */ 10, 1809, 616, 41, 617, 1784, 350, 1903, 584, 578,
- /* 1160 */ 134, 230, 1112, 1503, 1761, 1647, 577, 135, 119, 1421,
- /* 1170 */ 134, 1899, 550, 240, 1069, 1791, 1067, 255, 1370, 250,
- /* 1180 */ 1276, 258, 260, 3, 5, 355, 313, 1326, 1050, 1279,
- /* 1190 */ 1823, 1274, 360, 1229, 95, 1792, 580, 1794, 1795, 576,
- /* 1200 */ 272, 571, 269, 1809, 1869, 1140, 1508, 1144, 330, 1865,
- /* 1210 */ 1948, 578, 1282, 1284, 1151, 1149, 1761, 138, 577, 1926,
- /* 1220 */ 175, 1051, 1275, 1287, 569, 1333, 1334, 1336, 1337, 1338,
- /* 1230 */ 1339, 1791, 385, 1354, 406, 1700, 413, 421, 420, 1293,
- /* 1240 */ 559, 1791, 1823, 422, 426, 431, 95, 1792, 580, 1794,
- /* 1250 */ 1795, 576, 428, 571, 658, 439, 1869, 430, 562, 1809,
- /* 1260 */ 330, 1865, 1948, 1295, 442, 443, 184, 578, 1294, 1809,
- /* 1270 */ 186, 1888, 1761, 1296, 577, 444, 445, 578, 189, 447,
- /* 1280 */ 191, 72, 1761, 73, 577, 451, 470, 554, 195, 472,
- /* 1290 */ 1791, 304, 1597, 199, 118, 1593, 1741, 554, 1823, 501,
- /* 1300 */ 201, 140, 286, 1792, 580, 1794, 1795, 576, 1823, 571,
- /* 1310 */ 141, 1595, 286, 1792, 580, 1794, 1795, 576, 1809, 571,
- /* 1320 */ 1591, 142, 143, 212, 270, 500, 578, 215, 1935, 507,
- /* 1330 */ 504, 1761, 511, 577, 322, 219, 534, 514, 1935, 132,
- /* 1340 */ 1740, 167, 1710, 520, 517, 1932, 133, 324, 81, 521,
- /* 1350 */ 1791, 165, 1292, 530, 271, 1932, 83, 1823, 1608, 235,
- /* 1360 */ 1791, 96, 1792, 580, 1794, 1795, 576, 1900, 571, 537,
- /* 1370 */ 239, 1869, 532, 1910, 6, 565, 1865, 533, 1809, 546,
- /* 1380 */ 329, 1909, 540, 531, 529, 244, 578, 1891, 1809, 528,
- /* 1390 */ 1396, 1761, 1291, 577, 154, 126, 578, 249, 563, 560,
- /* 1400 */ 246, 1761, 48, 577, 1885, 247, 331, 248, 85, 1791,
- /* 1410 */ 582, 1651, 1580, 265, 274, 659, 660, 1823, 1931, 662,
- /* 1420 */ 52, 149, 1792, 580, 1794, 1795, 576, 1823, 571, 1951,
- /* 1430 */ 153, 96, 1792, 580, 1794, 1795, 576, 1809, 571, 557,
- /* 1440 */ 1755, 1869, 323, 287, 297, 578, 1866, 1850, 296, 254,
- /* 1450 */ 1761, 276, 577, 564, 1754, 278, 257, 259, 65, 1753,
- /* 1460 */ 1791, 1752, 66, 1749, 357, 555, 1949, 358, 1256, 1257,
- /* 1470 */ 171, 362, 1747, 364, 365, 366, 1823, 1746, 1745, 368,
- /* 1480 */ 295, 1792, 580, 1794, 1795, 576, 370, 571, 1809, 1744,
- /* 1490 */ 372, 1743, 374, 527, 1232, 1231, 578, 1721, 1720, 379,
- /* 1500 */ 380, 1761, 1201, 577, 1719, 1718, 1693, 129, 1692, 1691,
- /* 1510 */ 1690, 69, 1791, 1689, 1688, 1687, 1686, 1685, 395, 396,
- /* 1520 */ 1684, 398, 1791, 130, 1669, 1668, 1667, 1823, 1683, 1682,
- /* 1530 */ 1681, 295, 1792, 580, 1794, 1795, 576, 1680, 571, 1791,
- /* 1540 */ 1809, 1679, 1678, 1677, 1676, 1675, 1674, 1673, 578, 1672,
- /* 1550 */ 1809, 1671, 1670, 1761, 1666, 577, 1665, 1664, 578, 1663,
- /* 1560 */ 1203, 1662, 1661, 1761, 1660, 577, 1534, 1809, 179, 1533,
- /* 1570 */ 1531, 1499, 120, 182, 180, 575, 1498, 158, 435, 1823,
- /* 1580 */ 1761, 1013, 577, 290, 1792, 580, 1794, 1795, 576, 1823,
- /* 1590 */ 571, 190, 1012, 149, 1792, 580, 1794, 1795, 576, 1791,
- /* 1600 */ 571, 437, 1734, 183, 121, 1728, 1823, 1717, 1716, 1702,
- /* 1610 */ 294, 1792, 580, 1794, 1795, 576, 1791, 571, 188, 1842,
- /* 1620 */ 1586, 545, 1043, 1530, 1528, 452, 454, 1809, 1526, 453,
- /* 1630 */ 456, 457, 338, 458, 1524, 578, 460, 462, 1950, 461,
- /* 1640 */ 1761, 1522, 577, 465, 1809, 464, 1511, 1510, 1495, 340,
- /* 1650 */ 466, 1588, 578, 1155, 1154, 1587, 50, 1761, 631, 577,
- /* 1660 */ 1080, 1077, 633, 1520, 198, 1076, 1823, 1075, 1515, 1513,
- /* 1670 */ 295, 1792, 580, 1794, 1795, 576, 318, 571, 319, 320,
- /* 1680 */ 486, 1494, 1493, 1823, 1791, 489, 197, 295, 1792, 580,
- /* 1690 */ 1794, 1795, 576, 491, 571, 1492, 493, 495, 97, 1733,
- /* 1700 */ 152, 1238, 1791, 1727, 216, 467, 463, 459, 455, 196,
- /* 1710 */ 56, 502, 1809, 144, 1715, 1713, 1714, 1712, 1711, 221,
- /* 1720 */ 578, 1248, 15, 1709, 227, 1761, 79, 577, 1701, 503,
- /* 1730 */ 1809, 321, 508, 80, 232, 518, 41, 87, 578, 229,
- /* 1740 */ 47, 75, 16, 1761, 194, 577, 243, 242, 82, 25,
- /* 1750 */ 17, 1823, 1436, 23, 234, 280, 1792, 580, 1794, 1795,
- /* 1760 */ 576, 1791, 571, 236, 1418, 515, 238, 1782, 151, 1823,
- /* 1770 */ 1420, 252, 241, 281, 1792, 580, 1794, 1795, 576, 24,
- /* 1780 */ 571, 1413, 1393, 46, 1781, 86, 18, 155, 1392, 1809,
- /* 1790 */ 1448, 1453, 1442, 1447, 332, 1452, 1451, 578, 333, 10,
- /* 1800 */ 45, 1280, 1761, 1330, 577, 1355, 193, 187, 13, 192,
- /* 1810 */ 1791, 19, 1328, 446, 1327, 156, 1826, 169, 570, 31,
- /* 1820 */ 12, 20, 1310, 21, 583, 1141, 341, 1138, 1823, 185,
- /* 1830 */ 587, 1791, 282, 1792, 580, 1794, 1795, 576, 1809, 571,
- /* 1840 */ 585, 588, 581, 1135, 579, 590, 578, 1129, 593, 596,
- /* 1850 */ 1118, 1761, 1127, 577, 591, 594, 597, 1133, 1132, 1809,
- /* 1860 */ 1131, 1130, 88, 89, 263, 603, 1150, 578, 1146, 62,
- /* 1870 */ 1041, 1072, 1761, 612, 577, 1071, 1070, 1823, 1068, 1066,
- /* 1880 */ 1065, 289, 1792, 580, 1794, 1795, 576, 1064, 571, 1791,
- /* 1890 */ 1087, 621, 264, 1062, 1061, 1060, 1059, 1058, 1823, 1791,
- /* 1900 */ 1057, 1056, 291, 1792, 580, 1794, 1795, 576, 1047, 571,
- /* 1910 */ 1084, 1082, 1053, 1052, 1049, 1048, 1046, 1809, 1527, 641,
- /* 1920 */ 1525, 642, 643, 645, 647, 578, 1523, 1809, 649, 646,
- /* 1930 */ 1761, 651, 577, 1521, 650, 578, 653, 655, 654, 1509,
- /* 1940 */ 1761, 657, 577, 1491, 1003, 267, 661, 1466, 1466, 1266,
- /* 1950 */ 275, 1791, 664, 1466, 665, 1466, 1823, 1466, 1466, 1466,
- /* 1960 */ 283, 1792, 580, 1794, 1795, 576, 1823, 571, 1791, 1466,
- /* 1970 */ 292, 1792, 580, 1794, 1795, 576, 1466, 571, 1466, 1809,
- /* 1980 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 578, 1466, 1466,
- /* 1990 */ 1466, 1466, 1761, 1466, 577, 1466, 1809, 1466, 1466, 1466,
- /* 2000 */ 1466, 1466, 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761,
- /* 2010 */ 1466, 577, 1466, 1466, 1466, 1466, 1466, 1791, 1823, 1466,
- /* 2020 */ 1466, 1466, 284, 1792, 580, 1794, 1795, 576, 1466, 571,
- /* 2030 */ 1466, 1466, 1466, 1466, 1791, 1823, 1466, 1466, 1466, 293,
- /* 2040 */ 1792, 580, 1794, 1795, 576, 1809, 571, 1466, 1466, 1466,
- /* 2050 */ 1466, 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761, 1466,
- /* 2060 */ 577, 1466, 1809, 1466, 1466, 1466, 1466, 1466, 1466, 1466,
- /* 2070 */ 578, 1466, 1466, 1466, 1466, 1761, 1466, 577, 1466, 1466,
- /* 2080 */ 1466, 1466, 1466, 1791, 1823, 1466, 1466, 1466, 285, 1792,
- /* 2090 */ 580, 1794, 1795, 576, 1466, 571, 1466, 1466, 1466, 1466,
- /* 2100 */ 1466, 1823, 1466, 1466, 1466, 298, 1792, 580, 1794, 1795,
- /* 2110 */ 576, 1809, 571, 1466, 1466, 1466, 1466, 1466, 1466, 578,
- /* 2120 */ 1466, 1466, 1466, 1466, 1761, 1466, 577, 1466, 1466, 1466,
- /* 2130 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1791, 1466, 1466,
- /* 2140 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1791, 1466, 1466,
- /* 2150 */ 1823, 1466, 1466, 1466, 299, 1792, 580, 1794, 1795, 576,
- /* 2160 */ 1466, 571, 1466, 1466, 1466, 1809, 1466, 1466, 1466, 1466,
- /* 2170 */ 1466, 1466, 1466, 578, 1466, 1809, 1466, 1466, 1761, 1466,
- /* 2180 */ 577, 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761, 1466,
- /* 2190 */ 577, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1791, 1466,
- /* 2200 */ 1466, 1466, 1466, 1466, 1823, 1466, 1466, 1466, 1803, 1792,
- /* 2210 */ 580, 1794, 1795, 576, 1823, 571, 1791, 1466, 1802, 1792,
- /* 2220 */ 580, 1794, 1795, 576, 1466, 571, 1809, 1466, 1466, 1466,
- /* 2230 */ 1466, 1466, 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761,
- /* 2240 */ 1466, 577, 1466, 1466, 1809, 1466, 1466, 1466, 1466, 1466,
- /* 2250 */ 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761, 1466, 577,
- /* 2260 */ 1466, 1466, 1466, 1466, 1466, 1823, 1466, 1466, 1466, 1801,
- /* 2270 */ 1792, 580, 1794, 1795, 576, 1791, 571, 1466, 1466, 1466,
- /* 2280 */ 1466, 1466, 1466, 1823, 1466, 1466, 1466, 310, 1792, 580,
- /* 2290 */ 1794, 1795, 576, 1466, 571, 1466, 1791, 1466, 1466, 1466,
- /* 2300 */ 1466, 1466, 1466, 1809, 1466, 1466, 1466, 1466, 1466, 1466,
- /* 2310 */ 1466, 578, 1466, 1466, 1466, 1466, 1761, 1466, 577, 1466,
- /* 2320 */ 1466, 1466, 1466, 1466, 1809, 1466, 1466, 1466, 1466, 1466,
- /* 2330 */ 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761, 1466, 577,
- /* 2340 */ 1466, 1466, 1823, 1466, 1466, 1466, 309, 1792, 580, 1794,
- /* 2350 */ 1795, 576, 1791, 571, 1466, 1466, 1466, 1466, 1466, 1466,
- /* 2360 */ 1466, 1466, 1791, 1823, 1466, 1466, 1466, 311, 1792, 580,
- /* 2370 */ 1794, 1795, 576, 1466, 571, 1466, 1466, 1466, 1466, 1466,
- /* 2380 */ 1809, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 578, 1466,
- /* 2390 */ 1809, 1466, 1466, 1761, 549, 577, 1466, 1466, 578, 1466,
- /* 2400 */ 1466, 1466, 1466, 1761, 1466, 577, 1466, 1466, 1466, 1466,
- /* 2410 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1823,
- /* 2420 */ 1466, 1466, 127, 308, 1792, 580, 1794, 1795, 576, 1823,
- /* 2430 */ 571, 1466, 1466, 288, 1792, 580, 1794, 1795, 576, 1466,
- /* 2440 */ 571, 549, 554, 1466, 1466, 1466, 1466, 1466, 1466, 1466,
- /* 2450 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466,
- /* 2460 */ 1466, 125, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 127,
- /* 2470 */ 1466, 1466, 1466, 1466, 1466, 1466, 251, 1877, 548, 1466,
- /* 2480 */ 547, 1466, 1466, 1935, 1466, 1466, 1466, 1466, 1466, 554,
- /* 2490 */ 1466, 1466, 1466, 1466, 1466, 1466, 167, 1466, 1466, 1466,
- /* 2500 */ 1932, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 125, 1466,
- /* 2510 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466,
- /* 2520 */ 1466, 1466, 1466, 251, 1877, 548, 1466, 547, 1466, 1466,
- /* 2530 */ 1935, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466,
- /* 2540 */ 1466, 1466, 1466, 165, 1466, 1466, 1466, 1932,
+ /* 860 */ 403, 402, 401, 400, 394, 393, 392, 391, 549, 388,
+ /* 870 */ 387, 1756, 1656, 616, 1473, 1393, 29, 337, 1354, 1355,
+ /* 880 */ 1356, 1357, 1358, 1362, 1363, 1364, 1365, 658, 1655, 468,
+ /* 890 */ 1262, 610, 1260, 1286, 1647, 1068, 127, 29, 337, 1354,
+ /* 900 */ 1355, 1356, 1357, 1358, 1362, 1363, 1364, 1365, 272, 613,
+ /* 910 */ 536, 1634, 1316, 1265, 1266, 1756, 1314, 1315, 1317, 1318,
+ /* 920 */ 1319, 1320, 1321, 1322, 573, 569, 1330, 1331, 1333, 1334,
+ /* 930 */ 1335, 1336, 1338, 1341, 125, 147, 1472, 1786, 561, 352,
+ /* 940 */ 278, 351, 1884, 1393, 1743, 482, 481, 1516, 1774, 162,
+ /* 950 */ 1872, 1873, 123, 1877, 276, 60, 1805, 232, 59, 1770,
+ /* 960 */ 526, 526, 477, 480, 1471, 1804, 44, 4, 244, 483,
+ /* 970 */ 145, 449, 1601, 578, 180, 429, 427, 1756, 1756, 1930,
+ /* 980 */ 577, 1930, 1500, 1766, 1772, 334, 1470, 1786, 1604, 1604,
+ /* 990 */ 526, 361, 164, 554, 164, 571, 1927, 216, 1927, 498,
+ /* 1000 */ 556, 499, 1469, 1818, 1468, 1756, 63, 94, 1787, 580,
+ /* 1010 */ 1789, 1790, 576, 526, 571, 1804, 558, 1864, 1604, 336,
+ /* 1020 */ 335, 306, 1860, 578, 506, 1316, 526, 1756, 1756, 1275,
+ /* 1030 */ 577, 201, 77, 1930, 199, 1396, 1644, 510, 526, 1930,
+ /* 1040 */ 1337, 1604, 1268, 1756, 93, 1756, 166, 1511, 1467, 227,
+ /* 1050 */ 1927, 350, 164, 1818, 1604, 1786, 1927, 95, 1787, 580,
+ /* 1060 */ 1789, 1790, 576, 1332, 571, 526, 1604, 1864, 41, 485,
+ /* 1070 */ 1316, 330, 1860, 1943, 526, 1267, 522, 53, 509, 68,
+ /* 1080 */ 67, 381, 1898, 1804, 171, 524, 221, 203, 526, 1756,
+ /* 1090 */ 202, 578, 205, 1604, 207, 204, 1756, 206, 577, 525,
+ /* 1100 */ 301, 1509, 1604, 369, 1894, 367, 363, 359, 356, 353,
+ /* 1110 */ 1270, 554, 131, 526, 1786, 1212, 1604, 1269, 566, 567,
+ /* 1120 */ 526, 1818, 134, 488, 261, 94, 1787, 580, 1789, 1790,
+ /* 1130 */ 576, 346, 571, 225, 135, 1864, 51, 550, 666, 306,
+ /* 1140 */ 1860, 1604, 1804, 562, 167, 236, 51, 323, 1604, 41,
+ /* 1150 */ 578, 1930, 267, 90, 1786, 1756, 617, 577, 41, 519,
+ /* 1160 */ 1777, 11, 10, 87, 164, 249, 156, 3, 1927, 229,
+ /* 1170 */ 254, 656, 652, 648, 644, 265, 584, 1276, 1066, 1271,
+ /* 1180 */ 1818, 1111, 1804, 1418, 294, 1787, 580, 1789, 1790, 576,
+ /* 1190 */ 578, 571, 239, 1367, 1786, 1756, 1323, 577, 1459, 1460,
+ /* 1200 */ 1279, 1281, 257, 1779, 259, 271, 134, 92, 135, 5,
+ /* 1210 */ 230, 1049, 569, 1330, 1331, 1333, 1334, 1335, 1336, 559,
+ /* 1220 */ 1818, 360, 1804, 1139, 95, 1787, 580, 1789, 1790, 576,
+ /* 1230 */ 578, 571, 268, 355, 1864, 1756, 119, 577, 330, 1860,
+ /* 1240 */ 1943, 134, 549, 523, 1050, 313, 1228, 1273, 174, 1921,
+ /* 1250 */ 385, 1351, 1284, 1143, 1272, 1150, 406, 413, 1697, 421,
+ /* 1260 */ 1818, 420, 422, 1786, 95, 1787, 580, 1789, 1790, 576,
+ /* 1270 */ 127, 571, 426, 428, 1864, 219, 430, 1290, 330, 1860,
+ /* 1280 */ 1943, 431, 439, 1148, 1292, 442, 183, 443, 138, 1883,
+ /* 1290 */ 554, 1804, 1291, 1235, 185, 212, 444, 1293, 188, 578,
+ /* 1300 */ 445, 190, 447, 72, 1756, 73, 577, 451, 125, 194,
+ /* 1310 */ 470, 472, 1594, 198, 118, 1590, 304, 1786, 200, 554,
+ /* 1320 */ 140, 269, 141, 250, 1872, 548, 1592, 547, 1588, 1818,
+ /* 1330 */ 1930, 142, 143, 285, 1787, 580, 1789, 1790, 576, 211,
+ /* 1340 */ 571, 500, 1736, 164, 214, 1804, 507, 1927, 504, 511,
+ /* 1350 */ 218, 322, 534, 578, 514, 520, 501, 1735, 1756, 1930,
+ /* 1360 */ 577, 132, 1707, 517, 324, 1289, 81, 1786, 521, 133,
+ /* 1370 */ 270, 83, 166, 554, 537, 1605, 1927, 530, 1905, 234,
+ /* 1380 */ 1895, 238, 6, 1818, 1786, 532, 533, 285, 1787, 580,
+ /* 1390 */ 1789, 1790, 576, 329, 571, 1804, 546, 531, 540, 529,
+ /* 1400 */ 528, 248, 1288, 578, 1393, 126, 563, 560, 1756, 48,
+ /* 1410 */ 577, 1880, 1804, 1930, 1904, 85, 1648, 331, 1577, 659,
+ /* 1420 */ 578, 582, 264, 660, 243, 1756, 164, 577, 153, 1886,
+ /* 1430 */ 1927, 247, 245, 1818, 1786, 246, 253, 96, 1787, 580,
+ /* 1440 */ 1789, 1790, 576, 1845, 571, 273, 662, 1864, 299, 275,
+ /* 1450 */ 1818, 565, 1860, 256, 149, 1787, 580, 1789, 1790, 576,
+ /* 1460 */ 1786, 571, 1804, 52, 1946, 1926, 557, 286, 296, 258,
+ /* 1470 */ 578, 564, 295, 1750, 277, 1756, 1749, 577, 65, 1748,
+ /* 1480 */ 1747, 66, 1744, 357, 358, 1253, 1254, 170, 1804, 362,
+ /* 1490 */ 1742, 364, 365, 527, 366, 1741, 578, 368, 555, 1944,
+ /* 1500 */ 1818, 1756, 1740, 577, 96, 1787, 580, 1789, 1790, 576,
+ /* 1510 */ 1786, 571, 370, 1739, 1864, 372, 1738, 1230, 374, 1861,
+ /* 1520 */ 1231, 1718, 1786, 379, 380, 1716, 1818, 1717, 1715, 1690,
+ /* 1530 */ 294, 1787, 580, 1789, 1790, 576, 1786, 571, 1804, 1689,
+ /* 1540 */ 1200, 129, 1688, 1687, 69, 1686, 578, 395, 1681, 396,
+ /* 1550 */ 1804, 1756, 1685, 577, 1684, 1683, 1682, 398, 578, 1680,
+ /* 1560 */ 1679, 1678, 1677, 1756, 1804, 577, 1676, 1675, 1674, 1673,
+ /* 1570 */ 1672, 1671, 575, 1670, 1669, 1668, 1818, 1756, 1667, 577,
+ /* 1580 */ 289, 1787, 580, 1789, 1790, 576, 130, 571, 1818, 1786,
+ /* 1590 */ 1666, 1665, 149, 1787, 580, 1789, 1790, 576, 1664, 571,
+ /* 1600 */ 1663, 1662, 1818, 1202, 1660, 1659, 293, 1787, 580, 1789,
+ /* 1610 */ 1790, 576, 1661, 571, 1658, 1837, 1657, 1804, 545, 1531,
+ /* 1620 */ 178, 1530, 338, 120, 181, 578, 196, 1528, 179, 1496,
+ /* 1630 */ 1756, 157, 577, 435, 1012, 437, 1011, 1945, 1495, 182,
+ /* 1640 */ 152, 121, 1786, 452, 453, 467, 463, 459, 455, 195,
+ /* 1650 */ 1731, 1725, 1714, 189, 1786, 1818, 187, 1713, 1699, 294,
+ /* 1660 */ 1787, 580, 1789, 1790, 576, 1583, 571, 1527, 1786, 1042,
+ /* 1670 */ 1804, 1525, 454, 1523, 456, 340, 458, 457, 578, 1521,
+ /* 1680 */ 460, 75, 1804, 1756, 193, 577, 462, 461, 1519, 464,
+ /* 1690 */ 578, 465, 466, 1508, 1507, 1756, 1804, 577, 1492, 1585,
+ /* 1700 */ 1153, 1154, 197, 1584, 578, 1079, 1074, 50, 1818, 1756,
+ /* 1710 */ 1517, 577, 294, 1787, 580, 1789, 1790, 576, 631, 571,
+ /* 1720 */ 1818, 1076, 1786, 633, 279, 1787, 580, 1789, 1790, 576,
+ /* 1730 */ 1075, 571, 1512, 318, 1818, 319, 1786, 1510, 280, 1787,
+ /* 1740 */ 580, 1789, 1790, 576, 320, 571, 192, 186, 1786, 191,
+ /* 1750 */ 1804, 486, 489, 446, 1491, 491, 1490, 1489, 578, 493,
+ /* 1760 */ 495, 97, 1730, 1756, 1804, 577, 1237, 56, 1724, 184,
+ /* 1770 */ 502, 1712, 578, 1710, 508, 503, 1804, 1756, 215, 577,
+ /* 1780 */ 1711, 1709, 321, 1708, 578, 15, 144, 220, 1818, 1756,
+ /* 1790 */ 1245, 577, 281, 1787, 580, 1789, 1790, 576, 1706, 571,
+ /* 1800 */ 1698, 226, 1818, 518, 79, 1786, 288, 1787, 580, 1789,
+ /* 1810 */ 1790, 576, 228, 571, 1818, 1786, 515, 80, 290, 1787,
+ /* 1820 */ 580, 1789, 1790, 576, 82, 571, 87, 41, 231, 23,
+ /* 1830 */ 47, 1786, 1433, 1804, 233, 241, 235, 1415, 237, 242,
+ /* 1840 */ 1417, 578, 16, 1804, 25, 1777, 1756, 151, 577, 240,
+ /* 1850 */ 24, 578, 46, 1410, 86, 1786, 1756, 17, 577, 1804,
+ /* 1860 */ 1390, 251, 1389, 1776, 154, 1450, 45, 578, 18, 1439,
+ /* 1870 */ 1445, 1818, 1756, 13, 577, 282, 1787, 580, 1789, 1790,
+ /* 1880 */ 576, 1818, 571, 1804, 1444, 291, 1787, 580, 1789, 1790,
+ /* 1890 */ 576, 578, 571, 332, 1449, 1448, 1756, 1818, 577, 333,
+ /* 1900 */ 10, 283, 1787, 580, 1789, 1790, 576, 1277, 571, 1352,
+ /* 1910 */ 19, 1786, 1821, 1307, 1327, 570, 155, 1325, 31, 581,
+ /* 1920 */ 1324, 1818, 12, 20, 168, 292, 1787, 580, 1789, 1790,
+ /* 1930 */ 576, 1786, 571, 21, 583, 1140, 341, 585, 579, 1804,
+ /* 1940 */ 1137, 587, 588, 590, 1134, 591, 593, 578, 596, 1132,
+ /* 1950 */ 594, 1786, 1756, 1128, 577, 1126, 1131, 597, 1117, 1804,
+ /* 1960 */ 1130, 88, 1149, 603, 1129, 89, 62, 578, 262, 1145,
+ /* 1970 */ 612, 1786, 1756, 1071, 577, 1070, 1040, 1818, 1069, 1804,
+ /* 1980 */ 1067, 284, 1787, 580, 1789, 1790, 576, 578, 571, 1065,
+ /* 1990 */ 1064, 1786, 1756, 1063, 577, 263, 1086, 1818, 1061, 1804,
+ /* 2000 */ 1060, 297, 1787, 580, 1789, 1790, 576, 578, 571, 621,
+ /* 2010 */ 1059, 1058, 1756, 1057, 577, 1056, 1055, 1818, 1083, 1804,
+ /* 2020 */ 1081, 298, 1787, 580, 1789, 1790, 576, 578, 571, 1052,
+ /* 2030 */ 1051, 1786, 1756, 1048, 577, 1047, 1046, 1818, 1045, 1524,
+ /* 2040 */ 641, 1798, 1787, 580, 1789, 1790, 576, 1786, 571, 642,
+ /* 2050 */ 643, 1522, 645, 646, 647, 1520, 649, 1818, 651, 1804,
+ /* 2060 */ 650, 1797, 1787, 580, 1789, 1790, 576, 578, 571, 1518,
+ /* 2070 */ 653, 654, 1756, 655, 577, 1804, 1506, 657, 1002, 1488,
+ /* 2080 */ 266, 661, 664, 578, 1263, 274, 665, 1463, 1756, 1463,
+ /* 2090 */ 577, 1463, 1463, 1463, 1463, 1463, 1463, 1818, 1786, 1463,
+ /* 2100 */ 1463, 1796, 1787, 580, 1789, 1790, 576, 1463, 571, 1463,
+ /* 2110 */ 1463, 1463, 1463, 1818, 1786, 1463, 1463, 310, 1787, 580,
+ /* 2120 */ 1789, 1790, 576, 1463, 571, 1463, 1804, 1463, 1463, 1463,
+ /* 2130 */ 1463, 1463, 1463, 1463, 578, 1463, 1463, 1463, 1463, 1756,
+ /* 2140 */ 1463, 577, 1804, 1463, 1463, 1463, 1463, 1463, 1463, 1463,
+ /* 2150 */ 578, 1463, 1463, 1463, 1463, 1756, 1463, 577, 1463, 1463,
+ /* 2160 */ 1463, 1463, 1463, 1463, 1818, 1786, 1463, 1463, 309, 1787,
+ /* 2170 */ 580, 1789, 1790, 576, 1463, 571, 1463, 1463, 1463, 1463,
+ /* 2180 */ 1818, 1786, 1463, 1463, 311, 1787, 580, 1789, 1790, 576,
+ /* 2190 */ 1463, 571, 1463, 1804, 1463, 1463, 1463, 1463, 1463, 1463,
+ /* 2200 */ 1463, 578, 1463, 1463, 1463, 1463, 1756, 1463, 577, 1804,
+ /* 2210 */ 1463, 1463, 1463, 1463, 1463, 1463, 1463, 578, 1463, 1463,
+ /* 2220 */ 1463, 1463, 1756, 1463, 577, 1463, 1463, 1463, 1463, 1463,
+ /* 2230 */ 1463, 1818, 1463, 1463, 1463, 308, 1787, 580, 1789, 1790,
+ /* 2240 */ 576, 1463, 571, 1463, 1463, 1463, 1463, 1818, 1463, 1463,
+ /* 2250 */ 1463, 287, 1787, 580, 1789, 1790, 576, 1463, 571,
};
static const YYCODETYPE yy_lookahead[] = {
- /* 0 */ 316, 390, 391, 316, 316, 312, 316, 314, 315, 1,
- /* 10 */ 2, 327, 12, 13, 327, 350, 351, 327, 334, 364,
- /* 20 */ 20, 0, 22, 4, 334, 20, 20, 22, 344, 308,
- /* 30 */ 338, 344, 344, 33, 344, 35, 312, 20, 314, 315,
- /* 40 */ 35, 349, 21, 338, 20, 24, 25, 26, 27, 28,
- /* 50 */ 29, 30, 31, 32, 349, 50, 56, 336, 321, 4,
- /* 60 */ 405, 61, 325, 44, 45, 344, 374, 375, 68, 60,
- /* 70 */ 349, 383, 351, 418, 12, 13, 14, 422, 386, 374,
- /* 80 */ 375, 376, 20, 0, 22, 397, 398, 399, 400, 89,
- /* 90 */ 402, 386, 336, 322, 323, 33, 375, 35, 90, 343,
- /* 100 */ 379, 380, 381, 382, 383, 384, 21, 386, 352, 347,
- /* 110 */ 389, 111, 350, 351, 393, 394, 395, 351, 56, 34,
- /* 120 */ 405, 36, 20, 61, 358, 125, 126, 361, 407, 20,
- /* 130 */ 68, 125, 126, 418, 8, 9, 415, 422, 12, 13,
- /* 140 */ 14, 15, 16, 307, 89, 309, 63, 64, 65, 66,
+ /* 0 */ 312, 404, 314, 315, 337, 312, 316, 314, 315, 351,
+ /* 10 */ 389, 390, 12, 13, 417, 328, 358, 327, 421, 361,
+ /* 20 */ 20, 0, 22, 336, 334, 12, 13, 14, 15, 16,
+ /* 30 */ 308, 20, 345, 33, 344, 35, 347, 20, 20, 350,
+ /* 40 */ 351, 364, 21, 338, 316, 24, 25, 26, 27, 28,
+ /* 50 */ 29, 30, 31, 32, 349, 327, 56, 313, 336, 4,
+ /* 60 */ 316, 61, 334, 14, 15, 16, 344, 307, 68, 309,
+ /* 70 */ 60, 349, 344, 351, 12, 13, 14, 338, 373, 374,
+ /* 80 */ 375, 404, 20, 0, 22, 336, 56, 20, 349, 89,
+ /* 90 */ 385, 4, 336, 344, 417, 33, 374, 35, 421, 343,
+ /* 100 */ 378, 379, 380, 381, 382, 383, 89, 385, 352, 335,
+ /* 110 */ 388, 111, 373, 374, 392, 393, 394, 351, 56, 89,
+ /* 120 */ 404, 91, 348, 61, 385, 125, 126, 361, 406, 380,
+ /* 130 */ 68, 44, 45, 417, 8, 9, 414, 421, 12, 13,
+ /* 140 */ 14, 15, 16, 20, 89, 89, 63, 64, 65, 66,
/* 150 */ 67, 89, 69, 70, 71, 72, 73, 74, 75, 76,
/* 160 */ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
/* 170 */ 170, 21, 172, 111, 24, 25, 26, 27, 28, 29,
- /* 180 */ 30, 31, 32, 336, 14, 15, 16, 125, 126, 120,
- /* 190 */ 121, 344, 316, 193, 194, 60, 196, 197, 198, 199,
+ /* 180 */ 30, 31, 32, 313, 338, 20, 316, 125, 126, 120,
+ /* 190 */ 121, 1, 2, 193, 194, 349, 196, 197, 198, 199,
/* 200 */ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
- /* 210 */ 210, 211, 212, 213, 0, 20, 90, 8, 9, 64,
- /* 220 */ 65, 12, 13, 14, 15, 16, 71, 227, 381, 353,
- /* 230 */ 305, 120, 170, 318, 172, 22, 81, 82, 24, 25,
- /* 240 */ 26, 27, 28, 29, 30, 31, 32, 178, 35, 20,
- /* 250 */ 181, 22, 43, 56, 339, 193, 194, 308, 196, 197,
+ /* 210 */ 210, 211, 212, 213, 0, 21, 90, 8, 9, 373,
+ /* 220 */ 374, 12, 13, 14, 15, 16, 316, 227, 34, 321,
+ /* 230 */ 36, 385, 170, 325, 172, 22, 121, 327, 24, 25,
+ /* 240 */ 26, 27, 28, 29, 30, 31, 32, 178, 35, 89,
+ /* 250 */ 181, 360, 43, 362, 344, 193, 194, 227, 196, 197,
/* 260 */ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
- /* 270 */ 208, 209, 210, 211, 212, 213, 12, 13, 308, 50,
- /* 280 */ 155, 68, 227, 316, 20, 88, 22, 308, 91, 364,
- /* 290 */ 179, 180, 0, 308, 327, 121, 364, 33, 349, 35,
+ /* 270 */ 208, 209, 210, 211, 212, 213, 12, 13, 350, 351,
+ /* 280 */ 90, 68, 227, 227, 20, 20, 22, 60, 20, 89,
+ /* 290 */ 125, 126, 177, 178, 308, 308, 181, 33, 193, 35,
/* 300 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
- /* 310 */ 112, 344, 114, 115, 116, 117, 118, 119, 405, 349,
- /* 320 */ 56, 336, 0, 68, 111, 61, 336, 405, 349, 344,
- /* 330 */ 405, 418, 68, 343, 349, 422, 351, 405, 12, 13,
- /* 340 */ 418, 4, 352, 418, 422, 20, 20, 422, 22, 316,
- /* 350 */ 418, 177, 178, 89, 422, 181, 19, 232, 233, 33,
- /* 360 */ 375, 35, 344, 335, 379, 380, 381, 382, 383, 384,
- /* 370 */ 33, 386, 89, 355, 389, 111, 348, 344, 393, 394,
- /* 380 */ 395, 20, 56, 170, 47, 172, 77, 100, 51, 125,
- /* 390 */ 126, 8, 9, 56, 68, 12, 13, 14, 15, 16,
- /* 400 */ 415, 322, 323, 12, 13, 14, 193, 194, 308, 318,
- /* 410 */ 68, 20, 308, 22, 89, 89, 383, 12, 13, 14,
- /* 420 */ 15, 16, 331, 101, 33, 88, 35, 337, 91, 89,
- /* 430 */ 339, 398, 399, 400, 170, 402, 172, 111, 129, 130,
- /* 440 */ 336, 364, 120, 121, 122, 123, 124, 56, 344, 349,
- /* 450 */ 89, 125, 126, 349, 37, 351, 308, 193, 194, 68,
+ /* 310 */ 112, 328, 114, 115, 116, 117, 118, 119, 20, 336,
+ /* 320 */ 56, 0, 336, 56, 111, 61, 322, 323, 345, 169,
+ /* 330 */ 344, 171, 68, 336, 404, 349, 349, 351, 12, 13,
+ /* 340 */ 343, 236, 237, 238, 239, 240, 20, 417, 22, 352,
+ /* 350 */ 0, 421, 318, 89, 89, 88, 336, 89, 91, 33,
+ /* 360 */ 374, 35, 68, 343, 378, 379, 380, 381, 382, 383,
+ /* 370 */ 100, 385, 352, 339, 388, 111, 328, 337, 392, 393,
+ /* 380 */ 394, 20, 56, 170, 336, 172, 20, 227, 22, 125,
+ /* 390 */ 126, 8, 9, 345, 68, 12, 13, 14, 15, 16,
+ /* 400 */ 414, 404, 316, 12, 13, 14, 193, 194, 308, 308,
+ /* 410 */ 101, 20, 308, 22, 417, 89, 50, 14, 421, 322,
+ /* 420 */ 323, 35, 101, 20, 33, 68, 35, 227, 316, 120,
+ /* 430 */ 121, 122, 123, 124, 170, 344, 172, 111, 308, 353,
+ /* 440 */ 336, 120, 121, 122, 123, 124, 355, 56, 344, 349,
+ /* 450 */ 349, 125, 126, 349, 68, 351, 344, 193, 194, 68,
/* 460 */ 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
- /* 470 */ 206, 207, 208, 209, 210, 211, 212, 213, 89, 375,
- /* 480 */ 89, 320, 405, 379, 380, 381, 382, 383, 384, 385,
- /* 490 */ 386, 387, 388, 20, 333, 418, 170, 349, 172, 422,
- /* 500 */ 8, 9, 111, 342, 12, 13, 14, 15, 16, 92,
- /* 510 */ 227, 94, 95, 308, 97, 364, 125, 126, 101, 193,
- /* 520 */ 194, 20, 196, 197, 198, 199, 200, 201, 202, 203,
+ /* 470 */ 206, 207, 208, 209, 210, 211, 212, 213, 374, 349,
+ /* 480 */ 89, 337, 378, 379, 380, 381, 382, 383, 384, 385,
+ /* 490 */ 386, 387, 227, 320, 382, 227, 170, 120, 172, 8,
+ /* 500 */ 9, 0, 111, 12, 13, 14, 15, 16, 396, 397,
+ /* 510 */ 398, 399, 337, 401, 308, 342, 125, 126, 157, 193,
+ /* 520 */ 194, 0, 196, 197, 198, 199, 200, 201, 202, 203,
/* 530 */ 204, 205, 206, 207, 208, 209, 210, 211, 212, 213,
- /* 540 */ 123, 44, 45, 8, 9, 162, 152, 12, 13, 14,
- /* 550 */ 15, 16, 227, 227, 349, 316, 405, 337, 169, 336,
- /* 560 */ 171, 170, 89, 172, 2, 0, 327, 227, 345, 418,
- /* 570 */ 8, 9, 56, 422, 12, 13, 14, 15, 16, 56,
- /* 580 */ 308, 308, 90, 344, 193, 194, 337, 196, 197, 198,
+ /* 540 */ 360, 316, 362, 8, 9, 162, 318, 12, 13, 14,
+ /* 550 */ 15, 16, 327, 227, 37, 349, 179, 180, 324, 331,
+ /* 560 */ 326, 170, 376, 172, 2, 64, 65, 339, 305, 344,
+ /* 570 */ 8, 9, 71, 0, 12, 13, 14, 15, 16, 337,
+ /* 580 */ 308, 90, 81, 82, 193, 194, 400, 196, 197, 198,
/* 590 */ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
- /* 600 */ 209, 210, 211, 212, 213, 12, 13, 91, 214, 336,
- /* 610 */ 364, 377, 89, 20, 91, 22, 227, 344, 224, 308,
- /* 620 */ 368, 349, 349, 165, 351, 90, 33, 2, 35, 64,
- /* 630 */ 65, 308, 249, 8, 9, 401, 71, 12, 13, 14,
- /* 640 */ 15, 16, 184, 185, 14, 337, 81, 82, 375, 56,
- /* 650 */ 20, 405, 379, 380, 381, 382, 383, 384, 157, 386,
- /* 660 */ 349, 68, 389, 351, 418, 308, 393, 394, 422, 35,
- /* 670 */ 8, 9, 349, 361, 12, 13, 14, 15, 16, 19,
- /* 680 */ 8, 9, 89, 316, 12, 13, 14, 15, 16, 308,
- /* 690 */ 308, 8, 9, 33, 327, 12, 13, 14, 15, 16,
- /* 700 */ 227, 360, 68, 362, 111, 313, 349, 47, 316, 344,
- /* 710 */ 337, 344, 52, 53, 54, 55, 56, 0, 125, 126,
- /* 720 */ 355, 8, 9, 61, 337, 12, 13, 14, 15, 16,
- /* 730 */ 349, 349, 12, 13, 316, 337, 316, 18, 308, 20,
- /* 740 */ 20, 316, 22, 316, 61, 327, 27, 327, 88, 30,
- /* 750 */ 227, 91, 90, 33, 327, 35, 313, 0, 320, 316,
- /* 760 */ 98, 14, 344, 170, 344, 172, 47, 20, 49, 344,
- /* 770 */ 51, 344, 308, 360, 338, 362, 56, 225, 226, 349,
- /* 780 */ 342, 98, 42, 43, 124, 349, 193, 194, 68, 196,
+ /* 600 */ 209, 210, 211, 212, 213, 12, 13, 101, 336, 92,
+ /* 610 */ 336, 94, 95, 20, 97, 22, 344, 152, 101, 345,
+ /* 620 */ 316, 349, 101, 351, 2, 90, 33, 364, 35, 123,
+ /* 630 */ 8, 9, 249, 60, 12, 13, 14, 15, 16, 155,
+ /* 640 */ 123, 120, 121, 122, 123, 124, 374, 344, 344, 56,
+ /* 650 */ 378, 379, 380, 381, 382, 383, 337, 385, 355, 320,
+ /* 660 */ 388, 68, 316, 346, 392, 393, 349, 404, 364, 364,
+ /* 670 */ 8, 9, 333, 327, 12, 13, 14, 15, 16, 214,
+ /* 680 */ 417, 342, 89, 308, 421, 20, 382, 22, 39, 224,
+ /* 690 */ 344, 8, 9, 308, 325, 12, 13, 14, 15, 16,
+ /* 700 */ 35, 397, 398, 399, 111, 401, 8, 9, 404, 404,
+ /* 710 */ 12, 13, 14, 15, 16, 50, 232, 233, 125, 126,
+ /* 720 */ 316, 417, 417, 61, 349, 421, 421, 43, 4, 14,
+ /* 730 */ 316, 327, 12, 13, 349, 20, 308, 18, 376, 20,
+ /* 740 */ 20, 327, 22, 19, 61, 316, 27, 337, 344, 30,
+ /* 750 */ 316, 316, 90, 33, 309, 35, 327, 33, 344, 3,
+ /* 760 */ 98, 327, 400, 170, 165, 172, 47, 308, 49, 308,
+ /* 770 */ 51, 47, 424, 344, 90, 51, 56, 349, 344, 344,
+ /* 780 */ 56, 98, 308, 184, 185, 77, 193, 194, 68, 196,
/* 790 */ 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
- /* 800 */ 207, 208, 209, 210, 211, 212, 213, 88, 383, 89,
- /* 810 */ 374, 375, 376, 349, 152, 39, 156, 60, 101, 100,
- /* 820 */ 89, 316, 386, 398, 399, 400, 336, 402, 193, 157,
- /* 830 */ 99, 111, 327, 343, 174, 152, 176, 120, 121, 122,
- /* 840 */ 123, 124, 352, 309, 377, 125, 126, 128, 308, 344,
+ /* 800 */ 207, 208, 209, 210, 211, 212, 213, 88, 349, 89,
+ /* 810 */ 349, 4, 88, 316, 152, 91, 376, 382, 368, 100,
+ /* 820 */ 20, 8, 9, 349, 327, 12, 13, 14, 15, 16,
+ /* 830 */ 308, 111, 397, 398, 399, 152, 401, 129, 130, 0,
+ /* 840 */ 400, 344, 14, 44, 45, 125, 126, 128, 20, 0,
/* 850 */ 131, 132, 133, 134, 135, 136, 137, 138, 139, 140,
- /* 860 */ 141, 142, 143, 144, 145, 146, 147, 148, 401, 150,
- /* 870 */ 151, 236, 237, 238, 239, 240, 214, 215, 216, 217,
- /* 880 */ 218, 219, 220, 221, 222, 223, 224, 14, 3, 349,
- /* 890 */ 170, 346, 172, 20, 349, 405, 4, 214, 215, 216,
- /* 900 */ 217, 218, 219, 220, 221, 222, 223, 224, 418, 0,
- /* 910 */ 308, 308, 422, 193, 194, 308, 196, 197, 198, 199,
+ /* 860 */ 141, 142, 143, 144, 145, 146, 147, 148, 316, 150,
+ /* 870 */ 151, 349, 336, 13, 308, 226, 214, 215, 216, 217,
+ /* 880 */ 218, 219, 220, 221, 222, 223, 224, 48, 352, 317,
+ /* 890 */ 170, 346, 172, 20, 349, 35, 344, 214, 215, 216,
+ /* 900 */ 217, 218, 219, 220, 221, 222, 223, 224, 329, 60,
+ /* 910 */ 415, 332, 197, 193, 194, 349, 196, 197, 198, 199,
/* 920 */ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
- /* 930 */ 210, 211, 212, 213, 324, 18, 326, 308, 336, 316,
- /* 940 */ 23, 101, 346, 425, 197, 349, 344, 377, 308, 308,
- /* 950 */ 327, 349, 349, 351, 37, 38, 349, 101, 41, 20,
- /* 960 */ 120, 121, 122, 123, 124, 336, 364, 344, 20, 60,
- /* 970 */ 325, 401, 43, 344, 57, 58, 59, 375, 349, 123,
- /* 980 */ 351, 379, 380, 381, 382, 383, 384, 416, 386, 349,
- /* 990 */ 349, 389, 338, 364, 317, 393, 394, 329, 157, 158,
- /* 1000 */ 332, 336, 226, 349, 375, 316, 89, 405, 379, 380,
- /* 1010 */ 381, 382, 383, 384, 316, 386, 327, 352, 389, 90,
- /* 1020 */ 418, 0, 393, 394, 422, 327, 0, 93, 374, 375,
- /* 1030 */ 96, 12, 13, 344, 405, 125, 126, 43, 316, 316,
- /* 1040 */ 386, 22, 344, 35, 127, 316, 316, 418, 22, 327,
- /* 1050 */ 327, 422, 33, 43, 35, 328, 327, 327, 316, 93,
- /* 1060 */ 316, 308, 96, 336, 371, 316, 344, 344, 47, 327,
- /* 1070 */ 197, 327, 345, 344, 344, 56, 327, 43, 43, 162,
- /* 1080 */ 163, 164, 328, 316, 167, 35, 344, 68, 344, 336,
- /* 1090 */ 336, 43, 412, 344, 327, 93, 157, 344, 96, 345,
- /* 1100 */ 183, 61, 349, 186, 351, 188, 189, 190, 191, 192,
- /* 1110 */ 328, 344, 46, 93, 43, 43, 96, 336, 336, 0,
- /* 1120 */ 228, 0, 43, 308, 90, 90, 43, 345, 375, 244,
- /* 1130 */ 111, 43, 379, 380, 381, 382, 383, 384, 90, 386,
- /* 1140 */ 43, 22, 389, 22, 227, 197, 393, 394, 395, 1,
- /* 1150 */ 2, 336, 13, 43, 13, 89, 317, 404, 43, 344,
- /* 1160 */ 43, 90, 90, 315, 349, 348, 351, 43, 43, 90,
- /* 1170 */ 43, 378, 403, 90, 35, 308, 35, 419, 90, 396,
- /* 1180 */ 172, 419, 419, 406, 229, 373, 372, 90, 35, 170,
- /* 1190 */ 375, 172, 47, 168, 379, 380, 381, 382, 383, 384,
- /* 1200 */ 90, 386, 366, 336, 389, 90, 0, 90, 393, 394,
- /* 1210 */ 395, 344, 193, 194, 90, 90, 349, 90, 351, 404,
- /* 1220 */ 42, 68, 172, 20, 205, 206, 207, 208, 209, 210,
- /* 1230 */ 211, 308, 356, 193, 316, 316, 356, 152, 354, 20,
- /* 1240 */ 246, 308, 375, 354, 316, 310, 379, 380, 381, 382,
- /* 1250 */ 383, 384, 316, 386, 48, 310, 389, 316, 248, 336,
- /* 1260 */ 393, 394, 395, 20, 370, 351, 320, 344, 20, 336,
- /* 1270 */ 320, 404, 349, 20, 351, 363, 365, 344, 320, 363,
- /* 1280 */ 320, 320, 349, 320, 351, 316, 310, 364, 320, 336,
- /* 1290 */ 308, 310, 336, 336, 316, 336, 349, 364, 375, 369,
- /* 1300 */ 336, 336, 379, 380, 381, 382, 383, 384, 375, 386,
- /* 1310 */ 336, 336, 379, 380, 381, 382, 383, 384, 336, 386,
- /* 1320 */ 336, 336, 336, 318, 370, 175, 344, 318, 405, 316,
- /* 1330 */ 351, 349, 316, 351, 363, 318, 234, 349, 405, 359,
- /* 1340 */ 349, 418, 349, 154, 349, 422, 359, 349, 318, 357,
- /* 1350 */ 308, 418, 20, 349, 332, 422, 318, 375, 344, 359,
- /* 1360 */ 308, 379, 380, 381, 382, 383, 384, 378, 386, 235,
- /* 1370 */ 359, 389, 349, 411, 241, 393, 394, 349, 336, 161,
- /* 1380 */ 349, 411, 349, 243, 242, 413, 344, 414, 336, 230,
- /* 1390 */ 226, 349, 20, 351, 411, 344, 344, 373, 247, 245,
- /* 1400 */ 410, 349, 89, 351, 377, 409, 250, 408, 89, 308,
- /* 1410 */ 340, 349, 326, 318, 316, 36, 311, 375, 421, 310,
- /* 1420 */ 367, 379, 380, 381, 382, 383, 384, 375, 386, 426,
- /* 1430 */ 362, 379, 380, 381, 382, 383, 384, 336, 386, 421,
- /* 1440 */ 0, 389, 341, 330, 330, 344, 394, 392, 330, 420,
- /* 1450 */ 349, 319, 351, 421, 0, 306, 420, 420, 177, 0,
- /* 1460 */ 308, 0, 42, 0, 35, 423, 424, 187, 35, 35,
- /* 1470 */ 35, 187, 0, 35, 35, 187, 375, 0, 0, 187,
- /* 1480 */ 379, 380, 381, 382, 383, 384, 35, 386, 336, 0,
- /* 1490 */ 22, 0, 35, 341, 172, 170, 344, 0, 0, 166,
- /* 1500 */ 165, 349, 46, 351, 0, 0, 0, 42, 0, 0,
- /* 1510 */ 0, 149, 308, 0, 0, 0, 0, 0, 144, 35,
- /* 1520 */ 0, 144, 308, 42, 0, 0, 0, 375, 0, 0,
- /* 1530 */ 0, 379, 380, 381, 382, 383, 384, 0, 386, 308,
- /* 1540 */ 336, 0, 0, 0, 0, 0, 0, 0, 344, 0,
- /* 1550 */ 336, 0, 0, 349, 0, 351, 0, 0, 344, 0,
- /* 1560 */ 22, 0, 0, 349, 0, 351, 0, 336, 56, 0,
- /* 1570 */ 0, 0, 39, 42, 56, 344, 0, 43, 46, 375,
- /* 1580 */ 349, 14, 351, 379, 380, 381, 382, 383, 384, 375,
- /* 1590 */ 386, 161, 14, 379, 380, 381, 382, 383, 384, 308,
- /* 1600 */ 386, 46, 0, 40, 39, 0, 375, 0, 0, 0,
- /* 1610 */ 379, 380, 381, 382, 383, 384, 308, 386, 39, 388,
- /* 1620 */ 0, 417, 62, 0, 0, 35, 39, 336, 0, 47,
- /* 1630 */ 35, 47, 341, 39, 0, 344, 35, 39, 424, 47,
- /* 1640 */ 349, 0, 351, 47, 336, 35, 0, 0, 0, 341,
- /* 1650 */ 39, 0, 344, 35, 22, 0, 98, 349, 43, 351,
- /* 1660 */ 35, 35, 43, 0, 96, 35, 375, 22, 0, 0,
- /* 1670 */ 379, 380, 381, 382, 383, 384, 22, 386, 22, 22,
- /* 1680 */ 49, 0, 0, 375, 308, 35, 33, 379, 380, 381,
- /* 1690 */ 382, 383, 384, 35, 386, 0, 35, 22, 20, 0,
- /* 1700 */ 47, 35, 308, 0, 154, 52, 53, 54, 55, 56,
- /* 1710 */ 157, 22, 336, 173, 0, 0, 0, 0, 0, 90,
- /* 1720 */ 344, 35, 89, 0, 89, 349, 89, 351, 0, 157,
- /* 1730 */ 336, 157, 159, 39, 46, 155, 43, 99, 344, 153,
- /* 1740 */ 43, 88, 231, 349, 91, 351, 46, 43, 89, 43,
- /* 1750 */ 231, 375, 90, 89, 89, 379, 380, 381, 382, 383,
- /* 1760 */ 384, 308, 386, 90, 90, 182, 89, 46, 89, 375,
- /* 1770 */ 90, 46, 89, 379, 380, 381, 382, 383, 384, 89,
- /* 1780 */ 386, 90, 90, 43, 46, 89, 43, 46, 90, 336,
- /* 1790 */ 35, 90, 90, 35, 35, 35, 35, 344, 35, 2,
- /* 1800 */ 225, 22, 349, 90, 351, 193, 153, 154, 231, 156,
- /* 1810 */ 308, 43, 90, 160, 90, 46, 89, 46, 89, 89,
- /* 1820 */ 89, 89, 22, 89, 35, 90, 35, 90, 375, 176,
- /* 1830 */ 35, 308, 379, 380, 381, 382, 383, 384, 336, 386,
- /* 1840 */ 89, 89, 100, 90, 195, 35, 344, 90, 35, 35,
- /* 1850 */ 22, 349, 90, 351, 89, 89, 89, 113, 113, 336,
- /* 1860 */ 113, 113, 89, 89, 43, 101, 35, 344, 22, 89,
- /* 1870 */ 62, 35, 349, 61, 351, 35, 35, 375, 35, 35,
- /* 1880 */ 35, 379, 380, 381, 382, 383, 384, 35, 386, 308,
- /* 1890 */ 68, 87, 43, 35, 35, 22, 35, 22, 375, 308,
- /* 1900 */ 35, 35, 379, 380, 381, 382, 383, 384, 22, 386,
- /* 1910 */ 68, 35, 35, 35, 35, 35, 35, 336, 0, 35,
- /* 1920 */ 0, 47, 39, 35, 39, 344, 0, 336, 35, 47,
- /* 1930 */ 349, 39, 351, 0, 47, 344, 35, 39, 47, 0,
- /* 1940 */ 349, 35, 351, 0, 35, 22, 21, 427, 427, 22,
- /* 1950 */ 22, 308, 21, 427, 20, 427, 375, 427, 427, 427,
- /* 1960 */ 379, 380, 381, 382, 383, 384, 375, 386, 308, 427,
- /* 1970 */ 379, 380, 381, 382, 383, 384, 427, 386, 427, 336,
- /* 1980 */ 427, 427, 427, 427, 427, 427, 427, 344, 427, 427,
- /* 1990 */ 427, 427, 349, 427, 351, 427, 336, 427, 427, 427,
- /* 2000 */ 427, 427, 427, 427, 344, 427, 427, 427, 427, 349,
- /* 2010 */ 427, 351, 427, 427, 427, 427, 427, 308, 375, 427,
- /* 2020 */ 427, 427, 379, 380, 381, 382, 383, 384, 427, 386,
- /* 2030 */ 427, 427, 427, 427, 308, 375, 427, 427, 427, 379,
- /* 2040 */ 380, 381, 382, 383, 384, 336, 386, 427, 427, 427,
- /* 2050 */ 427, 427, 427, 344, 427, 427, 427, 427, 349, 427,
- /* 2060 */ 351, 427, 336, 427, 427, 427, 427, 427, 427, 427,
- /* 2070 */ 344, 427, 427, 427, 427, 349, 427, 351, 427, 427,
- /* 2080 */ 427, 427, 427, 308, 375, 427, 427, 427, 379, 380,
- /* 2090 */ 381, 382, 383, 384, 427, 386, 427, 427, 427, 427,
- /* 2100 */ 427, 375, 427, 427, 427, 379, 380, 381, 382, 383,
- /* 2110 */ 384, 336, 386, 427, 427, 427, 427, 427, 427, 344,
- /* 2120 */ 427, 427, 427, 427, 349, 427, 351, 427, 427, 427,
- /* 2130 */ 427, 427, 427, 427, 427, 427, 427, 308, 427, 427,
- /* 2140 */ 427, 427, 427, 427, 427, 427, 427, 308, 427, 427,
- /* 2150 */ 375, 427, 427, 427, 379, 380, 381, 382, 383, 384,
- /* 2160 */ 427, 386, 427, 427, 427, 336, 427, 427, 427, 427,
- /* 2170 */ 427, 427, 427, 344, 427, 336, 427, 427, 349, 427,
- /* 2180 */ 351, 427, 427, 344, 427, 427, 427, 427, 349, 427,
- /* 2190 */ 351, 427, 427, 427, 427, 427, 427, 427, 308, 427,
- /* 2200 */ 427, 427, 427, 427, 375, 427, 427, 427, 379, 380,
- /* 2210 */ 381, 382, 383, 384, 375, 386, 308, 427, 379, 380,
- /* 2220 */ 381, 382, 383, 384, 427, 386, 336, 427, 427, 427,
- /* 2230 */ 427, 427, 427, 427, 344, 427, 427, 427, 427, 349,
- /* 2240 */ 427, 351, 427, 427, 336, 427, 427, 427, 427, 427,
- /* 2250 */ 427, 427, 344, 427, 427, 427, 427, 349, 427, 351,
- /* 2260 */ 427, 427, 427, 427, 427, 375, 427, 427, 427, 379,
- /* 2270 */ 380, 381, 382, 383, 384, 308, 386, 427, 427, 427,
- /* 2280 */ 427, 427, 427, 375, 427, 427, 427, 379, 380, 381,
- /* 2290 */ 382, 383, 384, 427, 386, 427, 308, 427, 427, 427,
- /* 2300 */ 427, 427, 427, 336, 427, 427, 427, 427, 427, 427,
- /* 2310 */ 427, 344, 427, 427, 427, 427, 349, 427, 351, 427,
- /* 2320 */ 427, 427, 427, 427, 336, 427, 427, 427, 427, 427,
- /* 2330 */ 427, 427, 344, 427, 427, 427, 427, 349, 427, 351,
- /* 2340 */ 427, 427, 375, 427, 427, 427, 379, 380, 381, 382,
- /* 2350 */ 383, 384, 308, 386, 427, 427, 427, 427, 427, 427,
- /* 2360 */ 427, 427, 308, 375, 427, 427, 427, 379, 380, 381,
- /* 2370 */ 382, 383, 384, 427, 386, 427, 427, 427, 427, 427,
- /* 2380 */ 336, 427, 427, 427, 427, 427, 427, 427, 344, 427,
- /* 2390 */ 336, 427, 427, 349, 316, 351, 427, 427, 344, 427,
- /* 2400 */ 427, 427, 427, 349, 427, 351, 427, 427, 427, 427,
- /* 2410 */ 427, 427, 427, 427, 427, 427, 427, 427, 427, 375,
- /* 2420 */ 427, 427, 344, 379, 380, 381, 382, 383, 384, 375,
- /* 2430 */ 386, 427, 427, 379, 380, 381, 382, 383, 384, 427,
- /* 2440 */ 386, 316, 364, 427, 427, 427, 427, 427, 427, 427,
- /* 2450 */ 427, 427, 427, 427, 427, 427, 427, 427, 427, 427,
- /* 2460 */ 427, 383, 427, 427, 427, 427, 427, 427, 427, 344,
- /* 2470 */ 427, 427, 427, 427, 427, 427, 398, 399, 400, 427,
- /* 2480 */ 402, 427, 427, 405, 427, 427, 427, 427, 427, 364,
- /* 2490 */ 427, 427, 427, 427, 427, 427, 418, 427, 427, 427,
- /* 2500 */ 422, 427, 427, 427, 427, 427, 427, 427, 383, 427,
- /* 2510 */ 427, 427, 427, 427, 427, 427, 427, 427, 427, 427,
- /* 2520 */ 427, 427, 427, 398, 399, 400, 427, 402, 427, 427,
- /* 2530 */ 405, 427, 427, 427, 427, 427, 427, 427, 427, 427,
- /* 2540 */ 427, 427, 427, 418, 427, 427, 427, 422,
+ /* 930 */ 210, 211, 212, 213, 382, 18, 308, 308, 43, 364,
+ /* 940 */ 23, 364, 225, 226, 0, 64, 65, 0, 338, 397,
+ /* 950 */ 398, 399, 71, 401, 37, 38, 336, 157, 41, 349,
+ /* 960 */ 316, 316, 81, 82, 308, 336, 42, 43, 411, 22,
+ /* 970 */ 157, 327, 327, 344, 57, 58, 59, 349, 349, 404,
+ /* 980 */ 351, 404, 315, 373, 374, 375, 308, 308, 344, 344,
+ /* 990 */ 316, 47, 417, 364, 417, 385, 421, 56, 421, 364,
+ /* 1000 */ 244, 327, 308, 374, 308, 349, 89, 378, 379, 380,
+ /* 1010 */ 381, 382, 383, 316, 385, 336, 43, 388, 344, 12,
+ /* 1020 */ 13, 392, 393, 344, 327, 197, 316, 349, 349, 22,
+ /* 1030 */ 351, 93, 91, 404, 96, 228, 348, 327, 316, 404,
+ /* 1040 */ 33, 344, 35, 349, 127, 349, 417, 0, 308, 327,
+ /* 1050 */ 421, 317, 417, 374, 344, 308, 421, 378, 379, 380,
+ /* 1060 */ 381, 382, 383, 56, 385, 316, 344, 388, 43, 22,
+ /* 1070 */ 197, 392, 393, 394, 316, 68, 327, 157, 158, 162,
+ /* 1080 */ 163, 164, 403, 336, 167, 327, 43, 93, 316, 349,
+ /* 1090 */ 96, 344, 93, 344, 93, 96, 349, 96, 351, 327,
+ /* 1100 */ 183, 0, 344, 186, 377, 188, 189, 190, 191, 192,
+ /* 1110 */ 35, 364, 43, 316, 308, 90, 344, 35, 111, 61,
+ /* 1120 */ 316, 374, 43, 22, 327, 378, 379, 380, 381, 382,
+ /* 1130 */ 383, 327, 385, 90, 43, 388, 43, 402, 19, 392,
+ /* 1140 */ 393, 344, 336, 248, 227, 43, 43, 341, 344, 43,
+ /* 1150 */ 344, 404, 33, 89, 308, 349, 13, 351, 43, 90,
+ /* 1160 */ 46, 1, 2, 99, 417, 395, 47, 405, 421, 90,
+ /* 1170 */ 418, 52, 53, 54, 55, 56, 43, 170, 35, 172,
+ /* 1180 */ 374, 90, 336, 90, 378, 379, 380, 381, 382, 383,
+ /* 1190 */ 344, 385, 90, 90, 308, 349, 90, 351, 125, 126,
+ /* 1200 */ 193, 194, 418, 89, 418, 90, 43, 88, 43, 229,
+ /* 1210 */ 91, 35, 205, 206, 207, 208, 209, 210, 211, 246,
+ /* 1220 */ 374, 47, 336, 90, 378, 379, 380, 381, 382, 383,
+ /* 1230 */ 344, 385, 366, 372, 388, 349, 43, 351, 392, 393,
+ /* 1240 */ 394, 43, 316, 124, 68, 371, 168, 172, 42, 403,
+ /* 1250 */ 356, 193, 20, 90, 172, 90, 316, 356, 316, 152,
+ /* 1260 */ 374, 354, 354, 308, 378, 379, 380, 381, 382, 383,
+ /* 1270 */ 344, 385, 316, 316, 388, 156, 316, 20, 392, 393,
+ /* 1280 */ 394, 310, 310, 90, 20, 370, 320, 351, 90, 403,
+ /* 1290 */ 364, 336, 20, 174, 320, 176, 363, 20, 320, 344,
+ /* 1300 */ 365, 320, 363, 320, 349, 320, 351, 316, 382, 320,
+ /* 1310 */ 310, 336, 336, 336, 316, 336, 310, 308, 336, 364,
+ /* 1320 */ 336, 370, 336, 397, 398, 399, 336, 401, 336, 374,
+ /* 1330 */ 404, 336, 336, 378, 379, 380, 381, 382, 383, 318,
+ /* 1340 */ 385, 175, 349, 417, 318, 336, 316, 421, 351, 316,
+ /* 1350 */ 318, 363, 234, 344, 349, 154, 369, 349, 349, 404,
+ /* 1360 */ 351, 359, 349, 349, 349, 20, 318, 308, 357, 359,
+ /* 1370 */ 332, 318, 417, 364, 235, 344, 421, 349, 410, 359,
+ /* 1380 */ 377, 359, 241, 374, 308, 349, 349, 378, 379, 380,
+ /* 1390 */ 381, 382, 383, 349, 385, 336, 161, 243, 349, 242,
+ /* 1400 */ 230, 372, 20, 344, 226, 344, 247, 245, 349, 89,
+ /* 1410 */ 351, 376, 336, 404, 410, 89, 349, 250, 326, 36,
+ /* 1420 */ 344, 340, 318, 311, 412, 349, 417, 351, 410, 413,
+ /* 1430 */ 421, 407, 409, 374, 308, 408, 419, 378, 379, 380,
+ /* 1440 */ 381, 382, 383, 391, 385, 316, 310, 388, 362, 319,
+ /* 1450 */ 374, 392, 393, 419, 378, 379, 380, 381, 382, 383,
+ /* 1460 */ 308, 385, 336, 367, 425, 420, 420, 330, 330, 419,
+ /* 1470 */ 344, 420, 330, 0, 306, 349, 0, 351, 177, 0,
+ /* 1480 */ 0, 42, 0, 35, 187, 35, 35, 35, 336, 187,
+ /* 1490 */ 0, 35, 35, 341, 187, 0, 344, 187, 422, 423,
+ /* 1500 */ 374, 349, 0, 351, 378, 379, 380, 381, 382, 383,
+ /* 1510 */ 308, 385, 35, 0, 388, 22, 0, 170, 35, 393,
+ /* 1520 */ 172, 0, 308, 166, 165, 0, 374, 0, 0, 0,
+ /* 1530 */ 378, 379, 380, 381, 382, 383, 308, 385, 336, 0,
+ /* 1540 */ 46, 42, 0, 0, 149, 0, 344, 144, 0, 35,
+ /* 1550 */ 336, 349, 0, 351, 0, 0, 0, 144, 344, 0,
+ /* 1560 */ 0, 0, 0, 349, 336, 351, 0, 0, 0, 0,
+ /* 1570 */ 0, 0, 344, 0, 0, 0, 374, 349, 0, 351,
+ /* 1580 */ 378, 379, 380, 381, 382, 383, 42, 385, 374, 308,
+ /* 1590 */ 0, 0, 378, 379, 380, 381, 382, 383, 0, 385,
+ /* 1600 */ 0, 0, 374, 22, 0, 0, 378, 379, 380, 381,
+ /* 1610 */ 382, 383, 0, 385, 0, 387, 0, 336, 416, 0,
+ /* 1620 */ 56, 0, 341, 39, 42, 344, 33, 0, 56, 0,
+ /* 1630 */ 349, 43, 351, 46, 14, 46, 14, 423, 0, 40,
+ /* 1640 */ 47, 39, 308, 35, 47, 52, 53, 54, 55, 56,
+ /* 1650 */ 0, 0, 0, 161, 308, 374, 39, 0, 0, 378,
+ /* 1660 */ 379, 380, 381, 382, 383, 0, 385, 0, 308, 62,
+ /* 1670 */ 336, 0, 39, 0, 35, 341, 39, 47, 344, 0,
+ /* 1680 */ 35, 88, 336, 349, 91, 351, 39, 47, 0, 35,
+ /* 1690 */ 344, 47, 39, 0, 0, 349, 336, 351, 0, 0,
+ /* 1700 */ 22, 35, 96, 0, 344, 35, 22, 98, 374, 349,
+ /* 1710 */ 0, 351, 378, 379, 380, 381, 382, 383, 43, 385,
+ /* 1720 */ 374, 35, 308, 43, 378, 379, 380, 381, 382, 383,
+ /* 1730 */ 35, 385, 0, 22, 374, 22, 308, 0, 378, 379,
+ /* 1740 */ 380, 381, 382, 383, 22, 385, 153, 154, 308, 156,
+ /* 1750 */ 336, 49, 35, 160, 0, 35, 0, 0, 344, 35,
+ /* 1760 */ 22, 20, 0, 349, 336, 351, 35, 157, 0, 176,
+ /* 1770 */ 22, 0, 344, 0, 159, 157, 336, 349, 154, 351,
+ /* 1780 */ 0, 0, 157, 0, 344, 89, 173, 90, 374, 349,
+ /* 1790 */ 35, 351, 378, 379, 380, 381, 382, 383, 0, 385,
+ /* 1800 */ 0, 89, 374, 155, 89, 308, 378, 379, 380, 381,
+ /* 1810 */ 382, 383, 153, 385, 374, 308, 182, 39, 378, 379,
+ /* 1820 */ 380, 381, 382, 383, 89, 385, 99, 43, 46, 89,
+ /* 1830 */ 43, 308, 90, 336, 89, 43, 90, 90, 89, 46,
+ /* 1840 */ 90, 344, 231, 336, 43, 46, 349, 89, 351, 89,
+ /* 1850 */ 89, 344, 43, 90, 89, 308, 349, 231, 351, 336,
+ /* 1860 */ 90, 46, 90, 46, 46, 90, 225, 344, 43, 90,
+ /* 1870 */ 35, 374, 349, 231, 351, 378, 379, 380, 381, 382,
+ /* 1880 */ 383, 374, 385, 336, 35, 378, 379, 380, 381, 382,
+ /* 1890 */ 383, 344, 385, 35, 35, 35, 349, 374, 351, 35,
+ /* 1900 */ 2, 378, 379, 380, 381, 382, 383, 22, 385, 193,
+ /* 1910 */ 43, 308, 89, 22, 90, 89, 46, 90, 89, 100,
+ /* 1920 */ 90, 374, 89, 89, 46, 378, 379, 380, 381, 382,
+ /* 1930 */ 383, 308, 385, 89, 35, 90, 35, 89, 195, 336,
+ /* 1940 */ 90, 35, 89, 35, 90, 89, 35, 344, 35, 113,
+ /* 1950 */ 89, 308, 349, 90, 351, 90, 113, 89, 22, 336,
+ /* 1960 */ 113, 89, 35, 101, 113, 89, 89, 344, 43, 22,
+ /* 1970 */ 61, 308, 349, 35, 351, 35, 62, 374, 35, 336,
+ /* 1980 */ 35, 378, 379, 380, 381, 382, 383, 344, 385, 35,
+ /* 1990 */ 35, 308, 349, 35, 351, 43, 68, 374, 35, 336,
+ /* 2000 */ 35, 378, 379, 380, 381, 382, 383, 344, 385, 87,
+ /* 2010 */ 22, 35, 349, 22, 351, 35, 35, 374, 68, 336,
+ /* 2020 */ 35, 378, 379, 380, 381, 382, 383, 344, 385, 35,
+ /* 2030 */ 35, 308, 349, 35, 351, 35, 22, 374, 35, 0,
+ /* 2040 */ 35, 378, 379, 380, 381, 382, 383, 308, 385, 47,
+ /* 2050 */ 39, 0, 35, 47, 39, 0, 35, 374, 39, 336,
+ /* 2060 */ 47, 378, 379, 380, 381, 382, 383, 344, 385, 0,
+ /* 2070 */ 35, 47, 349, 39, 351, 336, 0, 35, 35, 0,
+ /* 2080 */ 22, 21, 21, 344, 22, 22, 20, 426, 349, 426,
+ /* 2090 */ 351, 426, 426, 426, 426, 426, 426, 374, 308, 426,
+ /* 2100 */ 426, 378, 379, 380, 381, 382, 383, 426, 385, 426,
+ /* 2110 */ 426, 426, 426, 374, 308, 426, 426, 378, 379, 380,
+ /* 2120 */ 381, 382, 383, 426, 385, 426, 336, 426, 426, 426,
+ /* 2130 */ 426, 426, 426, 426, 344, 426, 426, 426, 426, 349,
+ /* 2140 */ 426, 351, 336, 426, 426, 426, 426, 426, 426, 426,
+ /* 2150 */ 344, 426, 426, 426, 426, 349, 426, 351, 426, 426,
+ /* 2160 */ 426, 426, 426, 426, 374, 308, 426, 426, 378, 379,
+ /* 2170 */ 380, 381, 382, 383, 426, 385, 426, 426, 426, 426,
+ /* 2180 */ 374, 308, 426, 426, 378, 379, 380, 381, 382, 383,
+ /* 2190 */ 426, 385, 426, 336, 426, 426, 426, 426, 426, 426,
+ /* 2200 */ 426, 344, 426, 426, 426, 426, 349, 426, 351, 336,
+ /* 2210 */ 426, 426, 426, 426, 426, 426, 426, 344, 426, 426,
+ /* 2220 */ 426, 426, 349, 426, 351, 426, 426, 426, 426, 426,
+ /* 2230 */ 426, 374, 426, 426, 426, 378, 379, 380, 381, 382,
+ /* 2240 */ 383, 426, 385, 426, 426, 426, 426, 374, 426, 426,
+ /* 2250 */ 426, 378, 379, 380, 381, 382, 383, 426, 385, 426,
+ /* 2260 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
+ /* 2270 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
+ /* 2280 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
+ /* 2290 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
+ /* 2300 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
+ /* 2310 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
+ /* 2320 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
+ /* 2330 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
+ /* 2340 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
+ /* 2350 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
+ /* 2360 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
+ /* 2370 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
+ /* 2380 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
+ /* 2390 */ 426, 426,
};
#define YY_SHIFT_COUNT (666)
#define YY_SHIFT_MIN (0)
-#define YY_SHIFT_MAX (1943)
+#define YY_SHIFT_MAX (2079)
static const unsigned short int yy_shift_ofst[] = {
/* 0 */ 917, 0, 0, 62, 62, 264, 264, 264, 326, 326,
/* 10 */ 264, 264, 391, 593, 720, 593, 593, 593, 593, 593,
/* 20 */ 593, 593, 593, 593, 593, 593, 593, 593, 593, 593,
/* 30 */ 593, 593, 593, 593, 593, 593, 593, 593, 593, 593,
- /* 40 */ 593, 593, 325, 325, 361, 361, 361, 1019, 1019, 473,
- /* 50 */ 1019, 1019, 389, 523, 283, 340, 283, 17, 17, 19,
- /* 60 */ 19, 55, 6, 283, 283, 17, 17, 17, 17, 17,
- /* 70 */ 17, 17, 17, 17, 17, 9, 17, 17, 17, 24,
- /* 80 */ 17, 17, 102, 17, 17, 102, 109, 17, 102, 102,
- /* 90 */ 102, 17, 135, 719, 662, 683, 683, 150, 213, 213,
+ /* 40 */ 593, 593, 265, 265, 17, 17, 17, 1007, 1007, 268,
+ /* 50 */ 1007, 1007, 160, 30, 56, 200, 56, 11, 11, 87,
+ /* 60 */ 87, 55, 165, 56, 56, 11, 11, 11, 11, 11,
+ /* 70 */ 11, 11, 11, 11, 11, 10, 11, 11, 11, 18,
+ /* 80 */ 11, 11, 67, 11, 11, 67, 123, 11, 67, 67,
+ /* 90 */ 67, 11, 227, 719, 662, 683, 683, 150, 213, 213,
/* 100 */ 213, 213, 213, 213, 213, 213, 213, 213, 213, 213,
- /* 110 */ 213, 213, 213, 213, 213, 213, 213, 417, 155, 6,
- /* 120 */ 630, 630, 757, 634, 909, 501, 501, 501, 634, 195,
- /* 130 */ 195, 24, 292, 292, 102, 102, 255, 255, 287, 342,
- /* 140 */ 198, 198, 198, 198, 198, 198, 198, 660, 21, 383,
- /* 150 */ 565, 635, 5, 174, 125, 747, 873, 229, 497, 856,
- /* 160 */ 939, 552, 776, 552, 740, 885, 885, 885, 892, 948,
- /* 170 */ 955, 1145, 1025, 1178, 1203, 1203, 1178, 1085, 1085, 1203,
- /* 180 */ 1203, 1203, 1219, 1219, 1243, 9, 24, 9, 1248, 1253,
- /* 190 */ 9, 1248, 9, 9, 9, 1203, 9, 1219, 102, 102,
- /* 200 */ 102, 102, 102, 102, 102, 102, 102, 102, 102, 1203,
- /* 210 */ 1219, 255, 1243, 135, 1150, 24, 135, 1203, 1203, 1248,
- /* 220 */ 135, 1102, 255, 255, 255, 255, 1102, 255, 1189, 135,
- /* 230 */ 287, 135, 195, 1332, 255, 1134, 1102, 255, 255, 1134,
- /* 240 */ 1102, 255, 255, 102, 1133, 1218, 1134, 1140, 1142, 1159,
- /* 250 */ 955, 1164, 195, 1372, 1151, 1154, 1156, 1151, 1154, 1151,
- /* 260 */ 1154, 1313, 1319, 255, 342, 1203, 135, 1379, 1219, 2548,
- /* 270 */ 2548, 2548, 2548, 2548, 2548, 2548, 83, 1653, 214, 337,
- /* 280 */ 126, 209, 492, 562, 625, 672, 535, 322, 713, 713,
- /* 290 */ 713, 713, 713, 713, 713, 713, 717, 840, 405, 405,
- /* 300 */ 69, 458, 197, 309, 85, 111, 8, 394, 170, 170,
- /* 310 */ 170, 170, 929, 1021, 934, 966, 1002, 1020, 1026, 1119,
- /* 320 */ 1121, 516, 841, 1034, 1035, 1048, 1071, 1072, 1079, 1083,
- /* 330 */ 1148, 910, 994, 1010, 1088, 1008, 1050, 1040, 1097, 1066,
- /* 340 */ 1110, 1115, 1117, 1124, 1125, 1127, 731, 1139, 1141, 1153,
- /* 350 */ 1206, 1440, 1454, 1281, 1459, 1461, 1420, 1463, 1429, 1280,
- /* 360 */ 1433, 1434, 1435, 1284, 1472, 1438, 1439, 1288, 1477, 1292,
- /* 370 */ 1478, 1451, 1489, 1468, 1491, 1457, 1322, 1325, 1497, 1498,
- /* 380 */ 1333, 1335, 1504, 1505, 1456, 1506, 1465, 1508, 1509, 1510,
- /* 390 */ 1362, 1513, 1514, 1515, 1516, 1517, 1374, 1484, 1520, 1377,
- /* 400 */ 1528, 1529, 1530, 1537, 1541, 1542, 1543, 1544, 1545, 1546,
- /* 410 */ 1547, 1549, 1551, 1552, 1481, 1524, 1525, 1526, 1554, 1556,
- /* 420 */ 1557, 1538, 1559, 1561, 1562, 1564, 1566, 1512, 1569, 1518,
- /* 430 */ 1570, 1571, 1531, 1533, 1534, 1567, 1532, 1578, 1555, 1576,
- /* 440 */ 1563, 1565, 1602, 1605, 1607, 1579, 1430, 1608, 1609, 1620,
- /* 450 */ 1560, 1623, 1624, 1590, 1582, 1587, 1628, 1595, 1584, 1594,
- /* 460 */ 1634, 1601, 1592, 1598, 1641, 1610, 1596, 1611, 1646, 1647,
- /* 470 */ 1648, 1651, 1558, 1568, 1618, 1632, 1655, 1625, 1615, 1619,
- /* 480 */ 1626, 1630, 1645, 1663, 1654, 1668, 1656, 1631, 1669, 1657,
- /* 490 */ 1650, 1681, 1658, 1682, 1661, 1695, 1675, 1678, 1699, 1553,
- /* 500 */ 1666, 1703, 1540, 1689, 1572, 1550, 1714, 1715, 1574, 1573,
- /* 510 */ 1716, 1717, 1718, 1633, 1629, 1686, 1583, 1723, 1635, 1580,
- /* 520 */ 1637, 1728, 1694, 1586, 1659, 1638, 1688, 1693, 1511, 1664,
- /* 530 */ 1662, 1665, 1673, 1674, 1677, 1697, 1680, 1679, 1683, 1690,
- /* 540 */ 1691, 1704, 1700, 1721, 1696, 1706, 1519, 1692, 1698, 1725,
- /* 550 */ 1575, 1740, 1738, 1741, 1701, 1743, 1577, 1702, 1755, 1758,
- /* 560 */ 1759, 1760, 1761, 1763, 1702, 1797, 1779, 1612, 1768, 1727,
- /* 570 */ 1713, 1729, 1722, 1730, 1724, 1769, 1731, 1732, 1771, 1800,
- /* 580 */ 1649, 1734, 1742, 1735, 1789, 1791, 1751, 1737, 1795, 1752,
- /* 590 */ 1753, 1810, 1765, 1757, 1813, 1766, 1762, 1814, 1767, 1744,
- /* 600 */ 1745, 1747, 1748, 1828, 1764, 1773, 1774, 1831, 1780, 1821,
- /* 610 */ 1821, 1846, 1808, 1812, 1836, 1840, 1841, 1843, 1844, 1845,
- /* 620 */ 1852, 1822, 1804, 1849, 1858, 1859, 1873, 1861, 1875, 1865,
- /* 630 */ 1866, 1842, 1615, 1876, 1619, 1877, 1878, 1879, 1880, 1886,
- /* 640 */ 1881, 1918, 1884, 1874, 1883, 1920, 1888, 1882, 1885, 1926,
- /* 650 */ 1893, 1887, 1892, 1933, 1901, 1891, 1898, 1939, 1906, 1909,
- /* 660 */ 1943, 1923, 1925, 1927, 1928, 1931, 1934,
+ /* 110 */ 213, 213, 213, 213, 213, 213, 213, 517, 881, 165,
+ /* 120 */ 403, 403, 573, 386, 849, 361, 361, 361, 386, 298,
+ /* 130 */ 298, 18, 350, 350, 67, 67, 294, 294, 270, 357,
+ /* 140 */ 198, 198, 198, 198, 198, 198, 198, 1119, 21, 383,
+ /* 150 */ 501, 105, 665, 484, 715, 828, 366, 799, 506, 800,
+ /* 160 */ 717, 649, 717, 924, 756, 756, 756, 807, 873, 980,
+ /* 170 */ 1174, 1078, 1206, 1232, 1232, 1206, 1107, 1107, 1232, 1232,
+ /* 180 */ 1232, 1257, 1257, 1264, 10, 18, 10, 1272, 1277, 10,
+ /* 190 */ 1272, 10, 10, 10, 1232, 10, 1257, 67, 67, 67,
+ /* 200 */ 67, 67, 67, 67, 67, 67, 67, 67, 1232, 1257,
+ /* 210 */ 294, 1264, 227, 1166, 18, 227, 1232, 1232, 1272, 227,
+ /* 220 */ 1118, 294, 294, 294, 294, 1118, 294, 1201, 227, 270,
+ /* 230 */ 227, 298, 1345, 294, 1139, 1118, 294, 294, 1139, 1118,
+ /* 240 */ 294, 294, 67, 1141, 1235, 1139, 1154, 1157, 1170, 980,
+ /* 250 */ 1178, 298, 1382, 1159, 1162, 1167, 1159, 1162, 1159, 1162,
+ /* 260 */ 1320, 1326, 294, 357, 1232, 227, 1383, 1257, 2259, 2259,
+ /* 270 */ 2259, 2259, 2259, 2259, 2259, 83, 1593, 214, 724, 126,
+ /* 280 */ 209, 491, 562, 622, 813, 535, 321, 698, 698, 698,
+ /* 290 */ 698, 698, 698, 698, 698, 521, 309, 13, 13, 115,
+ /* 300 */ 69, 599, 267, 708, 194, 377, 190, 465, 49, 49,
+ /* 310 */ 49, 49, 684, 944, 938, 994, 999, 1001, 947, 1047,
+ /* 320 */ 1101, 941, 920, 1025, 1043, 1069, 1079, 1091, 1093, 1102,
+ /* 330 */ 1160, 1073, 973, 895, 1103, 1075, 1082, 1058, 1106, 1114,
+ /* 340 */ 1115, 1133, 1163, 1165, 1193, 1198, 1064, 860, 1143, 1176,
+ /* 350 */ 839, 1473, 1476, 1301, 1479, 1480, 1439, 1482, 1448, 1297,
+ /* 360 */ 1450, 1451, 1452, 1302, 1490, 1456, 1457, 1307, 1495, 1310,
+ /* 370 */ 1502, 1477, 1513, 1493, 1516, 1483, 1348, 1347, 1521, 1527,
+ /* 380 */ 1357, 1359, 1525, 1528, 1494, 1529, 1499, 1539, 1542, 1543,
+ /* 390 */ 1395, 1545, 1552, 1554, 1555, 1556, 1403, 1514, 1548, 1413,
+ /* 400 */ 1559, 1560, 1561, 1562, 1566, 1567, 1568, 1569, 1570, 1571,
+ /* 410 */ 1573, 1574, 1575, 1578, 1544, 1590, 1591, 1598, 1600, 1601,
+ /* 420 */ 1612, 1581, 1604, 1605, 1614, 1616, 1619, 1564, 1621, 1572,
+ /* 430 */ 1627, 1629, 1582, 1584, 1588, 1620, 1587, 1622, 1589, 1638,
+ /* 440 */ 1599, 1602, 1650, 1651, 1652, 1617, 1492, 1657, 1658, 1665,
+ /* 450 */ 1607, 1667, 1671, 1608, 1597, 1633, 1673, 1639, 1630, 1637,
+ /* 460 */ 1679, 1645, 1640, 1647, 1688, 1654, 1644, 1653, 1693, 1694,
+ /* 470 */ 1698, 1699, 1609, 1606, 1666, 1678, 1703, 1670, 1675, 1680,
+ /* 480 */ 1686, 1695, 1684, 1710, 1711, 1732, 1713, 1702, 1737, 1722,
+ /* 490 */ 1717, 1754, 1720, 1756, 1724, 1757, 1738, 1741, 1762, 1610,
+ /* 500 */ 1731, 1768, 1613, 1748, 1618, 1624, 1771, 1773, 1625, 1615,
+ /* 510 */ 1780, 1781, 1783, 1696, 1697, 1755, 1634, 1798, 1712, 1648,
+ /* 520 */ 1715, 1800, 1778, 1659, 1735, 1727, 1782, 1784, 1611, 1740,
+ /* 530 */ 1742, 1745, 1746, 1747, 1749, 1787, 1750, 1758, 1760, 1761,
+ /* 540 */ 1763, 1792, 1793, 1799, 1765, 1801, 1626, 1770, 1772, 1815,
+ /* 550 */ 1641, 1809, 1817, 1818, 1775, 1825, 1642, 1779, 1835, 1849,
+ /* 560 */ 1858, 1859, 1860, 1864, 1779, 1898, 1885, 1716, 1867, 1823,
+ /* 570 */ 1824, 1826, 1827, 1829, 1830, 1870, 1833, 1834, 1878, 1891,
+ /* 580 */ 1743, 1844, 1819, 1845, 1899, 1901, 1848, 1850, 1906, 1853,
+ /* 590 */ 1854, 1908, 1856, 1863, 1911, 1861, 1865, 1913, 1868, 1836,
+ /* 600 */ 1843, 1847, 1851, 1936, 1862, 1872, 1876, 1927, 1877, 1925,
+ /* 610 */ 1925, 1947, 1914, 1909, 1938, 1940, 1943, 1945, 1954, 1955,
+ /* 620 */ 1958, 1928, 1922, 1952, 1963, 1965, 1988, 1976, 1991, 1980,
+ /* 630 */ 1981, 1950, 1675, 1985, 1680, 1994, 1995, 1998, 2000, 2014,
+ /* 640 */ 2003, 2039, 2005, 2002, 2011, 2051, 2017, 2006, 2015, 2055,
+ /* 650 */ 2021, 2013, 2019, 2069, 2035, 2024, 2034, 2076, 2042, 2043,
+ /* 660 */ 2079, 2058, 2060, 2062, 2063, 2061, 2066,
};
-#define YY_REDUCE_COUNT (275)
-#define YY_REDUCE_MIN (-389)
-#define YY_REDUCE_MAX (2125)
+#define YY_REDUCE_COUNT (274)
+#define YY_REDUCE_MIN (-403)
+#define YY_REDUCE_MAX (1873)
static const short yy_reduce_ofst[] = {
- /* 0 */ -75, 602, 629, -279, -15, 753, 815, 867, 923, 933,
- /* 10 */ 273, 982, 104, 1042, 1052, 1101, 1152, 1204, 1214, 1231,
- /* 20 */ 1291, 1308, 1376, 1394, 1453, 1502, 1523, 1581, 1591, 1643,
- /* 30 */ 1660, 1709, 1726, 1775, 1829, 1839, 1890, 1908, 1967, 1988,
- /* 40 */ 2044, 2054, 2078, 2125, -312, 33, 425, -295, 436, 490,
- /* 50 */ -308, 654, -345, -68, 77, 151, 246, -316, -310, -307,
- /* 60 */ -276, -285, -238, -87, -78, -313, -33, 239, 367, 418,
- /* 70 */ 420, 427, 505, 623, 689, 161, 698, 722, 723, -234,
- /* 80 */ 729, 730, 727, 742, 744, -244, -153, 749, 754, -10,
- /* 90 */ 782, 767, 91, -124, -389, -389, -389, -164, -51, -30,
- /* 100 */ -21, 100, 148, 205, 272, 311, 323, 357, 381, 382,
- /* 110 */ 430, 464, 540, 603, 607, 640, 641, 28, -263, -335,
- /* 120 */ 392, 443, 438, -229, -85, 234, 467, 570, 79, 18,
- /* 130 */ 365, 312, 341, 413, 223, 665, 545, 596, 668, 610,
- /* 140 */ 90, 220, 249, 308, 373, 387, 398, 252, 534, 518,
- /* 150 */ 645, 571, 677, 693, 680, 781, 781, 839, 848, 817,
- /* 160 */ 793, 769, 769, 769, 783, 758, 762, 763, 777, 781,
- /* 170 */ 812, 814, 836, 876, 918, 919, 880, 884, 889, 928,
- /* 180 */ 936, 941, 935, 945, 894, 946, 914, 950, 912, 911,
- /* 190 */ 958, 916, 960, 961, 963, 969, 968, 976, 953, 956,
- /* 200 */ 957, 959, 964, 965, 974, 975, 984, 985, 986, 978,
- /* 210 */ 981, 947, 954, 1005, 930, 979, 1009, 1013, 1016, 971,
- /* 220 */ 1017, 980, 988, 991, 993, 995, 987, 998, 992, 1030,
- /* 230 */ 1022, 1038, 1014, 989, 1004, 962, 1000, 1023, 1028, 970,
- /* 240 */ 1011, 1031, 1033, 781, 973, 972, 983, 990, 996, 999,
- /* 250 */ 1024, 769, 1051, 1027, 997, 1029, 1003, 1018, 1036, 1032,
- /* 260 */ 1037, 1055, 1070, 1062, 1086, 1098, 1095, 1105, 1109, 1053,
- /* 270 */ 1068, 1113, 1114, 1118, 1132, 1149,
+ /* 0 */ 263, 629, 747, -278, -14, 679, 846, 886, 955, 1009,
+ /* 10 */ 272, 1059, 104, 1076, 1126, 806, 1152, 1202, 1214, 1228,
+ /* 20 */ 1281, 1334, 1346, 1360, 1414, 1428, 1440, 1497, 1507, 1523,
+ /* 30 */ 1547, 1603, 1623, 1643, 1663, 1683, 1723, 1739, 1790, 1806,
+ /* 40 */ 1857, 1873, 304, 926, 112, 435, 552, -295, 610, -3,
+ /* 50 */ -261, -154, -323, 305, 575, 577, 635, -310, -272, -312,
+ /* 60 */ -307, -403, -311, -284, -70, -90, 225, 346, 404, 414,
+ /* 70 */ 429, 434, 497, 644, 645, 339, 674, 697, 710, -342,
+ /* 80 */ 722, 749, -313, 758, 772, -244, -251, 797, -17, 20,
+ /* 90 */ 48, 804, 228, 86, -379, -379, -379, -240, -13, 100,
+ /* 100 */ 101, 130, 206, 375, 385, 428, 459, 461, 474, 522,
+ /* 110 */ 566, 628, 656, 678, 694, 696, 740, -226, -92, -72,
+ /* 120 */ -256, -130, 173, 4, 34, 186, 362, 440, 97, 91,
+ /* 130 */ 303, -234, -109, 180, 274, 536, 317, 545, 579, 234,
+ /* 140 */ -333, 40, 144, 175, 242, 319, 410, 450, 445, 348,
+ /* 150 */ 369, 495, 572, 557, 620, 620, 734, 667, 688, 727,
+ /* 160 */ 735, 735, 735, 770, 752, 784, 786, 762, 620, 861,
+ /* 170 */ 874, 866, 894, 940, 942, 901, 907, 908, 956, 957,
+ /* 180 */ 960, 971, 972, 915, 966, 936, 974, 933, 935, 978,
+ /* 190 */ 939, 981, 983, 985, 991, 989, 1000, 975, 976, 977,
+ /* 200 */ 979, 982, 984, 986, 990, 992, 995, 996, 998, 1006,
+ /* 210 */ 993, 951, 1021, 987, 997, 1026, 1030, 1033, 988, 1032,
+ /* 220 */ 1002, 1005, 1008, 1013, 1014, 1010, 1015, 1011, 1048, 1038,
+ /* 230 */ 1053, 1031, 1003, 1028, 968, 1020, 1036, 1037, 1004, 1022,
+ /* 240 */ 1044, 1049, 620, 1016, 1012, 1018, 1023, 1027, 1024, 1029,
+ /* 250 */ 735, 1061, 1035, 1045, 1017, 1039, 1046, 1034, 1051, 1050,
+ /* 260 */ 1052, 1081, 1067, 1092, 1129, 1104, 1112, 1136, 1096, 1086,
+ /* 270 */ 1137, 1138, 1142, 1130, 1168,
};
static const YYACTIONTYPE yy_default[] = {
- /* 0 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 10 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 20 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 30 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 40 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 50 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 60 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 70 */ 1464, 1464, 1464, 1464, 1464, 1538, 1464, 1464, 1464, 1464,
- /* 80 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 90 */ 1464, 1464, 1536, 1694, 1464, 1871, 1464, 1464, 1464, 1464,
- /* 100 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 110 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 120 */ 1464, 1464, 1538, 1464, 1536, 1883, 1883, 1883, 1464, 1464,
- /* 130 */ 1464, 1464, 1737, 1737, 1464, 1464, 1464, 1464, 1636, 1464,
- /* 140 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1729, 1464, 1952,
- /* 150 */ 1464, 1464, 1464, 1735, 1906, 1464, 1464, 1464, 1464, 1589,
- /* 160 */ 1898, 1875, 1889, 1876, 1873, 1937, 1937, 1937, 1892, 1464,
- /* 170 */ 1902, 1464, 1722, 1699, 1464, 1464, 1699, 1696, 1696, 1464,
- /* 180 */ 1464, 1464, 1464, 1464, 1464, 1538, 1464, 1538, 1464, 1464,
- /* 190 */ 1538, 1464, 1538, 1538, 1538, 1464, 1538, 1464, 1464, 1464,
- /* 200 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 210 */ 1464, 1464, 1464, 1536, 1731, 1464, 1536, 1464, 1464, 1464,
- /* 220 */ 1536, 1911, 1464, 1464, 1464, 1464, 1911, 1464, 1464, 1536,
- /* 230 */ 1464, 1536, 1464, 1464, 1464, 1913, 1911, 1464, 1464, 1913,
- /* 240 */ 1911, 1464, 1464, 1464, 1925, 1921, 1913, 1929, 1927, 1904,
- /* 250 */ 1902, 1889, 1464, 1464, 1943, 1939, 1955, 1943, 1939, 1943,
- /* 260 */ 1939, 1464, 1605, 1464, 1464, 1464, 1536, 1496, 1464, 1724,
- /* 270 */ 1737, 1639, 1639, 1639, 1539, 1469, 1464, 1464, 1464, 1464,
- /* 280 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1808, 1924,
- /* 290 */ 1923, 1847, 1846, 1845, 1843, 1807, 1464, 1601, 1806, 1805,
- /* 300 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1799, 1800,
- /* 310 */ 1798, 1797, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 320 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 330 */ 1872, 1464, 1940, 1944, 1464, 1464, 1464, 1464, 1464, 1783,
- /* 340 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 350 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 360 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 370 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 380 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 390 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 400 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 410 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 420 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 430 */ 1464, 1464, 1464, 1464, 1501, 1464, 1464, 1464, 1464, 1464,
- /* 440 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 450 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 460 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 470 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1573, 1572,
- /* 480 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 490 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 500 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 510 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1741, 1464, 1464,
- /* 520 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1905, 1464, 1464,
- /* 530 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 540 */ 1464, 1464, 1464, 1783, 1464, 1922, 1464, 1882, 1878, 1464,
- /* 550 */ 1464, 1874, 1782, 1464, 1464, 1938, 1464, 1464, 1464, 1464,
- /* 560 */ 1464, 1464, 1464, 1464, 1464, 1867, 1464, 1464, 1840, 1825,
- /* 570 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 580 */ 1793, 1464, 1464, 1464, 1464, 1464, 1633, 1464, 1464, 1464,
- /* 590 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1618,
- /* 600 */ 1616, 1615, 1614, 1464, 1611, 1464, 1464, 1464, 1464, 1642,
- /* 610 */ 1641, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 620 */ 1464, 1464, 1464, 1557, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 630 */ 1464, 1464, 1549, 1464, 1548, 1464, 1464, 1464, 1464, 1464,
- /* 640 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 650 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 660 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464,
+ /* 0 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 10 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 20 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 30 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 40 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 50 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 60 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 70 */ 1461, 1461, 1461, 1461, 1461, 1535, 1461, 1461, 1461, 1461,
+ /* 80 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 90 */ 1461, 1461, 1533, 1691, 1461, 1866, 1461, 1461, 1461, 1461,
+ /* 100 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 110 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 120 */ 1461, 1461, 1535, 1461, 1533, 1878, 1878, 1878, 1461, 1461,
+ /* 130 */ 1461, 1461, 1732, 1732, 1461, 1461, 1461, 1461, 1633, 1461,
+ /* 140 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1726, 1461, 1947,
+ /* 150 */ 1461, 1461, 1461, 1901, 1461, 1461, 1461, 1461, 1586, 1893,
+ /* 160 */ 1870, 1884, 1871, 1868, 1932, 1932, 1932, 1887, 1461, 1897,
+ /* 170 */ 1461, 1719, 1696, 1461, 1461, 1696, 1693, 1693, 1461, 1461,
+ /* 180 */ 1461, 1461, 1461, 1461, 1535, 1461, 1535, 1461, 1461, 1535,
+ /* 190 */ 1461, 1535, 1535, 1535, 1461, 1535, 1461, 1461, 1461, 1461,
+ /* 200 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 210 */ 1461, 1461, 1533, 1728, 1461, 1533, 1461, 1461, 1461, 1533,
+ /* 220 */ 1906, 1461, 1461, 1461, 1461, 1906, 1461, 1461, 1533, 1461,
+ /* 230 */ 1533, 1461, 1461, 1461, 1908, 1906, 1461, 1461, 1908, 1906,
+ /* 240 */ 1461, 1461, 1461, 1920, 1916, 1908, 1924, 1922, 1899, 1897,
+ /* 250 */ 1884, 1461, 1461, 1938, 1934, 1950, 1938, 1934, 1938, 1934,
+ /* 260 */ 1461, 1602, 1461, 1461, 1461, 1533, 1493, 1461, 1721, 1732,
+ /* 270 */ 1636, 1636, 1636, 1536, 1466, 1461, 1461, 1461, 1461, 1461,
+ /* 280 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1803, 1919, 1918,
+ /* 290 */ 1842, 1841, 1840, 1838, 1802, 1461, 1598, 1801, 1800, 1461,
+ /* 300 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1794, 1795,
+ /* 310 */ 1793, 1792, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 320 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 330 */ 1867, 1461, 1935, 1939, 1461, 1461, 1461, 1461, 1461, 1778,
+ /* 340 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 350 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 360 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 370 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 380 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 390 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 400 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 410 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 420 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 430 */ 1461, 1461, 1461, 1461, 1498, 1461, 1461, 1461, 1461, 1461,
+ /* 440 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 450 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 460 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 470 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1570, 1569,
+ /* 480 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 490 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 500 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 510 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1736, 1461, 1461,
+ /* 520 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1900, 1461, 1461,
+ /* 530 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 540 */ 1461, 1461, 1461, 1778, 1461, 1917, 1461, 1877, 1873, 1461,
+ /* 550 */ 1461, 1869, 1777, 1461, 1461, 1933, 1461, 1461, 1461, 1461,
+ /* 560 */ 1461, 1461, 1461, 1461, 1461, 1862, 1461, 1461, 1835, 1820,
+ /* 570 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 580 */ 1788, 1461, 1461, 1461, 1461, 1461, 1630, 1461, 1461, 1461,
+ /* 590 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1615,
+ /* 600 */ 1613, 1612, 1611, 1461, 1608, 1461, 1461, 1461, 1461, 1639,
+ /* 610 */ 1638, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 620 */ 1461, 1461, 1461, 1554, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 630 */ 1461, 1461, 1546, 1461, 1545, 1461, 1461, 1461, 1461, 1461,
+ /* 640 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 650 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 660 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461,
};
/********** End of lemon-generated parsing tables *****************************/
@@ -1686,62 +1642,61 @@ static const char *const yyTokenName[] = {
/* 368 */ "agg_func_opt",
/* 369 */ "bufsize_opt",
/* 370 */ "stream_name",
- /* 371 */ "into_opt",
- /* 372 */ "dnode_list",
- /* 373 */ "where_clause_opt",
- /* 374 */ "signed",
- /* 375 */ "literal_func",
- /* 376 */ "literal_list",
- /* 377 */ "table_alias",
- /* 378 */ "column_alias",
- /* 379 */ "expression",
- /* 380 */ "pseudo_column",
- /* 381 */ "column_reference",
- /* 382 */ "function_expression",
- /* 383 */ "subquery",
- /* 384 */ "star_func",
- /* 385 */ "star_func_para_list",
- /* 386 */ "noarg_func",
- /* 387 */ "other_para_list",
- /* 388 */ "star_func_para",
- /* 389 */ "predicate",
- /* 390 */ "compare_op",
- /* 391 */ "in_op",
- /* 392 */ "in_predicate_value",
- /* 393 */ "boolean_value_expression",
- /* 394 */ "boolean_primary",
- /* 395 */ "common_expression",
- /* 396 */ "from_clause_opt",
- /* 397 */ "table_reference_list",
- /* 398 */ "table_reference",
- /* 399 */ "table_primary",
- /* 400 */ "joined_table",
- /* 401 */ "alias_opt",
- /* 402 */ "parenthesized_joined_table",
- /* 403 */ "join_type",
- /* 404 */ "search_condition",
- /* 405 */ "query_specification",
- /* 406 */ "set_quantifier_opt",
- /* 407 */ "select_list",
- /* 408 */ "partition_by_clause_opt",
- /* 409 */ "range_opt",
- /* 410 */ "every_opt",
- /* 411 */ "fill_opt",
- /* 412 */ "twindow_clause_opt",
- /* 413 */ "group_by_clause_opt",
- /* 414 */ "having_clause_opt",
- /* 415 */ "select_item",
- /* 416 */ "fill_mode",
- /* 417 */ "group_by_list",
- /* 418 */ "query_expression_body",
- /* 419 */ "order_by_clause_opt",
- /* 420 */ "slimit_clause_opt",
- /* 421 */ "limit_clause_opt",
- /* 422 */ "query_primary",
- /* 423 */ "sort_specification_list",
- /* 424 */ "sort_specification",
- /* 425 */ "ordering_specification_opt",
- /* 426 */ "null_ordering_opt",
+ /* 371 */ "dnode_list",
+ /* 372 */ "where_clause_opt",
+ /* 373 */ "signed",
+ /* 374 */ "literal_func",
+ /* 375 */ "literal_list",
+ /* 376 */ "table_alias",
+ /* 377 */ "column_alias",
+ /* 378 */ "expression",
+ /* 379 */ "pseudo_column",
+ /* 380 */ "column_reference",
+ /* 381 */ "function_expression",
+ /* 382 */ "subquery",
+ /* 383 */ "star_func",
+ /* 384 */ "star_func_para_list",
+ /* 385 */ "noarg_func",
+ /* 386 */ "other_para_list",
+ /* 387 */ "star_func_para",
+ /* 388 */ "predicate",
+ /* 389 */ "compare_op",
+ /* 390 */ "in_op",
+ /* 391 */ "in_predicate_value",
+ /* 392 */ "boolean_value_expression",
+ /* 393 */ "boolean_primary",
+ /* 394 */ "common_expression",
+ /* 395 */ "from_clause_opt",
+ /* 396 */ "table_reference_list",
+ /* 397 */ "table_reference",
+ /* 398 */ "table_primary",
+ /* 399 */ "joined_table",
+ /* 400 */ "alias_opt",
+ /* 401 */ "parenthesized_joined_table",
+ /* 402 */ "join_type",
+ /* 403 */ "search_condition",
+ /* 404 */ "query_specification",
+ /* 405 */ "set_quantifier_opt",
+ /* 406 */ "select_list",
+ /* 407 */ "partition_by_clause_opt",
+ /* 408 */ "range_opt",
+ /* 409 */ "every_opt",
+ /* 410 */ "fill_opt",
+ /* 411 */ "twindow_clause_opt",
+ /* 412 */ "group_by_clause_opt",
+ /* 413 */ "having_clause_opt",
+ /* 414 */ "select_item",
+ /* 415 */ "fill_mode",
+ /* 416 */ "group_by_list",
+ /* 417 */ "query_expression_body",
+ /* 418 */ "order_by_clause_opt",
+ /* 419 */ "slimit_clause_opt",
+ /* 420 */ "limit_clause_opt",
+ /* 421 */ "query_primary",
+ /* 422 */ "sort_specification_list",
+ /* 423 */ "sort_specification",
+ /* 424 */ "ordering_specification_opt",
+ /* 425 */ "null_ordering_opt",
};
#endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */
@@ -2015,231 +1970,229 @@ static const char *const yyRuleName[] = {
/* 263 */ "agg_func_opt ::= AGGREGATE",
/* 264 */ "bufsize_opt ::=",
/* 265 */ "bufsize_opt ::= BUFSIZE NK_INTEGER",
- /* 266 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression",
+ /* 266 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name AS query_expression",
/* 267 */ "cmd ::= DROP STREAM exists_opt stream_name",
- /* 268 */ "into_opt ::=",
- /* 269 */ "into_opt ::= INTO full_table_name",
- /* 270 */ "stream_options ::=",
- /* 271 */ "stream_options ::= stream_options TRIGGER AT_ONCE",
- /* 272 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE",
- /* 273 */ "stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal",
- /* 274 */ "stream_options ::= stream_options WATERMARK duration_literal",
- /* 275 */ "stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER",
- /* 276 */ "cmd ::= KILL CONNECTION NK_INTEGER",
- /* 277 */ "cmd ::= KILL QUERY NK_STRING",
- /* 278 */ "cmd ::= KILL TRANSACTION NK_INTEGER",
- /* 279 */ "cmd ::= BALANCE VGROUP",
- /* 280 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER",
- /* 281 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list",
- /* 282 */ "cmd ::= SPLIT VGROUP NK_INTEGER",
- /* 283 */ "dnode_list ::= DNODE NK_INTEGER",
- /* 284 */ "dnode_list ::= dnode_list DNODE NK_INTEGER",
- /* 285 */ "cmd ::= DELETE FROM full_table_name where_clause_opt",
- /* 286 */ "cmd ::= query_expression",
- /* 287 */ "cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression",
- /* 288 */ "cmd ::= INSERT INTO full_table_name query_expression",
- /* 289 */ "literal ::= NK_INTEGER",
- /* 290 */ "literal ::= NK_FLOAT",
- /* 291 */ "literal ::= NK_STRING",
- /* 292 */ "literal ::= NK_BOOL",
- /* 293 */ "literal ::= TIMESTAMP NK_STRING",
- /* 294 */ "literal ::= duration_literal",
- /* 295 */ "literal ::= NULL",
- /* 296 */ "literal ::= NK_QUESTION",
- /* 297 */ "duration_literal ::= NK_VARIABLE",
- /* 298 */ "signed ::= NK_INTEGER",
- /* 299 */ "signed ::= NK_PLUS NK_INTEGER",
- /* 300 */ "signed ::= NK_MINUS NK_INTEGER",
- /* 301 */ "signed ::= NK_FLOAT",
- /* 302 */ "signed ::= NK_PLUS NK_FLOAT",
- /* 303 */ "signed ::= NK_MINUS NK_FLOAT",
- /* 304 */ "signed_literal ::= signed",
- /* 305 */ "signed_literal ::= NK_STRING",
- /* 306 */ "signed_literal ::= NK_BOOL",
- /* 307 */ "signed_literal ::= TIMESTAMP NK_STRING",
- /* 308 */ "signed_literal ::= duration_literal",
- /* 309 */ "signed_literal ::= NULL",
- /* 310 */ "signed_literal ::= literal_func",
- /* 311 */ "signed_literal ::= NK_QUESTION",
- /* 312 */ "literal_list ::= signed_literal",
- /* 313 */ "literal_list ::= literal_list NK_COMMA signed_literal",
- /* 314 */ "db_name ::= NK_ID",
- /* 315 */ "table_name ::= NK_ID",
- /* 316 */ "column_name ::= NK_ID",
- /* 317 */ "function_name ::= NK_ID",
- /* 318 */ "table_alias ::= NK_ID",
- /* 319 */ "column_alias ::= NK_ID",
- /* 320 */ "user_name ::= NK_ID",
- /* 321 */ "topic_name ::= NK_ID",
- /* 322 */ "stream_name ::= NK_ID",
- /* 323 */ "cgroup_name ::= NK_ID",
- /* 324 */ "expression ::= literal",
- /* 325 */ "expression ::= pseudo_column",
- /* 326 */ "expression ::= column_reference",
- /* 327 */ "expression ::= function_expression",
- /* 328 */ "expression ::= subquery",
- /* 329 */ "expression ::= NK_LP expression NK_RP",
- /* 330 */ "expression ::= NK_PLUS expression",
- /* 331 */ "expression ::= NK_MINUS expression",
- /* 332 */ "expression ::= expression NK_PLUS expression",
- /* 333 */ "expression ::= expression NK_MINUS expression",
- /* 334 */ "expression ::= expression NK_STAR expression",
- /* 335 */ "expression ::= expression NK_SLASH expression",
- /* 336 */ "expression ::= expression NK_REM expression",
- /* 337 */ "expression ::= column_reference NK_ARROW NK_STRING",
- /* 338 */ "expression ::= expression NK_BITAND expression",
- /* 339 */ "expression ::= expression NK_BITOR expression",
- /* 340 */ "expression_list ::= expression",
- /* 341 */ "expression_list ::= expression_list NK_COMMA expression",
- /* 342 */ "column_reference ::= column_name",
- /* 343 */ "column_reference ::= table_name NK_DOT column_name",
- /* 344 */ "pseudo_column ::= ROWTS",
- /* 345 */ "pseudo_column ::= TBNAME",
- /* 346 */ "pseudo_column ::= table_name NK_DOT TBNAME",
- /* 347 */ "pseudo_column ::= QSTART",
- /* 348 */ "pseudo_column ::= QEND",
- /* 349 */ "pseudo_column ::= QDURATION",
- /* 350 */ "pseudo_column ::= WSTART",
- /* 351 */ "pseudo_column ::= WEND",
- /* 352 */ "pseudo_column ::= WDURATION",
- /* 353 */ "function_expression ::= function_name NK_LP expression_list NK_RP",
- /* 354 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP",
- /* 355 */ "function_expression ::= CAST NK_LP expression AS type_name NK_RP",
- /* 356 */ "function_expression ::= literal_func",
- /* 357 */ "literal_func ::= noarg_func NK_LP NK_RP",
- /* 358 */ "literal_func ::= NOW",
- /* 359 */ "noarg_func ::= NOW",
- /* 360 */ "noarg_func ::= TODAY",
- /* 361 */ "noarg_func ::= TIMEZONE",
- /* 362 */ "noarg_func ::= DATABASE",
- /* 363 */ "noarg_func ::= CLIENT_VERSION",
- /* 364 */ "noarg_func ::= SERVER_VERSION",
- /* 365 */ "noarg_func ::= SERVER_STATUS",
- /* 366 */ "noarg_func ::= CURRENT_USER",
- /* 367 */ "noarg_func ::= USER",
- /* 368 */ "star_func ::= COUNT",
- /* 369 */ "star_func ::= FIRST",
- /* 370 */ "star_func ::= LAST",
- /* 371 */ "star_func ::= LAST_ROW",
- /* 372 */ "star_func_para_list ::= NK_STAR",
- /* 373 */ "star_func_para_list ::= other_para_list",
- /* 374 */ "other_para_list ::= star_func_para",
- /* 375 */ "other_para_list ::= other_para_list NK_COMMA star_func_para",
- /* 376 */ "star_func_para ::= expression",
- /* 377 */ "star_func_para ::= table_name NK_DOT NK_STAR",
- /* 378 */ "predicate ::= expression compare_op expression",
- /* 379 */ "predicate ::= expression BETWEEN expression AND expression",
- /* 380 */ "predicate ::= expression NOT BETWEEN expression AND expression",
- /* 381 */ "predicate ::= expression IS NULL",
- /* 382 */ "predicate ::= expression IS NOT NULL",
- /* 383 */ "predicate ::= expression in_op in_predicate_value",
- /* 384 */ "compare_op ::= NK_LT",
- /* 385 */ "compare_op ::= NK_GT",
- /* 386 */ "compare_op ::= NK_LE",
- /* 387 */ "compare_op ::= NK_GE",
- /* 388 */ "compare_op ::= NK_NE",
- /* 389 */ "compare_op ::= NK_EQ",
- /* 390 */ "compare_op ::= LIKE",
- /* 391 */ "compare_op ::= NOT LIKE",
- /* 392 */ "compare_op ::= MATCH",
- /* 393 */ "compare_op ::= NMATCH",
- /* 394 */ "compare_op ::= CONTAINS",
- /* 395 */ "in_op ::= IN",
- /* 396 */ "in_op ::= NOT IN",
- /* 397 */ "in_predicate_value ::= NK_LP literal_list NK_RP",
- /* 398 */ "boolean_value_expression ::= boolean_primary",
- /* 399 */ "boolean_value_expression ::= NOT boolean_primary",
- /* 400 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression",
- /* 401 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression",
- /* 402 */ "boolean_primary ::= predicate",
- /* 403 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP",
- /* 404 */ "common_expression ::= expression",
- /* 405 */ "common_expression ::= boolean_value_expression",
- /* 406 */ "from_clause_opt ::=",
- /* 407 */ "from_clause_opt ::= FROM table_reference_list",
- /* 408 */ "table_reference_list ::= table_reference",
- /* 409 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference",
- /* 410 */ "table_reference ::= table_primary",
- /* 411 */ "table_reference ::= joined_table",
- /* 412 */ "table_primary ::= table_name alias_opt",
- /* 413 */ "table_primary ::= db_name NK_DOT table_name alias_opt",
- /* 414 */ "table_primary ::= subquery alias_opt",
- /* 415 */ "table_primary ::= parenthesized_joined_table",
- /* 416 */ "alias_opt ::=",
- /* 417 */ "alias_opt ::= table_alias",
- /* 418 */ "alias_opt ::= AS table_alias",
- /* 419 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP",
- /* 420 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP",
- /* 421 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition",
- /* 422 */ "join_type ::=",
- /* 423 */ "join_type ::= INNER",
- /* 424 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt",
- /* 425 */ "set_quantifier_opt ::=",
- /* 426 */ "set_quantifier_opt ::= DISTINCT",
- /* 427 */ "set_quantifier_opt ::= ALL",
- /* 428 */ "select_list ::= select_item",
- /* 429 */ "select_list ::= select_list NK_COMMA select_item",
- /* 430 */ "select_item ::= NK_STAR",
- /* 431 */ "select_item ::= common_expression",
- /* 432 */ "select_item ::= common_expression column_alias",
- /* 433 */ "select_item ::= common_expression AS column_alias",
- /* 434 */ "select_item ::= table_name NK_DOT NK_STAR",
- /* 435 */ "where_clause_opt ::=",
- /* 436 */ "where_clause_opt ::= WHERE search_condition",
- /* 437 */ "partition_by_clause_opt ::=",
- /* 438 */ "partition_by_clause_opt ::= PARTITION BY expression_list",
- /* 439 */ "twindow_clause_opt ::=",
- /* 440 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP",
- /* 441 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP",
- /* 442 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt",
- /* 443 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt",
- /* 444 */ "sliding_opt ::=",
- /* 445 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP",
- /* 446 */ "fill_opt ::=",
- /* 447 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP",
- /* 448 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP",
- /* 449 */ "fill_mode ::= NONE",
- /* 450 */ "fill_mode ::= PREV",
- /* 451 */ "fill_mode ::= NULL",
- /* 452 */ "fill_mode ::= LINEAR",
- /* 453 */ "fill_mode ::= NEXT",
- /* 454 */ "group_by_clause_opt ::=",
- /* 455 */ "group_by_clause_opt ::= GROUP BY group_by_list",
- /* 456 */ "group_by_list ::= expression",
- /* 457 */ "group_by_list ::= group_by_list NK_COMMA expression",
- /* 458 */ "having_clause_opt ::=",
- /* 459 */ "having_clause_opt ::= HAVING search_condition",
- /* 460 */ "range_opt ::=",
- /* 461 */ "range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP",
- /* 462 */ "every_opt ::=",
- /* 463 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP",
- /* 464 */ "query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt",
- /* 465 */ "query_expression_body ::= query_primary",
- /* 466 */ "query_expression_body ::= query_expression_body UNION ALL query_expression_body",
- /* 467 */ "query_expression_body ::= query_expression_body UNION query_expression_body",
- /* 468 */ "query_primary ::= query_specification",
- /* 469 */ "query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP",
- /* 470 */ "order_by_clause_opt ::=",
- /* 471 */ "order_by_clause_opt ::= ORDER BY sort_specification_list",
- /* 472 */ "slimit_clause_opt ::=",
- /* 473 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER",
- /* 474 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER",
- /* 475 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER",
- /* 476 */ "limit_clause_opt ::=",
- /* 477 */ "limit_clause_opt ::= LIMIT NK_INTEGER",
- /* 478 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER",
- /* 479 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER",
- /* 480 */ "subquery ::= NK_LP query_expression NK_RP",
- /* 481 */ "search_condition ::= common_expression",
- /* 482 */ "sort_specification_list ::= sort_specification",
- /* 483 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification",
- /* 484 */ "sort_specification ::= expression ordering_specification_opt null_ordering_opt",
- /* 485 */ "ordering_specification_opt ::=",
- /* 486 */ "ordering_specification_opt ::= ASC",
- /* 487 */ "ordering_specification_opt ::= DESC",
- /* 488 */ "null_ordering_opt ::=",
- /* 489 */ "null_ordering_opt ::= NULLS FIRST",
- /* 490 */ "null_ordering_opt ::= NULLS LAST",
+ /* 268 */ "stream_options ::=",
+ /* 269 */ "stream_options ::= stream_options TRIGGER AT_ONCE",
+ /* 270 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE",
+ /* 271 */ "stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal",
+ /* 272 */ "stream_options ::= stream_options WATERMARK duration_literal",
+ /* 273 */ "stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER",
+ /* 274 */ "cmd ::= KILL CONNECTION NK_INTEGER",
+ /* 275 */ "cmd ::= KILL QUERY NK_STRING",
+ /* 276 */ "cmd ::= KILL TRANSACTION NK_INTEGER",
+ /* 277 */ "cmd ::= BALANCE VGROUP",
+ /* 278 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER",
+ /* 279 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list",
+ /* 280 */ "cmd ::= SPLIT VGROUP NK_INTEGER",
+ /* 281 */ "dnode_list ::= DNODE NK_INTEGER",
+ /* 282 */ "dnode_list ::= dnode_list DNODE NK_INTEGER",
+ /* 283 */ "cmd ::= DELETE FROM full_table_name where_clause_opt",
+ /* 284 */ "cmd ::= query_expression",
+ /* 285 */ "cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression",
+ /* 286 */ "cmd ::= INSERT INTO full_table_name query_expression",
+ /* 287 */ "literal ::= NK_INTEGER",
+ /* 288 */ "literal ::= NK_FLOAT",
+ /* 289 */ "literal ::= NK_STRING",
+ /* 290 */ "literal ::= NK_BOOL",
+ /* 291 */ "literal ::= TIMESTAMP NK_STRING",
+ /* 292 */ "literal ::= duration_literal",
+ /* 293 */ "literal ::= NULL",
+ /* 294 */ "literal ::= NK_QUESTION",
+ /* 295 */ "duration_literal ::= NK_VARIABLE",
+ /* 296 */ "signed ::= NK_INTEGER",
+ /* 297 */ "signed ::= NK_PLUS NK_INTEGER",
+ /* 298 */ "signed ::= NK_MINUS NK_INTEGER",
+ /* 299 */ "signed ::= NK_FLOAT",
+ /* 300 */ "signed ::= NK_PLUS NK_FLOAT",
+ /* 301 */ "signed ::= NK_MINUS NK_FLOAT",
+ /* 302 */ "signed_literal ::= signed",
+ /* 303 */ "signed_literal ::= NK_STRING",
+ /* 304 */ "signed_literal ::= NK_BOOL",
+ /* 305 */ "signed_literal ::= TIMESTAMP NK_STRING",
+ /* 306 */ "signed_literal ::= duration_literal",
+ /* 307 */ "signed_literal ::= NULL",
+ /* 308 */ "signed_literal ::= literal_func",
+ /* 309 */ "signed_literal ::= NK_QUESTION",
+ /* 310 */ "literal_list ::= signed_literal",
+ /* 311 */ "literal_list ::= literal_list NK_COMMA signed_literal",
+ /* 312 */ "db_name ::= NK_ID",
+ /* 313 */ "table_name ::= NK_ID",
+ /* 314 */ "column_name ::= NK_ID",
+ /* 315 */ "function_name ::= NK_ID",
+ /* 316 */ "table_alias ::= NK_ID",
+ /* 317 */ "column_alias ::= NK_ID",
+ /* 318 */ "user_name ::= NK_ID",
+ /* 319 */ "topic_name ::= NK_ID",
+ /* 320 */ "stream_name ::= NK_ID",
+ /* 321 */ "cgroup_name ::= NK_ID",
+ /* 322 */ "expression ::= literal",
+ /* 323 */ "expression ::= pseudo_column",
+ /* 324 */ "expression ::= column_reference",
+ /* 325 */ "expression ::= function_expression",
+ /* 326 */ "expression ::= subquery",
+ /* 327 */ "expression ::= NK_LP expression NK_RP",
+ /* 328 */ "expression ::= NK_PLUS expression",
+ /* 329 */ "expression ::= NK_MINUS expression",
+ /* 330 */ "expression ::= expression NK_PLUS expression",
+ /* 331 */ "expression ::= expression NK_MINUS expression",
+ /* 332 */ "expression ::= expression NK_STAR expression",
+ /* 333 */ "expression ::= expression NK_SLASH expression",
+ /* 334 */ "expression ::= expression NK_REM expression",
+ /* 335 */ "expression ::= column_reference NK_ARROW NK_STRING",
+ /* 336 */ "expression ::= expression NK_BITAND expression",
+ /* 337 */ "expression ::= expression NK_BITOR expression",
+ /* 338 */ "expression_list ::= expression",
+ /* 339 */ "expression_list ::= expression_list NK_COMMA expression",
+ /* 340 */ "column_reference ::= column_name",
+ /* 341 */ "column_reference ::= table_name NK_DOT column_name",
+ /* 342 */ "pseudo_column ::= ROWTS",
+ /* 343 */ "pseudo_column ::= TBNAME",
+ /* 344 */ "pseudo_column ::= table_name NK_DOT TBNAME",
+ /* 345 */ "pseudo_column ::= QSTART",
+ /* 346 */ "pseudo_column ::= QEND",
+ /* 347 */ "pseudo_column ::= QDURATION",
+ /* 348 */ "pseudo_column ::= WSTART",
+ /* 349 */ "pseudo_column ::= WEND",
+ /* 350 */ "pseudo_column ::= WDURATION",
+ /* 351 */ "function_expression ::= function_name NK_LP expression_list NK_RP",
+ /* 352 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP",
+ /* 353 */ "function_expression ::= CAST NK_LP expression AS type_name NK_RP",
+ /* 354 */ "function_expression ::= literal_func",
+ /* 355 */ "literal_func ::= noarg_func NK_LP NK_RP",
+ /* 356 */ "literal_func ::= NOW",
+ /* 357 */ "noarg_func ::= NOW",
+ /* 358 */ "noarg_func ::= TODAY",
+ /* 359 */ "noarg_func ::= TIMEZONE",
+ /* 360 */ "noarg_func ::= DATABASE",
+ /* 361 */ "noarg_func ::= CLIENT_VERSION",
+ /* 362 */ "noarg_func ::= SERVER_VERSION",
+ /* 363 */ "noarg_func ::= SERVER_STATUS",
+ /* 364 */ "noarg_func ::= CURRENT_USER",
+ /* 365 */ "noarg_func ::= USER",
+ /* 366 */ "star_func ::= COUNT",
+ /* 367 */ "star_func ::= FIRST",
+ /* 368 */ "star_func ::= LAST",
+ /* 369 */ "star_func ::= LAST_ROW",
+ /* 370 */ "star_func_para_list ::= NK_STAR",
+ /* 371 */ "star_func_para_list ::= other_para_list",
+ /* 372 */ "other_para_list ::= star_func_para",
+ /* 373 */ "other_para_list ::= other_para_list NK_COMMA star_func_para",
+ /* 374 */ "star_func_para ::= expression",
+ /* 375 */ "star_func_para ::= table_name NK_DOT NK_STAR",
+ /* 376 */ "predicate ::= expression compare_op expression",
+ /* 377 */ "predicate ::= expression BETWEEN expression AND expression",
+ /* 378 */ "predicate ::= expression NOT BETWEEN expression AND expression",
+ /* 379 */ "predicate ::= expression IS NULL",
+ /* 380 */ "predicate ::= expression IS NOT NULL",
+ /* 381 */ "predicate ::= expression in_op in_predicate_value",
+ /* 382 */ "compare_op ::= NK_LT",
+ /* 383 */ "compare_op ::= NK_GT",
+ /* 384 */ "compare_op ::= NK_LE",
+ /* 385 */ "compare_op ::= NK_GE",
+ /* 386 */ "compare_op ::= NK_NE",
+ /* 387 */ "compare_op ::= NK_EQ",
+ /* 388 */ "compare_op ::= LIKE",
+ /* 389 */ "compare_op ::= NOT LIKE",
+ /* 390 */ "compare_op ::= MATCH",
+ /* 391 */ "compare_op ::= NMATCH",
+ /* 392 */ "compare_op ::= CONTAINS",
+ /* 393 */ "in_op ::= IN",
+ /* 394 */ "in_op ::= NOT IN",
+ /* 395 */ "in_predicate_value ::= NK_LP literal_list NK_RP",
+ /* 396 */ "boolean_value_expression ::= boolean_primary",
+ /* 397 */ "boolean_value_expression ::= NOT boolean_primary",
+ /* 398 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression",
+ /* 399 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression",
+ /* 400 */ "boolean_primary ::= predicate",
+ /* 401 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP",
+ /* 402 */ "common_expression ::= expression",
+ /* 403 */ "common_expression ::= boolean_value_expression",
+ /* 404 */ "from_clause_opt ::=",
+ /* 405 */ "from_clause_opt ::= FROM table_reference_list",
+ /* 406 */ "table_reference_list ::= table_reference",
+ /* 407 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference",
+ /* 408 */ "table_reference ::= table_primary",
+ /* 409 */ "table_reference ::= joined_table",
+ /* 410 */ "table_primary ::= table_name alias_opt",
+ /* 411 */ "table_primary ::= db_name NK_DOT table_name alias_opt",
+ /* 412 */ "table_primary ::= subquery alias_opt",
+ /* 413 */ "table_primary ::= parenthesized_joined_table",
+ /* 414 */ "alias_opt ::=",
+ /* 415 */ "alias_opt ::= table_alias",
+ /* 416 */ "alias_opt ::= AS table_alias",
+ /* 417 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP",
+ /* 418 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP",
+ /* 419 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition",
+ /* 420 */ "join_type ::=",
+ /* 421 */ "join_type ::= INNER",
+ /* 422 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt",
+ /* 423 */ "set_quantifier_opt ::=",
+ /* 424 */ "set_quantifier_opt ::= DISTINCT",
+ /* 425 */ "set_quantifier_opt ::= ALL",
+ /* 426 */ "select_list ::= select_item",
+ /* 427 */ "select_list ::= select_list NK_COMMA select_item",
+ /* 428 */ "select_item ::= NK_STAR",
+ /* 429 */ "select_item ::= common_expression",
+ /* 430 */ "select_item ::= common_expression column_alias",
+ /* 431 */ "select_item ::= common_expression AS column_alias",
+ /* 432 */ "select_item ::= table_name NK_DOT NK_STAR",
+ /* 433 */ "where_clause_opt ::=",
+ /* 434 */ "where_clause_opt ::= WHERE search_condition",
+ /* 435 */ "partition_by_clause_opt ::=",
+ /* 436 */ "partition_by_clause_opt ::= PARTITION BY expression_list",
+ /* 437 */ "twindow_clause_opt ::=",
+ /* 438 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP",
+ /* 439 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP",
+ /* 440 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt",
+ /* 441 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt",
+ /* 442 */ "sliding_opt ::=",
+ /* 443 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP",
+ /* 444 */ "fill_opt ::=",
+ /* 445 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP",
+ /* 446 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP",
+ /* 447 */ "fill_mode ::= NONE",
+ /* 448 */ "fill_mode ::= PREV",
+ /* 449 */ "fill_mode ::= NULL",
+ /* 450 */ "fill_mode ::= LINEAR",
+ /* 451 */ "fill_mode ::= NEXT",
+ /* 452 */ "group_by_clause_opt ::=",
+ /* 453 */ "group_by_clause_opt ::= GROUP BY group_by_list",
+ /* 454 */ "group_by_list ::= expression",
+ /* 455 */ "group_by_list ::= group_by_list NK_COMMA expression",
+ /* 456 */ "having_clause_opt ::=",
+ /* 457 */ "having_clause_opt ::= HAVING search_condition",
+ /* 458 */ "range_opt ::=",
+ /* 459 */ "range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP",
+ /* 460 */ "every_opt ::=",
+ /* 461 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP",
+ /* 462 */ "query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt",
+ /* 463 */ "query_expression_body ::= query_primary",
+ /* 464 */ "query_expression_body ::= query_expression_body UNION ALL query_expression_body",
+ /* 465 */ "query_expression_body ::= query_expression_body UNION query_expression_body",
+ /* 466 */ "query_primary ::= query_specification",
+ /* 467 */ "query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP",
+ /* 468 */ "order_by_clause_opt ::=",
+ /* 469 */ "order_by_clause_opt ::= ORDER BY sort_specification_list",
+ /* 470 */ "slimit_clause_opt ::=",
+ /* 471 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER",
+ /* 472 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER",
+ /* 473 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER",
+ /* 474 */ "limit_clause_opt ::=",
+ /* 475 */ "limit_clause_opt ::= LIMIT NK_INTEGER",
+ /* 476 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER",
+ /* 477 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER",
+ /* 478 */ "subquery ::= NK_LP query_expression NK_RP",
+ /* 479 */ "search_condition ::= common_expression",
+ /* 480 */ "sort_specification_list ::= sort_specification",
+ /* 481 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification",
+ /* 482 */ "sort_specification ::= expression ordering_specification_opt null_ordering_opt",
+ /* 483 */ "ordering_specification_opt ::=",
+ /* 484 */ "ordering_specification_opt ::= ASC",
+ /* 485 */ "ordering_specification_opt ::= DESC",
+ /* 486 */ "null_ordering_opt ::=",
+ /* 487 */ "null_ordering_opt ::= NULLS FIRST",
+ /* 488 */ "null_ordering_opt ::= NULLS LAST",
};
#endif /* NDEBUG */
@@ -2393,42 +2346,41 @@ static void yy_destructor(
case 362: /* stream_options */
case 364: /* query_expression */
case 367: /* explain_options */
- case 371: /* into_opt */
- case 373: /* where_clause_opt */
- case 374: /* signed */
- case 375: /* literal_func */
- case 379: /* expression */
- case 380: /* pseudo_column */
- case 381: /* column_reference */
- case 382: /* function_expression */
- case 383: /* subquery */
- case 388: /* star_func_para */
- case 389: /* predicate */
- case 392: /* in_predicate_value */
- case 393: /* boolean_value_expression */
- case 394: /* boolean_primary */
- case 395: /* common_expression */
- case 396: /* from_clause_opt */
- case 397: /* table_reference_list */
- case 398: /* table_reference */
- case 399: /* table_primary */
- case 400: /* joined_table */
- case 402: /* parenthesized_joined_table */
- case 404: /* search_condition */
- case 405: /* query_specification */
- case 409: /* range_opt */
- case 410: /* every_opt */
- case 411: /* fill_opt */
- case 412: /* twindow_clause_opt */
- case 414: /* having_clause_opt */
- case 415: /* select_item */
- case 418: /* query_expression_body */
- case 420: /* slimit_clause_opt */
- case 421: /* limit_clause_opt */
- case 422: /* query_primary */
- case 424: /* sort_specification */
+ case 372: /* where_clause_opt */
+ case 373: /* signed */
+ case 374: /* literal_func */
+ case 378: /* expression */
+ case 379: /* pseudo_column */
+ case 380: /* column_reference */
+ case 381: /* function_expression */
+ case 382: /* subquery */
+ case 387: /* star_func_para */
+ case 388: /* predicate */
+ case 391: /* in_predicate_value */
+ case 392: /* boolean_value_expression */
+ case 393: /* boolean_primary */
+ case 394: /* common_expression */
+ case 395: /* from_clause_opt */
+ case 396: /* table_reference_list */
+ case 397: /* table_reference */
+ case 398: /* table_primary */
+ case 399: /* joined_table */
+ case 401: /* parenthesized_joined_table */
+ case 403: /* search_condition */
+ case 404: /* query_specification */
+ case 408: /* range_opt */
+ case 409: /* every_opt */
+ case 410: /* fill_opt */
+ case 411: /* twindow_clause_opt */
+ case 413: /* having_clause_opt */
+ case 414: /* select_item */
+ case 417: /* query_expression_body */
+ case 419: /* slimit_clause_opt */
+ case 420: /* limit_clause_opt */
+ case 421: /* query_primary */
+ case 423: /* sort_specification */
{
- nodesDestroyNode((yypminor->yy840));
+ nodesDestroyNode((yypminor->yy272));
}
break;
case 306: /* account_options */
@@ -2449,11 +2401,11 @@ static void yy_destructor(
case 363: /* topic_name */
case 365: /* cgroup_name */
case 370: /* stream_name */
- case 377: /* table_alias */
- case 378: /* column_alias */
- case 384: /* star_func */
- case 386: /* noarg_func */
- case 401: /* alias_opt */
+ case 376: /* table_alias */
+ case 377: /* column_alias */
+ case 383: /* star_func */
+ case 385: /* noarg_func */
+ case 400: /* alias_opt */
{
}
@@ -2474,7 +2426,7 @@ static void yy_destructor(
case 320: /* exists_opt */
case 366: /* analyze_opt */
case 368: /* agg_func_opt */
- case 406: /* set_quantifier_opt */
+ case 405: /* set_quantifier_opt */
{
}
@@ -2493,18 +2445,18 @@ static void yy_destructor(
case 346: /* duration_list */
case 347: /* rollup_func_list */
case 358: /* func_list */
- case 372: /* dnode_list */
- case 376: /* literal_list */
- case 385: /* star_func_para_list */
- case 387: /* other_para_list */
- case 407: /* select_list */
- case 408: /* partition_by_clause_opt */
- case 413: /* group_by_clause_opt */
- case 417: /* group_by_list */
- case 419: /* order_by_clause_opt */
- case 423: /* sort_specification_list */
+ case 371: /* dnode_list */
+ case 375: /* literal_list */
+ case 384: /* star_func_para_list */
+ case 386: /* other_para_list */
+ case 406: /* select_list */
+ case 407: /* partition_by_clause_opt */
+ case 412: /* group_by_clause_opt */
+ case 416: /* group_by_list */
+ case 418: /* order_by_clause_opt */
+ case 422: /* sort_specification_list */
{
- nodesDestroyList((yypminor->yy544));
+ nodesDestroyList((yypminor->yy172));
}
break;
case 325: /* alter_db_option */
@@ -2518,28 +2470,28 @@ static void yy_destructor(
}
break;
- case 390: /* compare_op */
- case 391: /* in_op */
+ case 389: /* compare_op */
+ case 390: /* in_op */
{
}
break;
- case 403: /* join_type */
+ case 402: /* join_type */
{
}
break;
- case 416: /* fill_mode */
+ case 415: /* fill_mode */
{
}
break;
- case 425: /* ordering_specification_opt */
+ case 424: /* ordering_specification_opt */
{
}
break;
- case 426: /* null_ordering_opt */
+ case 425: /* null_ordering_opt */
{
}
@@ -3104,231 +3056,229 @@ static const struct {
{ 368, -1 }, /* (263) agg_func_opt ::= AGGREGATE */
{ 369, 0 }, /* (264) bufsize_opt ::= */
{ 369, -2 }, /* (265) bufsize_opt ::= BUFSIZE NK_INTEGER */
- { 305, -8 }, /* (266) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */
+ { 305, -9 }, /* (266) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name AS query_expression */
{ 305, -4 }, /* (267) cmd ::= DROP STREAM exists_opt stream_name */
- { 371, 0 }, /* (268) into_opt ::= */
- { 371, -2 }, /* (269) into_opt ::= INTO full_table_name */
- { 362, 0 }, /* (270) stream_options ::= */
- { 362, -3 }, /* (271) stream_options ::= stream_options TRIGGER AT_ONCE */
- { 362, -3 }, /* (272) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
- { 362, -4 }, /* (273) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */
- { 362, -3 }, /* (274) stream_options ::= stream_options WATERMARK duration_literal */
- { 362, -4 }, /* (275) stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */
- { 305, -3 }, /* (276) cmd ::= KILL CONNECTION NK_INTEGER */
- { 305, -3 }, /* (277) cmd ::= KILL QUERY NK_STRING */
- { 305, -3 }, /* (278) cmd ::= KILL TRANSACTION NK_INTEGER */
- { 305, -2 }, /* (279) cmd ::= BALANCE VGROUP */
- { 305, -4 }, /* (280) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
- { 305, -4 }, /* (281) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
- { 305, -3 }, /* (282) cmd ::= SPLIT VGROUP NK_INTEGER */
- { 372, -2 }, /* (283) dnode_list ::= DNODE NK_INTEGER */
- { 372, -3 }, /* (284) dnode_list ::= dnode_list DNODE NK_INTEGER */
- { 305, -4 }, /* (285) cmd ::= DELETE FROM full_table_name where_clause_opt */
- { 305, -1 }, /* (286) cmd ::= query_expression */
- { 305, -7 }, /* (287) cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression */
- { 305, -4 }, /* (288) cmd ::= INSERT INTO full_table_name query_expression */
- { 308, -1 }, /* (289) literal ::= NK_INTEGER */
- { 308, -1 }, /* (290) literal ::= NK_FLOAT */
- { 308, -1 }, /* (291) literal ::= NK_STRING */
- { 308, -1 }, /* (292) literal ::= NK_BOOL */
- { 308, -2 }, /* (293) literal ::= TIMESTAMP NK_STRING */
- { 308, -1 }, /* (294) literal ::= duration_literal */
- { 308, -1 }, /* (295) literal ::= NULL */
- { 308, -1 }, /* (296) literal ::= NK_QUESTION */
- { 349, -1 }, /* (297) duration_literal ::= NK_VARIABLE */
- { 374, -1 }, /* (298) signed ::= NK_INTEGER */
- { 374, -2 }, /* (299) signed ::= NK_PLUS NK_INTEGER */
- { 374, -2 }, /* (300) signed ::= NK_MINUS NK_INTEGER */
- { 374, -1 }, /* (301) signed ::= NK_FLOAT */
- { 374, -2 }, /* (302) signed ::= NK_PLUS NK_FLOAT */
- { 374, -2 }, /* (303) signed ::= NK_MINUS NK_FLOAT */
- { 338, -1 }, /* (304) signed_literal ::= signed */
- { 338, -1 }, /* (305) signed_literal ::= NK_STRING */
- { 338, -1 }, /* (306) signed_literal ::= NK_BOOL */
- { 338, -2 }, /* (307) signed_literal ::= TIMESTAMP NK_STRING */
- { 338, -1 }, /* (308) signed_literal ::= duration_literal */
- { 338, -1 }, /* (309) signed_literal ::= NULL */
- { 338, -1 }, /* (310) signed_literal ::= literal_func */
- { 338, -1 }, /* (311) signed_literal ::= NK_QUESTION */
- { 376, -1 }, /* (312) literal_list ::= signed_literal */
- { 376, -3 }, /* (313) literal_list ::= literal_list NK_COMMA signed_literal */
- { 316, -1 }, /* (314) db_name ::= NK_ID */
- { 344, -1 }, /* (315) table_name ::= NK_ID */
- { 336, -1 }, /* (316) column_name ::= NK_ID */
- { 351, -1 }, /* (317) function_name ::= NK_ID */
- { 377, -1 }, /* (318) table_alias ::= NK_ID */
- { 378, -1 }, /* (319) column_alias ::= NK_ID */
- { 310, -1 }, /* (320) user_name ::= NK_ID */
- { 363, -1 }, /* (321) topic_name ::= NK_ID */
- { 370, -1 }, /* (322) stream_name ::= NK_ID */
- { 365, -1 }, /* (323) cgroup_name ::= NK_ID */
- { 379, -1 }, /* (324) expression ::= literal */
- { 379, -1 }, /* (325) expression ::= pseudo_column */
- { 379, -1 }, /* (326) expression ::= column_reference */
- { 379, -1 }, /* (327) expression ::= function_expression */
- { 379, -1 }, /* (328) expression ::= subquery */
- { 379, -3 }, /* (329) expression ::= NK_LP expression NK_RP */
- { 379, -2 }, /* (330) expression ::= NK_PLUS expression */
- { 379, -2 }, /* (331) expression ::= NK_MINUS expression */
- { 379, -3 }, /* (332) expression ::= expression NK_PLUS expression */
- { 379, -3 }, /* (333) expression ::= expression NK_MINUS expression */
- { 379, -3 }, /* (334) expression ::= expression NK_STAR expression */
- { 379, -3 }, /* (335) expression ::= expression NK_SLASH expression */
- { 379, -3 }, /* (336) expression ::= expression NK_REM expression */
- { 379, -3 }, /* (337) expression ::= column_reference NK_ARROW NK_STRING */
- { 379, -3 }, /* (338) expression ::= expression NK_BITAND expression */
- { 379, -3 }, /* (339) expression ::= expression NK_BITOR expression */
- { 341, -1 }, /* (340) expression_list ::= expression */
- { 341, -3 }, /* (341) expression_list ::= expression_list NK_COMMA expression */
- { 381, -1 }, /* (342) column_reference ::= column_name */
- { 381, -3 }, /* (343) column_reference ::= table_name NK_DOT column_name */
- { 380, -1 }, /* (344) pseudo_column ::= ROWTS */
- { 380, -1 }, /* (345) pseudo_column ::= TBNAME */
- { 380, -3 }, /* (346) pseudo_column ::= table_name NK_DOT TBNAME */
- { 380, -1 }, /* (347) pseudo_column ::= QSTART */
- { 380, -1 }, /* (348) pseudo_column ::= QEND */
- { 380, -1 }, /* (349) pseudo_column ::= QDURATION */
- { 380, -1 }, /* (350) pseudo_column ::= WSTART */
- { 380, -1 }, /* (351) pseudo_column ::= WEND */
- { 380, -1 }, /* (352) pseudo_column ::= WDURATION */
- { 382, -4 }, /* (353) function_expression ::= function_name NK_LP expression_list NK_RP */
- { 382, -4 }, /* (354) function_expression ::= star_func NK_LP star_func_para_list NK_RP */
- { 382, -6 }, /* (355) function_expression ::= CAST NK_LP expression AS type_name NK_RP */
- { 382, -1 }, /* (356) function_expression ::= literal_func */
- { 375, -3 }, /* (357) literal_func ::= noarg_func NK_LP NK_RP */
- { 375, -1 }, /* (358) literal_func ::= NOW */
- { 386, -1 }, /* (359) noarg_func ::= NOW */
- { 386, -1 }, /* (360) noarg_func ::= TODAY */
- { 386, -1 }, /* (361) noarg_func ::= TIMEZONE */
- { 386, -1 }, /* (362) noarg_func ::= DATABASE */
- { 386, -1 }, /* (363) noarg_func ::= CLIENT_VERSION */
- { 386, -1 }, /* (364) noarg_func ::= SERVER_VERSION */
- { 386, -1 }, /* (365) noarg_func ::= SERVER_STATUS */
- { 386, -1 }, /* (366) noarg_func ::= CURRENT_USER */
- { 386, -1 }, /* (367) noarg_func ::= USER */
- { 384, -1 }, /* (368) star_func ::= COUNT */
- { 384, -1 }, /* (369) star_func ::= FIRST */
- { 384, -1 }, /* (370) star_func ::= LAST */
- { 384, -1 }, /* (371) star_func ::= LAST_ROW */
- { 385, -1 }, /* (372) star_func_para_list ::= NK_STAR */
- { 385, -1 }, /* (373) star_func_para_list ::= other_para_list */
- { 387, -1 }, /* (374) other_para_list ::= star_func_para */
- { 387, -3 }, /* (375) other_para_list ::= other_para_list NK_COMMA star_func_para */
- { 388, -1 }, /* (376) star_func_para ::= expression */
- { 388, -3 }, /* (377) star_func_para ::= table_name NK_DOT NK_STAR */
- { 389, -3 }, /* (378) predicate ::= expression compare_op expression */
- { 389, -5 }, /* (379) predicate ::= expression BETWEEN expression AND expression */
- { 389, -6 }, /* (380) predicate ::= expression NOT BETWEEN expression AND expression */
- { 389, -3 }, /* (381) predicate ::= expression IS NULL */
- { 389, -4 }, /* (382) predicate ::= expression IS NOT NULL */
- { 389, -3 }, /* (383) predicate ::= expression in_op in_predicate_value */
- { 390, -1 }, /* (384) compare_op ::= NK_LT */
- { 390, -1 }, /* (385) compare_op ::= NK_GT */
- { 390, -1 }, /* (386) compare_op ::= NK_LE */
- { 390, -1 }, /* (387) compare_op ::= NK_GE */
- { 390, -1 }, /* (388) compare_op ::= NK_NE */
- { 390, -1 }, /* (389) compare_op ::= NK_EQ */
- { 390, -1 }, /* (390) compare_op ::= LIKE */
- { 390, -2 }, /* (391) compare_op ::= NOT LIKE */
- { 390, -1 }, /* (392) compare_op ::= MATCH */
- { 390, -1 }, /* (393) compare_op ::= NMATCH */
- { 390, -1 }, /* (394) compare_op ::= CONTAINS */
- { 391, -1 }, /* (395) in_op ::= IN */
- { 391, -2 }, /* (396) in_op ::= NOT IN */
- { 392, -3 }, /* (397) in_predicate_value ::= NK_LP literal_list NK_RP */
- { 393, -1 }, /* (398) boolean_value_expression ::= boolean_primary */
- { 393, -2 }, /* (399) boolean_value_expression ::= NOT boolean_primary */
- { 393, -3 }, /* (400) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */
- { 393, -3 }, /* (401) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */
- { 394, -1 }, /* (402) boolean_primary ::= predicate */
- { 394, -3 }, /* (403) boolean_primary ::= NK_LP boolean_value_expression NK_RP */
- { 395, -1 }, /* (404) common_expression ::= expression */
- { 395, -1 }, /* (405) common_expression ::= boolean_value_expression */
- { 396, 0 }, /* (406) from_clause_opt ::= */
- { 396, -2 }, /* (407) from_clause_opt ::= FROM table_reference_list */
- { 397, -1 }, /* (408) table_reference_list ::= table_reference */
- { 397, -3 }, /* (409) table_reference_list ::= table_reference_list NK_COMMA table_reference */
- { 398, -1 }, /* (410) table_reference ::= table_primary */
- { 398, -1 }, /* (411) table_reference ::= joined_table */
- { 399, -2 }, /* (412) table_primary ::= table_name alias_opt */
- { 399, -4 }, /* (413) table_primary ::= db_name NK_DOT table_name alias_opt */
- { 399, -2 }, /* (414) table_primary ::= subquery alias_opt */
- { 399, -1 }, /* (415) table_primary ::= parenthesized_joined_table */
- { 401, 0 }, /* (416) alias_opt ::= */
- { 401, -1 }, /* (417) alias_opt ::= table_alias */
- { 401, -2 }, /* (418) alias_opt ::= AS table_alias */
- { 402, -3 }, /* (419) parenthesized_joined_table ::= NK_LP joined_table NK_RP */
- { 402, -3 }, /* (420) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */
- { 400, -6 }, /* (421) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */
- { 403, 0 }, /* (422) join_type ::= */
- { 403, -1 }, /* (423) join_type ::= INNER */
- { 405, -12 }, /* (424) query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */
- { 406, 0 }, /* (425) set_quantifier_opt ::= */
- { 406, -1 }, /* (426) set_quantifier_opt ::= DISTINCT */
- { 406, -1 }, /* (427) set_quantifier_opt ::= ALL */
- { 407, -1 }, /* (428) select_list ::= select_item */
- { 407, -3 }, /* (429) select_list ::= select_list NK_COMMA select_item */
- { 415, -1 }, /* (430) select_item ::= NK_STAR */
- { 415, -1 }, /* (431) select_item ::= common_expression */
- { 415, -2 }, /* (432) select_item ::= common_expression column_alias */
- { 415, -3 }, /* (433) select_item ::= common_expression AS column_alias */
- { 415, -3 }, /* (434) select_item ::= table_name NK_DOT NK_STAR */
- { 373, 0 }, /* (435) where_clause_opt ::= */
- { 373, -2 }, /* (436) where_clause_opt ::= WHERE search_condition */
- { 408, 0 }, /* (437) partition_by_clause_opt ::= */
- { 408, -3 }, /* (438) partition_by_clause_opt ::= PARTITION BY expression_list */
- { 412, 0 }, /* (439) twindow_clause_opt ::= */
- { 412, -6 }, /* (440) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
- { 412, -4 }, /* (441) twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */
- { 412, -6 }, /* (442) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */
- { 412, -8 }, /* (443) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */
- { 359, 0 }, /* (444) sliding_opt ::= */
- { 359, -4 }, /* (445) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */
- { 411, 0 }, /* (446) fill_opt ::= */
- { 411, -4 }, /* (447) fill_opt ::= FILL NK_LP fill_mode NK_RP */
- { 411, -6 }, /* (448) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */
- { 416, -1 }, /* (449) fill_mode ::= NONE */
- { 416, -1 }, /* (450) fill_mode ::= PREV */
- { 416, -1 }, /* (451) fill_mode ::= NULL */
- { 416, -1 }, /* (452) fill_mode ::= LINEAR */
- { 416, -1 }, /* (453) fill_mode ::= NEXT */
- { 413, 0 }, /* (454) group_by_clause_opt ::= */
- { 413, -3 }, /* (455) group_by_clause_opt ::= GROUP BY group_by_list */
- { 417, -1 }, /* (456) group_by_list ::= expression */
- { 417, -3 }, /* (457) group_by_list ::= group_by_list NK_COMMA expression */
- { 414, 0 }, /* (458) having_clause_opt ::= */
- { 414, -2 }, /* (459) having_clause_opt ::= HAVING search_condition */
- { 409, 0 }, /* (460) range_opt ::= */
- { 409, -6 }, /* (461) range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP */
- { 410, 0 }, /* (462) every_opt ::= */
- { 410, -4 }, /* (463) every_opt ::= EVERY NK_LP duration_literal NK_RP */
- { 364, -4 }, /* (464) query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */
- { 418, -1 }, /* (465) query_expression_body ::= query_primary */
- { 418, -4 }, /* (466) query_expression_body ::= query_expression_body UNION ALL query_expression_body */
- { 418, -3 }, /* (467) query_expression_body ::= query_expression_body UNION query_expression_body */
- { 422, -1 }, /* (468) query_primary ::= query_specification */
- { 422, -6 }, /* (469) query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */
- { 419, 0 }, /* (470) order_by_clause_opt ::= */
- { 419, -3 }, /* (471) order_by_clause_opt ::= ORDER BY sort_specification_list */
- { 420, 0 }, /* (472) slimit_clause_opt ::= */
- { 420, -2 }, /* (473) slimit_clause_opt ::= SLIMIT NK_INTEGER */
- { 420, -4 }, /* (474) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
- { 420, -4 }, /* (475) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- { 421, 0 }, /* (476) limit_clause_opt ::= */
- { 421, -2 }, /* (477) limit_clause_opt ::= LIMIT NK_INTEGER */
- { 421, -4 }, /* (478) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */
- { 421, -4 }, /* (479) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- { 383, -3 }, /* (480) subquery ::= NK_LP query_expression NK_RP */
- { 404, -1 }, /* (481) search_condition ::= common_expression */
- { 423, -1 }, /* (482) sort_specification_list ::= sort_specification */
- { 423, -3 }, /* (483) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */
- { 424, -3 }, /* (484) sort_specification ::= expression ordering_specification_opt null_ordering_opt */
- { 425, 0 }, /* (485) ordering_specification_opt ::= */
- { 425, -1 }, /* (486) ordering_specification_opt ::= ASC */
- { 425, -1 }, /* (487) ordering_specification_opt ::= DESC */
- { 426, 0 }, /* (488) null_ordering_opt ::= */
- { 426, -2 }, /* (489) null_ordering_opt ::= NULLS FIRST */
- { 426, -2 }, /* (490) null_ordering_opt ::= NULLS LAST */
+ { 362, 0 }, /* (268) stream_options ::= */
+ { 362, -3 }, /* (269) stream_options ::= stream_options TRIGGER AT_ONCE */
+ { 362, -3 }, /* (270) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
+ { 362, -4 }, /* (271) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */
+ { 362, -3 }, /* (272) stream_options ::= stream_options WATERMARK duration_literal */
+ { 362, -4 }, /* (273) stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */
+ { 305, -3 }, /* (274) cmd ::= KILL CONNECTION NK_INTEGER */
+ { 305, -3 }, /* (275) cmd ::= KILL QUERY NK_STRING */
+ { 305, -3 }, /* (276) cmd ::= KILL TRANSACTION NK_INTEGER */
+ { 305, -2 }, /* (277) cmd ::= BALANCE VGROUP */
+ { 305, -4 }, /* (278) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
+ { 305, -4 }, /* (279) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
+ { 305, -3 }, /* (280) cmd ::= SPLIT VGROUP NK_INTEGER */
+ { 371, -2 }, /* (281) dnode_list ::= DNODE NK_INTEGER */
+ { 371, -3 }, /* (282) dnode_list ::= dnode_list DNODE NK_INTEGER */
+ { 305, -4 }, /* (283) cmd ::= DELETE FROM full_table_name where_clause_opt */
+ { 305, -1 }, /* (284) cmd ::= query_expression */
+ { 305, -7 }, /* (285) cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression */
+ { 305, -4 }, /* (286) cmd ::= INSERT INTO full_table_name query_expression */
+ { 308, -1 }, /* (287) literal ::= NK_INTEGER */
+ { 308, -1 }, /* (288) literal ::= NK_FLOAT */
+ { 308, -1 }, /* (289) literal ::= NK_STRING */
+ { 308, -1 }, /* (290) literal ::= NK_BOOL */
+ { 308, -2 }, /* (291) literal ::= TIMESTAMP NK_STRING */
+ { 308, -1 }, /* (292) literal ::= duration_literal */
+ { 308, -1 }, /* (293) literal ::= NULL */
+ { 308, -1 }, /* (294) literal ::= NK_QUESTION */
+ { 349, -1 }, /* (295) duration_literal ::= NK_VARIABLE */
+ { 373, -1 }, /* (296) signed ::= NK_INTEGER */
+ { 373, -2 }, /* (297) signed ::= NK_PLUS NK_INTEGER */
+ { 373, -2 }, /* (298) signed ::= NK_MINUS NK_INTEGER */
+ { 373, -1 }, /* (299) signed ::= NK_FLOAT */
+ { 373, -2 }, /* (300) signed ::= NK_PLUS NK_FLOAT */
+ { 373, -2 }, /* (301) signed ::= NK_MINUS NK_FLOAT */
+ { 338, -1 }, /* (302) signed_literal ::= signed */
+ { 338, -1 }, /* (303) signed_literal ::= NK_STRING */
+ { 338, -1 }, /* (304) signed_literal ::= NK_BOOL */
+ { 338, -2 }, /* (305) signed_literal ::= TIMESTAMP NK_STRING */
+ { 338, -1 }, /* (306) signed_literal ::= duration_literal */
+ { 338, -1 }, /* (307) signed_literal ::= NULL */
+ { 338, -1 }, /* (308) signed_literal ::= literal_func */
+ { 338, -1 }, /* (309) signed_literal ::= NK_QUESTION */
+ { 375, -1 }, /* (310) literal_list ::= signed_literal */
+ { 375, -3 }, /* (311) literal_list ::= literal_list NK_COMMA signed_literal */
+ { 316, -1 }, /* (312) db_name ::= NK_ID */
+ { 344, -1 }, /* (313) table_name ::= NK_ID */
+ { 336, -1 }, /* (314) column_name ::= NK_ID */
+ { 351, -1 }, /* (315) function_name ::= NK_ID */
+ { 376, -1 }, /* (316) table_alias ::= NK_ID */
+ { 377, -1 }, /* (317) column_alias ::= NK_ID */
+ { 310, -1 }, /* (318) user_name ::= NK_ID */
+ { 363, -1 }, /* (319) topic_name ::= NK_ID */
+ { 370, -1 }, /* (320) stream_name ::= NK_ID */
+ { 365, -1 }, /* (321) cgroup_name ::= NK_ID */
+ { 378, -1 }, /* (322) expression ::= literal */
+ { 378, -1 }, /* (323) expression ::= pseudo_column */
+ { 378, -1 }, /* (324) expression ::= column_reference */
+ { 378, -1 }, /* (325) expression ::= function_expression */
+ { 378, -1 }, /* (326) expression ::= subquery */
+ { 378, -3 }, /* (327) expression ::= NK_LP expression NK_RP */
+ { 378, -2 }, /* (328) expression ::= NK_PLUS expression */
+ { 378, -2 }, /* (329) expression ::= NK_MINUS expression */
+ { 378, -3 }, /* (330) expression ::= expression NK_PLUS expression */
+ { 378, -3 }, /* (331) expression ::= expression NK_MINUS expression */
+ { 378, -3 }, /* (332) expression ::= expression NK_STAR expression */
+ { 378, -3 }, /* (333) expression ::= expression NK_SLASH expression */
+ { 378, -3 }, /* (334) expression ::= expression NK_REM expression */
+ { 378, -3 }, /* (335) expression ::= column_reference NK_ARROW NK_STRING */
+ { 378, -3 }, /* (336) expression ::= expression NK_BITAND expression */
+ { 378, -3 }, /* (337) expression ::= expression NK_BITOR expression */
+ { 341, -1 }, /* (338) expression_list ::= expression */
+ { 341, -3 }, /* (339) expression_list ::= expression_list NK_COMMA expression */
+ { 380, -1 }, /* (340) column_reference ::= column_name */
+ { 380, -3 }, /* (341) column_reference ::= table_name NK_DOT column_name */
+ { 379, -1 }, /* (342) pseudo_column ::= ROWTS */
+ { 379, -1 }, /* (343) pseudo_column ::= TBNAME */
+ { 379, -3 }, /* (344) pseudo_column ::= table_name NK_DOT TBNAME */
+ { 379, -1 }, /* (345) pseudo_column ::= QSTART */
+ { 379, -1 }, /* (346) pseudo_column ::= QEND */
+ { 379, -1 }, /* (347) pseudo_column ::= QDURATION */
+ { 379, -1 }, /* (348) pseudo_column ::= WSTART */
+ { 379, -1 }, /* (349) pseudo_column ::= WEND */
+ { 379, -1 }, /* (350) pseudo_column ::= WDURATION */
+ { 381, -4 }, /* (351) function_expression ::= function_name NK_LP expression_list NK_RP */
+ { 381, -4 }, /* (352) function_expression ::= star_func NK_LP star_func_para_list NK_RP */
+ { 381, -6 }, /* (353) function_expression ::= CAST NK_LP expression AS type_name NK_RP */
+ { 381, -1 }, /* (354) function_expression ::= literal_func */
+ { 374, -3 }, /* (355) literal_func ::= noarg_func NK_LP NK_RP */
+ { 374, -1 }, /* (356) literal_func ::= NOW */
+ { 385, -1 }, /* (357) noarg_func ::= NOW */
+ { 385, -1 }, /* (358) noarg_func ::= TODAY */
+ { 385, -1 }, /* (359) noarg_func ::= TIMEZONE */
+ { 385, -1 }, /* (360) noarg_func ::= DATABASE */
+ { 385, -1 }, /* (361) noarg_func ::= CLIENT_VERSION */
+ { 385, -1 }, /* (362) noarg_func ::= SERVER_VERSION */
+ { 385, -1 }, /* (363) noarg_func ::= SERVER_STATUS */
+ { 385, -1 }, /* (364) noarg_func ::= CURRENT_USER */
+ { 385, -1 }, /* (365) noarg_func ::= USER */
+ { 383, -1 }, /* (366) star_func ::= COUNT */
+ { 383, -1 }, /* (367) star_func ::= FIRST */
+ { 383, -1 }, /* (368) star_func ::= LAST */
+ { 383, -1 }, /* (369) star_func ::= LAST_ROW */
+ { 384, -1 }, /* (370) star_func_para_list ::= NK_STAR */
+ { 384, -1 }, /* (371) star_func_para_list ::= other_para_list */
+ { 386, -1 }, /* (372) other_para_list ::= star_func_para */
+ { 386, -3 }, /* (373) other_para_list ::= other_para_list NK_COMMA star_func_para */
+ { 387, -1 }, /* (374) star_func_para ::= expression */
+ { 387, -3 }, /* (375) star_func_para ::= table_name NK_DOT NK_STAR */
+ { 388, -3 }, /* (376) predicate ::= expression compare_op expression */
+ { 388, -5 }, /* (377) predicate ::= expression BETWEEN expression AND expression */
+ { 388, -6 }, /* (378) predicate ::= expression NOT BETWEEN expression AND expression */
+ { 388, -3 }, /* (379) predicate ::= expression IS NULL */
+ { 388, -4 }, /* (380) predicate ::= expression IS NOT NULL */
+ { 388, -3 }, /* (381) predicate ::= expression in_op in_predicate_value */
+ { 389, -1 }, /* (382) compare_op ::= NK_LT */
+ { 389, -1 }, /* (383) compare_op ::= NK_GT */
+ { 389, -1 }, /* (384) compare_op ::= NK_LE */
+ { 389, -1 }, /* (385) compare_op ::= NK_GE */
+ { 389, -1 }, /* (386) compare_op ::= NK_NE */
+ { 389, -1 }, /* (387) compare_op ::= NK_EQ */
+ { 389, -1 }, /* (388) compare_op ::= LIKE */
+ { 389, -2 }, /* (389) compare_op ::= NOT LIKE */
+ { 389, -1 }, /* (390) compare_op ::= MATCH */
+ { 389, -1 }, /* (391) compare_op ::= NMATCH */
+ { 389, -1 }, /* (392) compare_op ::= CONTAINS */
+ { 390, -1 }, /* (393) in_op ::= IN */
+ { 390, -2 }, /* (394) in_op ::= NOT IN */
+ { 391, -3 }, /* (395) in_predicate_value ::= NK_LP literal_list NK_RP */
+ { 392, -1 }, /* (396) boolean_value_expression ::= boolean_primary */
+ { 392, -2 }, /* (397) boolean_value_expression ::= NOT boolean_primary */
+ { 392, -3 }, /* (398) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */
+ { 392, -3 }, /* (399) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */
+ { 393, -1 }, /* (400) boolean_primary ::= predicate */
+ { 393, -3 }, /* (401) boolean_primary ::= NK_LP boolean_value_expression NK_RP */
+ { 394, -1 }, /* (402) common_expression ::= expression */
+ { 394, -1 }, /* (403) common_expression ::= boolean_value_expression */
+ { 395, 0 }, /* (404) from_clause_opt ::= */
+ { 395, -2 }, /* (405) from_clause_opt ::= FROM table_reference_list */
+ { 396, -1 }, /* (406) table_reference_list ::= table_reference */
+ { 396, -3 }, /* (407) table_reference_list ::= table_reference_list NK_COMMA table_reference */
+ { 397, -1 }, /* (408) table_reference ::= table_primary */
+ { 397, -1 }, /* (409) table_reference ::= joined_table */
+ { 398, -2 }, /* (410) table_primary ::= table_name alias_opt */
+ { 398, -4 }, /* (411) table_primary ::= db_name NK_DOT table_name alias_opt */
+ { 398, -2 }, /* (412) table_primary ::= subquery alias_opt */
+ { 398, -1 }, /* (413) table_primary ::= parenthesized_joined_table */
+ { 400, 0 }, /* (414) alias_opt ::= */
+ { 400, -1 }, /* (415) alias_opt ::= table_alias */
+ { 400, -2 }, /* (416) alias_opt ::= AS table_alias */
+ { 401, -3 }, /* (417) parenthesized_joined_table ::= NK_LP joined_table NK_RP */
+ { 401, -3 }, /* (418) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */
+ { 399, -6 }, /* (419) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */
+ { 402, 0 }, /* (420) join_type ::= */
+ { 402, -1 }, /* (421) join_type ::= INNER */
+ { 404, -12 }, /* (422) query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */
+ { 405, 0 }, /* (423) set_quantifier_opt ::= */
+ { 405, -1 }, /* (424) set_quantifier_opt ::= DISTINCT */
+ { 405, -1 }, /* (425) set_quantifier_opt ::= ALL */
+ { 406, -1 }, /* (426) select_list ::= select_item */
+ { 406, -3 }, /* (427) select_list ::= select_list NK_COMMA select_item */
+ { 414, -1 }, /* (428) select_item ::= NK_STAR */
+ { 414, -1 }, /* (429) select_item ::= common_expression */
+ { 414, -2 }, /* (430) select_item ::= common_expression column_alias */
+ { 414, -3 }, /* (431) select_item ::= common_expression AS column_alias */
+ { 414, -3 }, /* (432) select_item ::= table_name NK_DOT NK_STAR */
+ { 372, 0 }, /* (433) where_clause_opt ::= */
+ { 372, -2 }, /* (434) where_clause_opt ::= WHERE search_condition */
+ { 407, 0 }, /* (435) partition_by_clause_opt ::= */
+ { 407, -3 }, /* (436) partition_by_clause_opt ::= PARTITION BY expression_list */
+ { 411, 0 }, /* (437) twindow_clause_opt ::= */
+ { 411, -6 }, /* (438) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
+ { 411, -4 }, /* (439) twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */
+ { 411, -6 }, /* (440) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */
+ { 411, -8 }, /* (441) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */
+ { 359, 0 }, /* (442) sliding_opt ::= */
+ { 359, -4 }, /* (443) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */
+ { 410, 0 }, /* (444) fill_opt ::= */
+ { 410, -4 }, /* (445) fill_opt ::= FILL NK_LP fill_mode NK_RP */
+ { 410, -6 }, /* (446) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */
+ { 415, -1 }, /* (447) fill_mode ::= NONE */
+ { 415, -1 }, /* (448) fill_mode ::= PREV */
+ { 415, -1 }, /* (449) fill_mode ::= NULL */
+ { 415, -1 }, /* (450) fill_mode ::= LINEAR */
+ { 415, -1 }, /* (451) fill_mode ::= NEXT */
+ { 412, 0 }, /* (452) group_by_clause_opt ::= */
+ { 412, -3 }, /* (453) group_by_clause_opt ::= GROUP BY group_by_list */
+ { 416, -1 }, /* (454) group_by_list ::= expression */
+ { 416, -3 }, /* (455) group_by_list ::= group_by_list NK_COMMA expression */
+ { 413, 0 }, /* (456) having_clause_opt ::= */
+ { 413, -2 }, /* (457) having_clause_opt ::= HAVING search_condition */
+ { 408, 0 }, /* (458) range_opt ::= */
+ { 408, -6 }, /* (459) range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP */
+ { 409, 0 }, /* (460) every_opt ::= */
+ { 409, -4 }, /* (461) every_opt ::= EVERY NK_LP duration_literal NK_RP */
+ { 364, -4 }, /* (462) query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */
+ { 417, -1 }, /* (463) query_expression_body ::= query_primary */
+ { 417, -4 }, /* (464) query_expression_body ::= query_expression_body UNION ALL query_expression_body */
+ { 417, -3 }, /* (465) query_expression_body ::= query_expression_body UNION query_expression_body */
+ { 421, -1 }, /* (466) query_primary ::= query_specification */
+ { 421, -6 }, /* (467) query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */
+ { 418, 0 }, /* (468) order_by_clause_opt ::= */
+ { 418, -3 }, /* (469) order_by_clause_opt ::= ORDER BY sort_specification_list */
+ { 419, 0 }, /* (470) slimit_clause_opt ::= */
+ { 419, -2 }, /* (471) slimit_clause_opt ::= SLIMIT NK_INTEGER */
+ { 419, -4 }, /* (472) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
+ { 419, -4 }, /* (473) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
+ { 420, 0 }, /* (474) limit_clause_opt ::= */
+ { 420, -2 }, /* (475) limit_clause_opt ::= LIMIT NK_INTEGER */
+ { 420, -4 }, /* (476) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */
+ { 420, -4 }, /* (477) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */
+ { 382, -3 }, /* (478) subquery ::= NK_LP query_expression NK_RP */
+ { 403, -1 }, /* (479) search_condition ::= common_expression */
+ { 422, -1 }, /* (480) sort_specification_list ::= sort_specification */
+ { 422, -3 }, /* (481) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */
+ { 423, -3 }, /* (482) sort_specification ::= expression ordering_specification_opt null_ordering_opt */
+ { 424, 0 }, /* (483) ordering_specification_opt ::= */
+ { 424, -1 }, /* (484) ordering_specification_opt ::= ASC */
+ { 424, -1 }, /* (485) ordering_specification_opt ::= DESC */
+ { 425, 0 }, /* (486) null_ordering_opt ::= */
+ { 425, -2 }, /* (487) null_ordering_opt ::= NULLS FIRST */
+ { 425, -2 }, /* (488) null_ordering_opt ::= NULLS LAST */
};
static void yy_accept(yyParser*); /* Forward Declaration */
@@ -3465,69 +3415,69 @@ static YYACTIONTYPE yy_reduce(
yy_destructor(yypParser,308,&yymsp[0].minor);
break;
case 24: /* cmd ::= CREATE USER user_name PASS NK_STRING sysinfo_opt */
-{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-3].minor.yy617, &yymsp[-1].minor.yy0, yymsp[0].minor.yy215); }
+{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-3].minor.yy209, &yymsp[-1].minor.yy0, yymsp[0].minor.yy59); }
break;
case 25: /* cmd ::= ALTER USER user_name PASS NK_STRING */
-{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy617, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy209, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); }
break;
case 26: /* cmd ::= ALTER USER user_name ENABLE NK_INTEGER */
-{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy617, TSDB_ALTER_USER_ENABLE, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy209, TSDB_ALTER_USER_ENABLE, &yymsp[0].minor.yy0); }
break;
case 27: /* cmd ::= ALTER USER user_name SYSINFO NK_INTEGER */
-{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy617, TSDB_ALTER_USER_SYSINFO, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy209, TSDB_ALTER_USER_SYSINFO, &yymsp[0].minor.yy0); }
break;
case 28: /* cmd ::= DROP USER user_name */
-{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy209); }
break;
case 29: /* sysinfo_opt ::= */
-{ yymsp[1].minor.yy215 = 1; }
+{ yymsp[1].minor.yy59 = 1; }
break;
case 30: /* sysinfo_opt ::= SYSINFO NK_INTEGER */
-{ yymsp[-1].minor.yy215 = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); }
+{ yymsp[-1].minor.yy59 = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); }
break;
case 31: /* cmd ::= GRANT privileges ON priv_level TO user_name */
-{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy473, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy69, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209); }
break;
case 32: /* cmd ::= REVOKE privileges ON priv_level FROM user_name */
-{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy473, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy69, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209); }
break;
case 33: /* privileges ::= ALL */
-{ yymsp[0].minor.yy473 = PRIVILEGE_TYPE_ALL; }
+{ yymsp[0].minor.yy69 = PRIVILEGE_TYPE_ALL; }
break;
case 34: /* privileges ::= priv_type_list */
case 35: /* priv_type_list ::= priv_type */ yytestcase(yyruleno==35);
-{ yylhsminor.yy473 = yymsp[0].minor.yy473; }
- yymsp[0].minor.yy473 = yylhsminor.yy473;
+{ yylhsminor.yy69 = yymsp[0].minor.yy69; }
+ yymsp[0].minor.yy69 = yylhsminor.yy69;
break;
case 36: /* priv_type_list ::= priv_type_list NK_COMMA priv_type */
-{ yylhsminor.yy473 = yymsp[-2].minor.yy473 | yymsp[0].minor.yy473; }
- yymsp[-2].minor.yy473 = yylhsminor.yy473;
+{ yylhsminor.yy69 = yymsp[-2].minor.yy69 | yymsp[0].minor.yy69; }
+ yymsp[-2].minor.yy69 = yylhsminor.yy69;
break;
case 37: /* priv_type ::= READ */
-{ yymsp[0].minor.yy473 = PRIVILEGE_TYPE_READ; }
+{ yymsp[0].minor.yy69 = PRIVILEGE_TYPE_READ; }
break;
case 38: /* priv_type ::= WRITE */
-{ yymsp[0].minor.yy473 = PRIVILEGE_TYPE_WRITE; }
+{ yymsp[0].minor.yy69 = PRIVILEGE_TYPE_WRITE; }
break;
case 39: /* priv_level ::= NK_STAR NK_DOT NK_STAR */
-{ yylhsminor.yy617 = yymsp[-2].minor.yy0; }
- yymsp[-2].minor.yy617 = yylhsminor.yy617;
+{ yylhsminor.yy209 = yymsp[-2].minor.yy0; }
+ yymsp[-2].minor.yy209 = yylhsminor.yy209;
break;
case 40: /* priv_level ::= db_name NK_DOT NK_STAR */
-{ yylhsminor.yy617 = yymsp[-2].minor.yy617; }
- yymsp[-2].minor.yy617 = yylhsminor.yy617;
+{ yylhsminor.yy209 = yymsp[-2].minor.yy209; }
+ yymsp[-2].minor.yy209 = yylhsminor.yy209;
break;
case 41: /* cmd ::= CREATE DNODE dnode_endpoint */
-{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy617, NULL); }
+{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy209, NULL); }
break;
case 42: /* cmd ::= CREATE DNODE dnode_endpoint PORT NK_INTEGER */
-{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy0); }
break;
case 43: /* cmd ::= DROP DNODE NK_INTEGER */
{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy0); }
break;
case 44: /* cmd ::= DROP DNODE dnode_endpoint */
-{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy209); }
break;
case 45: /* cmd ::= ALTER DNODE NK_INTEGER NK_STRING */
{ pCxt->pRootNode = createAlterDnodeStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, NULL); }
@@ -3544,31 +3494,31 @@ static YYACTIONTYPE yy_reduce(
case 49: /* dnode_endpoint ::= NK_STRING */
case 50: /* dnode_endpoint ::= NK_ID */ yytestcase(yyruleno==50);
case 51: /* dnode_endpoint ::= NK_IPTOKEN */ yytestcase(yyruleno==51);
- case 314: /* db_name ::= NK_ID */ yytestcase(yyruleno==314);
- case 315: /* table_name ::= NK_ID */ yytestcase(yyruleno==315);
- case 316: /* column_name ::= NK_ID */ yytestcase(yyruleno==316);
- case 317: /* function_name ::= NK_ID */ yytestcase(yyruleno==317);
- case 318: /* table_alias ::= NK_ID */ yytestcase(yyruleno==318);
- case 319: /* column_alias ::= NK_ID */ yytestcase(yyruleno==319);
- case 320: /* user_name ::= NK_ID */ yytestcase(yyruleno==320);
- case 321: /* topic_name ::= NK_ID */ yytestcase(yyruleno==321);
- case 322: /* stream_name ::= NK_ID */ yytestcase(yyruleno==322);
- case 323: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==323);
- case 359: /* noarg_func ::= NOW */ yytestcase(yyruleno==359);
- case 360: /* noarg_func ::= TODAY */ yytestcase(yyruleno==360);
- case 361: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==361);
- case 362: /* noarg_func ::= DATABASE */ yytestcase(yyruleno==362);
- case 363: /* noarg_func ::= CLIENT_VERSION */ yytestcase(yyruleno==363);
- case 364: /* noarg_func ::= SERVER_VERSION */ yytestcase(yyruleno==364);
- case 365: /* noarg_func ::= SERVER_STATUS */ yytestcase(yyruleno==365);
- case 366: /* noarg_func ::= CURRENT_USER */ yytestcase(yyruleno==366);
- case 367: /* noarg_func ::= USER */ yytestcase(yyruleno==367);
- case 368: /* star_func ::= COUNT */ yytestcase(yyruleno==368);
- case 369: /* star_func ::= FIRST */ yytestcase(yyruleno==369);
- case 370: /* star_func ::= LAST */ yytestcase(yyruleno==370);
- case 371: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==371);
-{ yylhsminor.yy617 = yymsp[0].minor.yy0; }
- yymsp[0].minor.yy617 = yylhsminor.yy617;
+ case 312: /* db_name ::= NK_ID */ yytestcase(yyruleno==312);
+ case 313: /* table_name ::= NK_ID */ yytestcase(yyruleno==313);
+ case 314: /* column_name ::= NK_ID */ yytestcase(yyruleno==314);
+ case 315: /* function_name ::= NK_ID */ yytestcase(yyruleno==315);
+ case 316: /* table_alias ::= NK_ID */ yytestcase(yyruleno==316);
+ case 317: /* column_alias ::= NK_ID */ yytestcase(yyruleno==317);
+ case 318: /* user_name ::= NK_ID */ yytestcase(yyruleno==318);
+ case 319: /* topic_name ::= NK_ID */ yytestcase(yyruleno==319);
+ case 320: /* stream_name ::= NK_ID */ yytestcase(yyruleno==320);
+ case 321: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==321);
+ case 357: /* noarg_func ::= NOW */ yytestcase(yyruleno==357);
+ case 358: /* noarg_func ::= TODAY */ yytestcase(yyruleno==358);
+ case 359: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==359);
+ case 360: /* noarg_func ::= DATABASE */ yytestcase(yyruleno==360);
+ case 361: /* noarg_func ::= CLIENT_VERSION */ yytestcase(yyruleno==361);
+ case 362: /* noarg_func ::= SERVER_VERSION */ yytestcase(yyruleno==362);
+ case 363: /* noarg_func ::= SERVER_STATUS */ yytestcase(yyruleno==363);
+ case 364: /* noarg_func ::= CURRENT_USER */ yytestcase(yyruleno==364);
+ case 365: /* noarg_func ::= USER */ yytestcase(yyruleno==365);
+ case 366: /* star_func ::= COUNT */ yytestcase(yyruleno==366);
+ case 367: /* star_func ::= FIRST */ yytestcase(yyruleno==367);
+ case 368: /* star_func ::= LAST */ yytestcase(yyruleno==368);
+ case 369: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==369);
+{ yylhsminor.yy209 = yymsp[0].minor.yy0; }
+ yymsp[0].minor.yy209 = yylhsminor.yy209;
break;
case 52: /* cmd ::= ALTER LOCAL NK_STRING */
{ pCxt->pRootNode = createAlterLocalStmt(pCxt, &yymsp[0].minor.yy0, NULL); }
@@ -3601,189 +3551,189 @@ static YYACTIONTYPE yy_reduce(
{ pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_MNODE_STMT, &yymsp[0].minor.yy0); }
break;
case 62: /* cmd ::= CREATE DATABASE not_exists_opt db_name db_options */
-{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy313, &yymsp[-1].minor.yy617, yymsp[0].minor.yy840); }
+{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy293, &yymsp[-1].minor.yy209, yymsp[0].minor.yy272); }
break;
case 63: /* cmd ::= DROP DATABASE exists_opt db_name */
-{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy313, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy293, &yymsp[0].minor.yy209); }
break;
case 64: /* cmd ::= USE db_name */
-{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy209); }
break;
case 65: /* cmd ::= ALTER DATABASE db_name alter_db_options */
-{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy617, yymsp[0].minor.yy840); }
+{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy209, yymsp[0].minor.yy272); }
break;
case 66: /* cmd ::= FLUSH DATABASE db_name */
-{ pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &yymsp[0].minor.yy209); }
break;
case 67: /* cmd ::= TRIM DATABASE db_name */
-{ pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &yymsp[0].minor.yy209); }
break;
case 68: /* not_exists_opt ::= IF NOT EXISTS */
-{ yymsp[-2].minor.yy313 = true; }
+{ yymsp[-2].minor.yy293 = true; }
break;
case 69: /* not_exists_opt ::= */
case 71: /* exists_opt ::= */ yytestcase(yyruleno==71);
case 255: /* analyze_opt ::= */ yytestcase(yyruleno==255);
case 262: /* agg_func_opt ::= */ yytestcase(yyruleno==262);
- case 425: /* set_quantifier_opt ::= */ yytestcase(yyruleno==425);
-{ yymsp[1].minor.yy313 = false; }
+ case 423: /* set_quantifier_opt ::= */ yytestcase(yyruleno==423);
+{ yymsp[1].minor.yy293 = false; }
break;
case 70: /* exists_opt ::= IF EXISTS */
-{ yymsp[-1].minor.yy313 = true; }
+{ yymsp[-1].minor.yy293 = true; }
break;
case 72: /* db_options ::= */
-{ yymsp[1].minor.yy840 = createDefaultDatabaseOptions(pCxt); }
+{ yymsp[1].minor.yy272 = createDefaultDatabaseOptions(pCxt); }
break;
case 73: /* db_options ::= db_options BUFFER NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 74: /* db_options ::= db_options CACHEMODEL NK_STRING */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_CACHEMODEL, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_CACHEMODEL, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 75: /* db_options ::= db_options CACHESIZE NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_CACHESIZE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_CACHESIZE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 76: /* db_options ::= db_options COMP NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_COMP, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_COMP, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 77: /* db_options ::= db_options DURATION NK_INTEGER */
case 78: /* db_options ::= db_options DURATION NK_VARIABLE */ yytestcase(yyruleno==78);
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_DAYS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_DAYS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 79: /* db_options ::= db_options MAXROWS NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 80: /* db_options ::= db_options MINROWS NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 81: /* db_options ::= db_options KEEP integer_list */
case 82: /* db_options ::= db_options KEEP variable_list */ yytestcase(yyruleno==82);
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_KEEP, yymsp[0].minor.yy544); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_KEEP, yymsp[0].minor.yy172); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 83: /* db_options ::= db_options PAGES NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_PAGES, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_PAGES, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 84: /* db_options ::= db_options PAGESIZE NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 85: /* db_options ::= db_options PRECISION NK_STRING */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 86: /* db_options ::= db_options REPLICA NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 87: /* db_options ::= db_options STRICT NK_STRING */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_STRICT, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_STRICT, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 88: /* db_options ::= db_options VGROUPS NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 89: /* db_options ::= db_options SINGLE_STABLE NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 90: /* db_options ::= db_options RETENTIONS retention_list */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_RETENTIONS, yymsp[0].minor.yy544); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_RETENTIONS, yymsp[0].minor.yy172); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 91: /* db_options ::= db_options SCHEMALESS NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 92: /* db_options ::= db_options WAL_LEVEL NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_WAL, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_WAL, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 93: /* db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 94: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_WAL_RETENTION_PERIOD, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_WAL_RETENTION_PERIOD, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 95: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */
{
SToken t = yymsp[-1].minor.yy0;
t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
- yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-3].minor.yy840, DB_OPTION_WAL_RETENTION_PERIOD, &t);
+ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-3].minor.yy272, DB_OPTION_WAL_RETENTION_PERIOD, &t);
}
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ yymsp[-3].minor.yy272 = yylhsminor.yy272;
break;
case 96: /* db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_WAL_RETENTION_SIZE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_WAL_RETENTION_SIZE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 97: /* db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */
{
SToken t = yymsp[-1].minor.yy0;
t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
- yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-3].minor.yy840, DB_OPTION_WAL_RETENTION_SIZE, &t);
+ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-3].minor.yy272, DB_OPTION_WAL_RETENTION_SIZE, &t);
}
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ yymsp[-3].minor.yy272 = yylhsminor.yy272;
break;
case 98: /* db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_WAL_ROLL_PERIOD, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_WAL_ROLL_PERIOD, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 99: /* db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_WAL_SEGMENT_SIZE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_WAL_SEGMENT_SIZE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 100: /* alter_db_options ::= alter_db_option */
-{ yylhsminor.yy840 = createAlterDatabaseOptions(pCxt); yylhsminor.yy840 = setAlterDatabaseOption(pCxt, yylhsminor.yy840, &yymsp[0].minor.yy95); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createAlterDatabaseOptions(pCxt); yylhsminor.yy272 = setAlterDatabaseOption(pCxt, yylhsminor.yy272, &yymsp[0].minor.yy5); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
break;
case 101: /* alter_db_options ::= alter_db_options alter_db_option */
-{ yylhsminor.yy840 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy840, &yymsp[0].minor.yy95); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy272, &yymsp[0].minor.yy5); }
+ yymsp[-1].minor.yy272 = yylhsminor.yy272;
break;
case 102: /* alter_db_option ::= CACHEMODEL NK_STRING */
-{ yymsp[-1].minor.yy95.type = DB_OPTION_CACHEMODEL; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; }
+{ yymsp[-1].minor.yy5.type = DB_OPTION_CACHEMODEL; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; }
break;
case 103: /* alter_db_option ::= CACHESIZE NK_INTEGER */
-{ yymsp[-1].minor.yy95.type = DB_OPTION_CACHESIZE; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; }
+{ yymsp[-1].minor.yy5.type = DB_OPTION_CACHESIZE; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; }
break;
case 104: /* alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */
-{ yymsp[-1].minor.yy95.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; }
+{ yymsp[-1].minor.yy5.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; }
break;
case 105: /* alter_db_option ::= KEEP integer_list */
case 106: /* alter_db_option ::= KEEP variable_list */ yytestcase(yyruleno==106);
-{ yymsp[-1].minor.yy95.type = DB_OPTION_KEEP; yymsp[-1].minor.yy95.pList = yymsp[0].minor.yy544; }
+{ yymsp[-1].minor.yy5.type = DB_OPTION_KEEP; yymsp[-1].minor.yy5.pList = yymsp[0].minor.yy172; }
break;
case 107: /* alter_db_option ::= WAL_LEVEL NK_INTEGER */
-{ yymsp[-1].minor.yy95.type = DB_OPTION_WAL; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; }
+{ yymsp[-1].minor.yy5.type = DB_OPTION_WAL; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; }
break;
case 108: /* integer_list ::= NK_INTEGER */
-{ yylhsminor.yy544 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy544 = yylhsminor.yy544;
+{ yylhsminor.yy172 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy172 = yylhsminor.yy172;
break;
case 109: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */
- case 284: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==284);
-{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-2].minor.yy544, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
- yymsp[-2].minor.yy544 = yylhsminor.yy544;
+ case 282: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==282);
+{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-2].minor.yy172, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
+ yymsp[-2].minor.yy172 = yylhsminor.yy172;
break;
case 110: /* variable_list ::= NK_VARIABLE */
-{ yylhsminor.yy544 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy544 = yylhsminor.yy544;
+{ yylhsminor.yy172 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy172 = yylhsminor.yy172;
break;
case 111: /* variable_list ::= variable_list NK_COMMA NK_VARIABLE */
-{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-2].minor.yy544, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[-2].minor.yy544 = yylhsminor.yy544;
+{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-2].minor.yy172, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[-2].minor.yy172 = yylhsminor.yy172;
break;
case 112: /* retention_list ::= retention */
case 132: /* multi_create_clause ::= create_subtable_clause */ yytestcase(yyruleno==132);
@@ -3792,266 +3742,266 @@ static YYACTIONTYPE yy_reduce(
case 185: /* rollup_func_list ::= rollup_func_name */ yytestcase(yyruleno==185);
case 190: /* col_name_list ::= col_name */ yytestcase(yyruleno==190);
case 238: /* func_list ::= func */ yytestcase(yyruleno==238);
- case 312: /* literal_list ::= signed_literal */ yytestcase(yyruleno==312);
- case 374: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==374);
- case 428: /* select_list ::= select_item */ yytestcase(yyruleno==428);
- case 482: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==482);
-{ yylhsminor.yy544 = createNodeList(pCxt, yymsp[0].minor.yy840); }
- yymsp[0].minor.yy544 = yylhsminor.yy544;
+ case 310: /* literal_list ::= signed_literal */ yytestcase(yyruleno==310);
+ case 372: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==372);
+ case 426: /* select_list ::= select_item */ yytestcase(yyruleno==426);
+ case 480: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==480);
+{ yylhsminor.yy172 = createNodeList(pCxt, yymsp[0].minor.yy272); }
+ yymsp[0].minor.yy172 = yylhsminor.yy172;
break;
case 113: /* retention_list ::= retention_list NK_COMMA retention */
case 143: /* column_def_list ::= column_def_list NK_COMMA column_def */ yytestcase(yyruleno==143);
case 186: /* rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name */ yytestcase(yyruleno==186);
case 191: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==191);
case 239: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==239);
- case 313: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==313);
- case 375: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==375);
- case 429: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==429);
- case 483: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==483);
-{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-2].minor.yy544, yymsp[0].minor.yy840); }
- yymsp[-2].minor.yy544 = yylhsminor.yy544;
+ case 311: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==311);
+ case 373: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==373);
+ case 427: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==427);
+ case 481: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==481);
+{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-2].minor.yy172, yymsp[0].minor.yy272); }
+ yymsp[-2].minor.yy172 = yylhsminor.yy172;
break;
case 114: /* retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */
-{ yylhsminor.yy840 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 115: /* cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */
case 117: /* cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ yytestcase(yyruleno==117);
-{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy313, yymsp[-5].minor.yy840, yymsp[-3].minor.yy544, yymsp[-1].minor.yy544, yymsp[0].minor.yy840); }
+{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy293, yymsp[-5].minor.yy272, yymsp[-3].minor.yy172, yymsp[-1].minor.yy172, yymsp[0].minor.yy272); }
break;
case 116: /* cmd ::= CREATE TABLE multi_create_clause */
-{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy544); }
+{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy172); }
break;
case 118: /* cmd ::= DROP TABLE multi_drop_clause */
-{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy544); }
+{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy172); }
break;
case 119: /* cmd ::= DROP STABLE exists_opt full_table_name */
-{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy313, yymsp[0].minor.yy840); }
+{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy293, yymsp[0].minor.yy272); }
break;
case 120: /* cmd ::= ALTER TABLE alter_table_clause */
- case 286: /* cmd ::= query_expression */ yytestcase(yyruleno==286);
-{ pCxt->pRootNode = yymsp[0].minor.yy840; }
+ case 284: /* cmd ::= query_expression */ yytestcase(yyruleno==284);
+{ pCxt->pRootNode = yymsp[0].minor.yy272; }
break;
case 121: /* cmd ::= ALTER STABLE alter_table_clause */
-{ pCxt->pRootNode = setAlterSuperTableType(yymsp[0].minor.yy840); }
+{ pCxt->pRootNode = setAlterSuperTableType(yymsp[0].minor.yy272); }
break;
case 122: /* alter_table_clause ::= full_table_name alter_table_options */
-{ yylhsminor.yy840 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy840, yymsp[0].minor.yy840); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy272, yymsp[0].minor.yy272); }
+ yymsp[-1].minor.yy272 = yylhsminor.yy272;
break;
case 123: /* alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */
-{ yylhsminor.yy840 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy617, yymsp[0].minor.yy784); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy209, yymsp[0].minor.yy616); }
+ yymsp[-4].minor.yy272 = yylhsminor.yy272;
break;
case 124: /* alter_table_clause ::= full_table_name DROP COLUMN column_name */
-{ yylhsminor.yy840 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy840, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy617); }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy272, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy209); }
+ yymsp[-3].minor.yy272 = yylhsminor.yy272;
break;
case 125: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */
-{ yylhsminor.yy840 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy617, yymsp[0].minor.yy784); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy209, yymsp[0].minor.yy616); }
+ yymsp[-4].minor.yy272 = yylhsminor.yy272;
break;
case 126: /* alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */
-{ yylhsminor.yy840 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy617, &yymsp[0].minor.yy617); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy209, &yymsp[0].minor.yy209); }
+ yymsp[-4].minor.yy272 = yylhsminor.yy272;
break;
case 127: /* alter_table_clause ::= full_table_name ADD TAG column_name type_name */
-{ yylhsminor.yy840 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy617, yymsp[0].minor.yy784); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy209, yymsp[0].minor.yy616); }
+ yymsp[-4].minor.yy272 = yylhsminor.yy272;
break;
case 128: /* alter_table_clause ::= full_table_name DROP TAG column_name */
-{ yylhsminor.yy840 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy840, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy617); }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy272, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy209); }
+ yymsp[-3].minor.yy272 = yylhsminor.yy272;
break;
case 129: /* alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */
-{ yylhsminor.yy840 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy617, yymsp[0].minor.yy784); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy209, yymsp[0].minor.yy616); }
+ yymsp[-4].minor.yy272 = yylhsminor.yy272;
break;
case 130: /* alter_table_clause ::= full_table_name RENAME TAG column_name column_name */
-{ yylhsminor.yy840 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy617, &yymsp[0].minor.yy617); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy209, &yymsp[0].minor.yy209); }
+ yymsp[-4].minor.yy272 = yylhsminor.yy272;
break;
case 131: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */
-{ yylhsminor.yy840 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy840, &yymsp[-2].minor.yy617, yymsp[0].minor.yy840); }
- yymsp[-5].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy272, &yymsp[-2].minor.yy209, yymsp[0].minor.yy272); }
+ yymsp[-5].minor.yy272 = yylhsminor.yy272;
break;
case 133: /* multi_create_clause ::= multi_create_clause create_subtable_clause */
case 136: /* multi_drop_clause ::= multi_drop_clause drop_table_clause */ yytestcase(yyruleno==136);
-{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-1].minor.yy544, yymsp[0].minor.yy840); }
- yymsp[-1].minor.yy544 = yylhsminor.yy544;
+{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-1].minor.yy172, yymsp[0].minor.yy272); }
+ yymsp[-1].minor.yy172 = yylhsminor.yy172;
break;
case 134: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options */
-{ yylhsminor.yy840 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy313, yymsp[-8].minor.yy840, yymsp[-6].minor.yy840, yymsp[-5].minor.yy544, yymsp[-2].minor.yy544, yymsp[0].minor.yy840); }
- yymsp[-9].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy293, yymsp[-8].minor.yy272, yymsp[-6].minor.yy272, yymsp[-5].minor.yy172, yymsp[-2].minor.yy172, yymsp[0].minor.yy272); }
+ yymsp[-9].minor.yy272 = yylhsminor.yy272;
break;
case 137: /* drop_table_clause ::= exists_opt full_table_name */
-{ yylhsminor.yy840 = createDropTableClause(pCxt, yymsp[-1].minor.yy313, yymsp[0].minor.yy840); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createDropTableClause(pCxt, yymsp[-1].minor.yy293, yymsp[0].minor.yy272); }
+ yymsp[-1].minor.yy272 = yylhsminor.yy272;
break;
case 138: /* specific_cols_opt ::= */
case 169: /* tags_def_opt ::= */ yytestcase(yyruleno==169);
- case 437: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==437);
- case 454: /* group_by_clause_opt ::= */ yytestcase(yyruleno==454);
- case 470: /* order_by_clause_opt ::= */ yytestcase(yyruleno==470);
-{ yymsp[1].minor.yy544 = NULL; }
+ case 435: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==435);
+ case 452: /* group_by_clause_opt ::= */ yytestcase(yyruleno==452);
+ case 468: /* order_by_clause_opt ::= */ yytestcase(yyruleno==468);
+{ yymsp[1].minor.yy172 = NULL; }
break;
case 139: /* specific_cols_opt ::= NK_LP col_name_list NK_RP */
-{ yymsp[-2].minor.yy544 = yymsp[-1].minor.yy544; }
+{ yymsp[-2].minor.yy172 = yymsp[-1].minor.yy172; }
break;
case 140: /* full_table_name ::= table_name */
-{ yylhsminor.yy840 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy617, NULL); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy209, NULL); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
break;
case 141: /* full_table_name ::= db_name NK_DOT table_name */
-{ yylhsminor.yy840 = createRealTableNode(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617, NULL); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createRealTableNode(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209, NULL); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 144: /* column_def ::= column_name type_name */
-{ yylhsminor.yy840 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy617, yymsp[0].minor.yy784, NULL); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy209, yymsp[0].minor.yy616, NULL); }
+ yymsp[-1].minor.yy272 = yylhsminor.yy272;
break;
case 145: /* column_def ::= column_name type_name COMMENT NK_STRING */
-{ yylhsminor.yy840 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy617, yymsp[-2].minor.yy784, &yymsp[0].minor.yy0); }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy209, yymsp[-2].minor.yy616, &yymsp[0].minor.yy0); }
+ yymsp[-3].minor.yy272 = yylhsminor.yy272;
break;
case 146: /* type_name ::= BOOL */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_BOOL); }
+{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_BOOL); }
break;
case 147: /* type_name ::= TINYINT */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_TINYINT); }
+{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_TINYINT); }
break;
case 148: /* type_name ::= SMALLINT */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_SMALLINT); }
+{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_SMALLINT); }
break;
case 149: /* type_name ::= INT */
case 150: /* type_name ::= INTEGER */ yytestcase(yyruleno==150);
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_INT); }
+{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_INT); }
break;
case 151: /* type_name ::= BIGINT */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_BIGINT); }
+{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_BIGINT); }
break;
case 152: /* type_name ::= FLOAT */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_FLOAT); }
+{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_FLOAT); }
break;
case 153: /* type_name ::= DOUBLE */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_DOUBLE); }
+{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_DOUBLE); }
break;
case 154: /* type_name ::= BINARY NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); }
+{ yymsp[-3].minor.yy616 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); }
break;
case 155: /* type_name ::= TIMESTAMP */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); }
+{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); }
break;
case 156: /* type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); }
+{ yymsp[-3].minor.yy616 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); }
break;
case 157: /* type_name ::= TINYINT UNSIGNED */
-{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_UTINYINT); }
+{ yymsp[-1].minor.yy616 = createDataType(TSDB_DATA_TYPE_UTINYINT); }
break;
case 158: /* type_name ::= SMALLINT UNSIGNED */
-{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_USMALLINT); }
+{ yymsp[-1].minor.yy616 = createDataType(TSDB_DATA_TYPE_USMALLINT); }
break;
case 159: /* type_name ::= INT UNSIGNED */
-{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_UINT); }
+{ yymsp[-1].minor.yy616 = createDataType(TSDB_DATA_TYPE_UINT); }
break;
case 160: /* type_name ::= BIGINT UNSIGNED */
-{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_UBIGINT); }
+{ yymsp[-1].minor.yy616 = createDataType(TSDB_DATA_TYPE_UBIGINT); }
break;
case 161: /* type_name ::= JSON */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_JSON); }
+{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_JSON); }
break;
case 162: /* type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); }
+{ yymsp[-3].minor.yy616 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); }
break;
case 163: /* type_name ::= MEDIUMBLOB */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); }
+{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); }
break;
case 164: /* type_name ::= BLOB */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_BLOB); }
+{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_BLOB); }
break;
case 165: /* type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); }
+{ yymsp[-3].minor.yy616 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); }
break;
case 166: /* type_name ::= DECIMAL */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
+{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
break;
case 167: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy784 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
+{ yymsp[-3].minor.yy616 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
break;
case 168: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */
-{ yymsp[-5].minor.yy784 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
+{ yymsp[-5].minor.yy616 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
break;
case 170: /* tags_def_opt ::= tags_def */
- case 373: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==373);
-{ yylhsminor.yy544 = yymsp[0].minor.yy544; }
- yymsp[0].minor.yy544 = yylhsminor.yy544;
+ case 371: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==371);
+{ yylhsminor.yy172 = yymsp[0].minor.yy172; }
+ yymsp[0].minor.yy172 = yylhsminor.yy172;
break;
case 171: /* tags_def ::= TAGS NK_LP column_def_list NK_RP */
-{ yymsp[-3].minor.yy544 = yymsp[-1].minor.yy544; }
+{ yymsp[-3].minor.yy172 = yymsp[-1].minor.yy172; }
break;
case 172: /* table_options ::= */
-{ yymsp[1].minor.yy840 = createDefaultTableOptions(pCxt); }
+{ yymsp[1].minor.yy272 = createDefaultTableOptions(pCxt); }
break;
case 173: /* table_options ::= table_options COMMENT NK_STRING */
-{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-2].minor.yy840, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-2].minor.yy272, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 174: /* table_options ::= table_options MAX_DELAY duration_list */
-{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-2].minor.yy840, TABLE_OPTION_MAXDELAY, yymsp[0].minor.yy544); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-2].minor.yy272, TABLE_OPTION_MAXDELAY, yymsp[0].minor.yy172); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 175: /* table_options ::= table_options WATERMARK duration_list */
-{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-2].minor.yy840, TABLE_OPTION_WATERMARK, yymsp[0].minor.yy544); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-2].minor.yy272, TABLE_OPTION_WATERMARK, yymsp[0].minor.yy172); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 176: /* table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */
-{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-4].minor.yy840, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy544); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-4].minor.yy272, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy172); }
+ yymsp[-4].minor.yy272 = yylhsminor.yy272;
break;
case 177: /* table_options ::= table_options TTL NK_INTEGER */
-{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-2].minor.yy840, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-2].minor.yy272, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 178: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */
-{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-4].minor.yy840, TABLE_OPTION_SMA, yymsp[-1].minor.yy544); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-4].minor.yy272, TABLE_OPTION_SMA, yymsp[-1].minor.yy172); }
+ yymsp[-4].minor.yy272 = yylhsminor.yy272;
break;
case 179: /* alter_table_options ::= alter_table_option */
-{ yylhsminor.yy840 = createAlterTableOptions(pCxt); yylhsminor.yy840 = setTableOption(pCxt, yylhsminor.yy840, yymsp[0].minor.yy95.type, &yymsp[0].minor.yy95.val); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createAlterTableOptions(pCxt); yylhsminor.yy272 = setTableOption(pCxt, yylhsminor.yy272, yymsp[0].minor.yy5.type, &yymsp[0].minor.yy5.val); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
break;
case 180: /* alter_table_options ::= alter_table_options alter_table_option */
-{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-1].minor.yy840, yymsp[0].minor.yy95.type, &yymsp[0].minor.yy95.val); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-1].minor.yy272, yymsp[0].minor.yy5.type, &yymsp[0].minor.yy5.val); }
+ yymsp[-1].minor.yy272 = yylhsminor.yy272;
break;
case 181: /* alter_table_option ::= COMMENT NK_STRING */
-{ yymsp[-1].minor.yy95.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; }
+{ yymsp[-1].minor.yy5.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; }
break;
case 182: /* alter_table_option ::= TTL NK_INTEGER */
-{ yymsp[-1].minor.yy95.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; }
+{ yymsp[-1].minor.yy5.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; }
break;
case 183: /* duration_list ::= duration_literal */
- case 340: /* expression_list ::= expression */ yytestcase(yyruleno==340);
-{ yylhsminor.yy544 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy840)); }
- yymsp[0].minor.yy544 = yylhsminor.yy544;
+ case 338: /* expression_list ::= expression */ yytestcase(yyruleno==338);
+{ yylhsminor.yy172 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy272)); }
+ yymsp[0].minor.yy172 = yylhsminor.yy172;
break;
case 184: /* duration_list ::= duration_list NK_COMMA duration_literal */
- case 341: /* expression_list ::= expression_list NK_COMMA expression */ yytestcase(yyruleno==341);
-{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-2].minor.yy544, releaseRawExprNode(pCxt, yymsp[0].minor.yy840)); }
- yymsp[-2].minor.yy544 = yylhsminor.yy544;
+ case 339: /* expression_list ::= expression_list NK_COMMA expression */ yytestcase(yyruleno==339);
+{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-2].minor.yy172, releaseRawExprNode(pCxt, yymsp[0].minor.yy272)); }
+ yymsp[-2].minor.yy172 = yylhsminor.yy172;
break;
case 187: /* rollup_func_name ::= function_name */
-{ yylhsminor.yy840 = createFunctionNode(pCxt, &yymsp[0].minor.yy617, NULL); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createFunctionNode(pCxt, &yymsp[0].minor.yy209, NULL); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
break;
case 188: /* rollup_func_name ::= FIRST */
case 189: /* rollup_func_name ::= LAST */ yytestcase(yyruleno==189);
-{ yylhsminor.yy840 = createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
break;
case 192: /* col_name ::= column_name */
-{ yylhsminor.yy840 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy617); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy209); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
break;
case 193: /* cmd ::= SHOW DNODES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DNODES_STMT); }
@@ -4063,13 +4013,13 @@ static YYACTIONTYPE yy_reduce(
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DATABASES_STMT); }
break;
case 196: /* cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */
-{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy840, yymsp[0].minor.yy840, OP_TYPE_LIKE); }
+{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy272, yymsp[0].minor.yy272, OP_TYPE_LIKE); }
break;
case 197: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */
-{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy840, yymsp[0].minor.yy840, OP_TYPE_LIKE); }
+{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy272, yymsp[0].minor.yy272, OP_TYPE_LIKE); }
break;
case 198: /* cmd ::= SHOW db_name_cond_opt VGROUPS */
-{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy840, NULL, OP_TYPE_LIKE); }
+{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy272, NULL, OP_TYPE_LIKE); }
break;
case 199: /* cmd ::= SHOW MNODES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MNODES_STMT); }
@@ -4084,7 +4034,7 @@ static YYACTIONTYPE yy_reduce(
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_FUNCTIONS_STMT); }
break;
case 203: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */
-{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[0].minor.yy840, yymsp[-1].minor.yy840, OP_TYPE_EQUAL); }
+{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[0].minor.yy272, yymsp[-1].minor.yy272, OP_TYPE_EQUAL); }
break;
case 204: /* cmd ::= SHOW STREAMS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STREAMS_STMT); }
@@ -4103,13 +4053,13 @@ static YYACTIONTYPE yy_reduce(
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LICENCES_STMT); }
break;
case 210: /* cmd ::= SHOW CREATE DATABASE db_name */
-{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy209); }
break;
case 211: /* cmd ::= SHOW CREATE TABLE full_table_name */
-{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy840); }
+{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy272); }
break;
case 212: /* cmd ::= SHOW CREATE STABLE full_table_name */
-{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy840); }
+{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy272); }
break;
case 213: /* cmd ::= SHOW QUERIES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QUERIES_STMT); }
@@ -4142,7 +4092,7 @@ static YYACTIONTYPE yy_reduce(
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TRANSACTIONS_STMT); }
break;
case 223: /* cmd ::= SHOW TABLE DISTRIBUTED full_table_name */
-{ pCxt->pRootNode = createShowTableDistributedStmt(pCxt, yymsp[0].minor.yy840); }
+{ pCxt->pRootNode = createShowTableDistributedStmt(pCxt, yymsp[0].minor.yy272); }
break;
case 224: /* cmd ::= SHOW CONSUMERS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONSUMERS_STMT); }
@@ -4151,713 +4101,711 @@ static YYACTIONTYPE yy_reduce(
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT); }
break;
case 226: /* cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */
-{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, yymsp[0].minor.yy840, yymsp[-1].minor.yy840, OP_TYPE_EQUAL); }
+{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, yymsp[0].minor.yy272, yymsp[-1].minor.yy272, OP_TYPE_EQUAL); }
break;
case 227: /* db_name_cond_opt ::= */
case 232: /* from_db_opt ::= */ yytestcase(yyruleno==232);
-{ yymsp[1].minor.yy840 = createDefaultDatabaseCondValue(pCxt); }
+{ yymsp[1].minor.yy272 = createDefaultDatabaseCondValue(pCxt); }
break;
case 228: /* db_name_cond_opt ::= db_name NK_DOT */
-{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy617); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy209); }
+ yymsp[-1].minor.yy272 = yylhsminor.yy272;
break;
case 229: /* like_pattern_opt ::= */
- case 268: /* into_opt ::= */ yytestcase(yyruleno==268);
- case 406: /* from_clause_opt ::= */ yytestcase(yyruleno==406);
- case 435: /* where_clause_opt ::= */ yytestcase(yyruleno==435);
- case 439: /* twindow_clause_opt ::= */ yytestcase(yyruleno==439);
- case 444: /* sliding_opt ::= */ yytestcase(yyruleno==444);
- case 446: /* fill_opt ::= */ yytestcase(yyruleno==446);
- case 458: /* having_clause_opt ::= */ yytestcase(yyruleno==458);
- case 460: /* range_opt ::= */ yytestcase(yyruleno==460);
- case 462: /* every_opt ::= */ yytestcase(yyruleno==462);
- case 472: /* slimit_clause_opt ::= */ yytestcase(yyruleno==472);
- case 476: /* limit_clause_opt ::= */ yytestcase(yyruleno==476);
-{ yymsp[1].minor.yy840 = NULL; }
+ case 404: /* from_clause_opt ::= */ yytestcase(yyruleno==404);
+ case 433: /* where_clause_opt ::= */ yytestcase(yyruleno==433);
+ case 437: /* twindow_clause_opt ::= */ yytestcase(yyruleno==437);
+ case 442: /* sliding_opt ::= */ yytestcase(yyruleno==442);
+ case 444: /* fill_opt ::= */ yytestcase(yyruleno==444);
+ case 456: /* having_clause_opt ::= */ yytestcase(yyruleno==456);
+ case 458: /* range_opt ::= */ yytestcase(yyruleno==458);
+ case 460: /* every_opt ::= */ yytestcase(yyruleno==460);
+ case 470: /* slimit_clause_opt ::= */ yytestcase(yyruleno==470);
+ case 474: /* limit_clause_opt ::= */ yytestcase(yyruleno==474);
+{ yymsp[1].minor.yy272 = NULL; }
break;
case 230: /* like_pattern_opt ::= LIKE NK_STRING */
-{ yymsp[-1].minor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
+{ yymsp[-1].minor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
break;
case 231: /* table_name_cond ::= table_name */
-{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy617); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy209); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
break;
case 233: /* from_db_opt ::= FROM db_name */
-{ yymsp[-1].minor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy617); }
+{ yymsp[-1].minor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy209); }
break;
case 234: /* cmd ::= CREATE SMA INDEX not_exists_opt full_table_name ON full_table_name index_options */
-{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy313, yymsp[-3].minor.yy840, yymsp[-1].minor.yy840, NULL, yymsp[0].minor.yy840); }
+{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy293, yymsp[-3].minor.yy272, yymsp[-1].minor.yy272, NULL, yymsp[0].minor.yy272); }
break;
case 235: /* cmd ::= DROP INDEX exists_opt full_table_name */
-{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-1].minor.yy313, yymsp[0].minor.yy840); }
+{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-1].minor.yy293, yymsp[0].minor.yy272); }
break;
case 236: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */
-{ yymsp[-9].minor.yy840 = createIndexOption(pCxt, yymsp[-7].minor.yy544, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), NULL, yymsp[-1].minor.yy840, yymsp[0].minor.yy840); }
+{ yymsp[-9].minor.yy272 = createIndexOption(pCxt, yymsp[-7].minor.yy172, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), NULL, yymsp[-1].minor.yy272, yymsp[0].minor.yy272); }
break;
case 237: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */
-{ yymsp[-11].minor.yy840 = createIndexOption(pCxt, yymsp[-9].minor.yy544, releaseRawExprNode(pCxt, yymsp[-5].minor.yy840), releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), yymsp[-1].minor.yy840, yymsp[0].minor.yy840); }
+{ yymsp[-11].minor.yy272 = createIndexOption(pCxt, yymsp[-9].minor.yy172, releaseRawExprNode(pCxt, yymsp[-5].minor.yy272), releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), yymsp[-1].minor.yy272, yymsp[0].minor.yy272); }
break;
case 240: /* func ::= function_name NK_LP expression_list NK_RP */
-{ yylhsminor.yy840 = createFunctionNode(pCxt, &yymsp[-3].minor.yy617, yymsp[-1].minor.yy544); }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = createFunctionNode(pCxt, &yymsp[-3].minor.yy209, yymsp[-1].minor.yy172); }
+ yymsp[-3].minor.yy272 = yylhsminor.yy272;
break;
case 241: /* sma_stream_opt ::= */
- case 270: /* stream_options ::= */ yytestcase(yyruleno==270);
-{ yymsp[1].minor.yy840 = createStreamOptions(pCxt); }
+ case 268: /* stream_options ::= */ yytestcase(yyruleno==268);
+{ yymsp[1].minor.yy272 = createStreamOptions(pCxt); }
break;
case 242: /* sma_stream_opt ::= stream_options WATERMARK duration_literal */
- case 274: /* stream_options ::= stream_options WATERMARK duration_literal */ yytestcase(yyruleno==274);
-{ ((SStreamOptions*)yymsp[-2].minor.yy840)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy840); yylhsminor.yy840 = yymsp[-2].minor.yy840; }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 272: /* stream_options ::= stream_options WATERMARK duration_literal */ yytestcase(yyruleno==272);
+{ ((SStreamOptions*)yymsp[-2].minor.yy272)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy272); yylhsminor.yy272 = yymsp[-2].minor.yy272; }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 243: /* sma_stream_opt ::= stream_options MAX_DELAY duration_literal */
-{ ((SStreamOptions*)yymsp[-2].minor.yy840)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy840); yylhsminor.yy840 = yymsp[-2].minor.yy840; }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ ((SStreamOptions*)yymsp[-2].minor.yy272)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy272); yylhsminor.yy272 = yymsp[-2].minor.yy272; }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 244: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression */
-{ pCxt->pRootNode = createCreateTopicStmtUseQuery(pCxt, yymsp[-3].minor.yy313, &yymsp[-2].minor.yy617, yymsp[0].minor.yy840); }
+{ pCxt->pRootNode = createCreateTopicStmtUseQuery(pCxt, yymsp[-3].minor.yy293, &yymsp[-2].minor.yy209, yymsp[0].minor.yy272); }
break;
case 245: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */
-{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-4].minor.yy313, &yymsp[-3].minor.yy617, &yymsp[0].minor.yy617, false); }
+{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-4].minor.yy293, &yymsp[-3].minor.yy209, &yymsp[0].minor.yy209, false); }
break;
case 246: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name */
-{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-6].minor.yy313, &yymsp[-5].minor.yy617, &yymsp[0].minor.yy617, true); }
+{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-6].minor.yy293, &yymsp[-5].minor.yy209, &yymsp[0].minor.yy209, true); }
break;
case 247: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */
-{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-4].minor.yy313, &yymsp[-3].minor.yy617, yymsp[0].minor.yy840, false); }
+{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-4].minor.yy293, &yymsp[-3].minor.yy209, yymsp[0].minor.yy272, false); }
break;
case 248: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name */
-{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-6].minor.yy313, &yymsp[-5].minor.yy617, yymsp[0].minor.yy840, true); }
+{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-6].minor.yy293, &yymsp[-5].minor.yy209, yymsp[0].minor.yy272, true); }
break;
case 249: /* cmd ::= DROP TOPIC exists_opt topic_name */
-{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy313, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy293, &yymsp[0].minor.yy209); }
break;
case 250: /* cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */
-{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy313, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy293, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209); }
break;
case 251: /* cmd ::= DESC full_table_name */
case 252: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==252);
-{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy840); }
+{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy272); }
break;
case 253: /* cmd ::= RESET QUERY CACHE */
{ pCxt->pRootNode = createResetQueryCacheStmt(pCxt); }
break;
case 254: /* cmd ::= EXPLAIN analyze_opt explain_options query_expression */
-{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy313, yymsp[-1].minor.yy840, yymsp[0].minor.yy840); }
+{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy293, yymsp[-1].minor.yy272, yymsp[0].minor.yy272); }
break;
case 256: /* analyze_opt ::= ANALYZE */
case 263: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==263);
- case 426: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==426);
-{ yymsp[0].minor.yy313 = true; }
+ case 424: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==424);
+{ yymsp[0].minor.yy293 = true; }
break;
case 257: /* explain_options ::= */
-{ yymsp[1].minor.yy840 = createDefaultExplainOptions(pCxt); }
+{ yymsp[1].minor.yy272 = createDefaultExplainOptions(pCxt); }
break;
case 258: /* explain_options ::= explain_options VERBOSE NK_BOOL */
-{ yylhsminor.yy840 = setExplainVerbose(pCxt, yymsp[-2].minor.yy840, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setExplainVerbose(pCxt, yymsp[-2].minor.yy272, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 259: /* explain_options ::= explain_options RATIO NK_FLOAT */
-{ yylhsminor.yy840 = setExplainRatio(pCxt, yymsp[-2].minor.yy840, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy272 = setExplainRatio(pCxt, yymsp[-2].minor.yy272, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
case 260: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
-{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy313, yymsp[-8].minor.yy313, &yymsp[-5].minor.yy617, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy784, yymsp[0].minor.yy844); }
+{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy293, yymsp[-8].minor.yy293, &yymsp[-5].minor.yy209, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy616, yymsp[0].minor.yy232); }
break;
case 261: /* cmd ::= DROP FUNCTION exists_opt function_name */
-{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy313, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy293, &yymsp[0].minor.yy209); }
break;
case 264: /* bufsize_opt ::= */
-{ yymsp[1].minor.yy844 = 0; }
+{ yymsp[1].minor.yy232 = 0; }
break;
case 265: /* bufsize_opt ::= BUFSIZE NK_INTEGER */
-{ yymsp[-1].minor.yy844 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); }
+{ yymsp[-1].minor.yy232 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); }
break;
- case 266: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */
-{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-5].minor.yy313, &yymsp[-4].minor.yy617, yymsp[-2].minor.yy840, yymsp[-3].minor.yy840, yymsp[0].minor.yy840); }
+ case 266: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name AS query_expression */
+{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-6].minor.yy293, &yymsp[-5].minor.yy209, yymsp[-2].minor.yy272, yymsp[-4].minor.yy272, yymsp[0].minor.yy272); }
break;
case 267: /* cmd ::= DROP STREAM exists_opt stream_name */
-{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy313, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy293, &yymsp[0].minor.yy209); }
break;
- case 269: /* into_opt ::= INTO full_table_name */
- case 407: /* from_clause_opt ::= FROM table_reference_list */ yytestcase(yyruleno==407);
- case 436: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==436);
- case 459: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==459);
-{ yymsp[-1].minor.yy840 = yymsp[0].minor.yy840; }
+ case 269: /* stream_options ::= stream_options TRIGGER AT_ONCE */
+{ ((SStreamOptions*)yymsp[-2].minor.yy272)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy272 = yymsp[-2].minor.yy272; }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 271: /* stream_options ::= stream_options TRIGGER AT_ONCE */
-{ ((SStreamOptions*)yymsp[-2].minor.yy840)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy840 = yymsp[-2].minor.yy840; }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 270: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
+{ ((SStreamOptions*)yymsp[-2].minor.yy272)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy272 = yymsp[-2].minor.yy272; }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 272: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
-{ ((SStreamOptions*)yymsp[-2].minor.yy840)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy840 = yymsp[-2].minor.yy840; }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 271: /* stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */
+{ ((SStreamOptions*)yymsp[-3].minor.yy272)->triggerType = STREAM_TRIGGER_MAX_DELAY; ((SStreamOptions*)yymsp[-3].minor.yy272)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy272); yylhsminor.yy272 = yymsp[-3].minor.yy272; }
+ yymsp[-3].minor.yy272 = yylhsminor.yy272;
break;
- case 273: /* stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */
-{ ((SStreamOptions*)yymsp[-3].minor.yy840)->triggerType = STREAM_TRIGGER_MAX_DELAY; ((SStreamOptions*)yymsp[-3].minor.yy840)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy840); yylhsminor.yy840 = yymsp[-3].minor.yy840; }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ case 273: /* stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */
+{ ((SStreamOptions*)yymsp[-3].minor.yy272)->ignoreExpired = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); yylhsminor.yy272 = yymsp[-3].minor.yy272; }
+ yymsp[-3].minor.yy272 = yylhsminor.yy272;
break;
- case 275: /* stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */
-{ ((SStreamOptions*)yymsp[-3].minor.yy840)->ignoreExpired = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); yylhsminor.yy840 = yymsp[-3].minor.yy840; }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
- break;
- case 276: /* cmd ::= KILL CONNECTION NK_INTEGER */
+ case 274: /* cmd ::= KILL CONNECTION NK_INTEGER */
{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &yymsp[0].minor.yy0); }
break;
- case 277: /* cmd ::= KILL QUERY NK_STRING */
+ case 275: /* cmd ::= KILL QUERY NK_STRING */
{ pCxt->pRootNode = createKillQueryStmt(pCxt, &yymsp[0].minor.yy0); }
break;
- case 278: /* cmd ::= KILL TRANSACTION NK_INTEGER */
+ case 276: /* cmd ::= KILL TRANSACTION NK_INTEGER */
{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_TRANSACTION_STMT, &yymsp[0].minor.yy0); }
break;
- case 279: /* cmd ::= BALANCE VGROUP */
+ case 277: /* cmd ::= BALANCE VGROUP */
{ pCxt->pRootNode = createBalanceVgroupStmt(pCxt); }
break;
- case 280: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
+ case 278: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
{ pCxt->pRootNode = createMergeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); }
break;
- case 281: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
-{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy544); }
+ case 279: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
+{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy172); }
break;
- case 282: /* cmd ::= SPLIT VGROUP NK_INTEGER */
+ case 280: /* cmd ::= SPLIT VGROUP NK_INTEGER */
{ pCxt->pRootNode = createSplitVgroupStmt(pCxt, &yymsp[0].minor.yy0); }
break;
- case 283: /* dnode_list ::= DNODE NK_INTEGER */
-{ yymsp[-1].minor.yy544 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
- break;
- case 285: /* cmd ::= DELETE FROM full_table_name where_clause_opt */
-{ pCxt->pRootNode = createDeleteStmt(pCxt, yymsp[-1].minor.yy840, yymsp[0].minor.yy840); }
- break;
- case 287: /* cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression */
-{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-4].minor.yy840, yymsp[-2].minor.yy544, yymsp[0].minor.yy840); }
- break;
- case 288: /* cmd ::= INSERT INTO full_table_name query_expression */
-{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-1].minor.yy840, NULL, yymsp[0].minor.yy840); }
- break;
- case 289: /* literal ::= NK_INTEGER */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 290: /* literal ::= NK_FLOAT */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 291: /* literal ::= NK_STRING */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 292: /* literal ::= NK_BOOL */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 293: /* literal ::= TIMESTAMP NK_STRING */
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
- break;
- case 294: /* literal ::= duration_literal */
- case 304: /* signed_literal ::= signed */ yytestcase(yyruleno==304);
- case 324: /* expression ::= literal */ yytestcase(yyruleno==324);
- case 325: /* expression ::= pseudo_column */ yytestcase(yyruleno==325);
- case 326: /* expression ::= column_reference */ yytestcase(yyruleno==326);
- case 327: /* expression ::= function_expression */ yytestcase(yyruleno==327);
- case 328: /* expression ::= subquery */ yytestcase(yyruleno==328);
- case 356: /* function_expression ::= literal_func */ yytestcase(yyruleno==356);
- case 398: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==398);
- case 402: /* boolean_primary ::= predicate */ yytestcase(yyruleno==402);
- case 404: /* common_expression ::= expression */ yytestcase(yyruleno==404);
- case 405: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==405);
- case 408: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==408);
- case 410: /* table_reference ::= table_primary */ yytestcase(yyruleno==410);
- case 411: /* table_reference ::= joined_table */ yytestcase(yyruleno==411);
- case 415: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==415);
- case 465: /* query_expression_body ::= query_primary */ yytestcase(yyruleno==465);
- case 468: /* query_primary ::= query_specification */ yytestcase(yyruleno==468);
-{ yylhsminor.yy840 = yymsp[0].minor.yy840; }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 295: /* literal ::= NULL */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 296: /* literal ::= NK_QUESTION */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 297: /* duration_literal ::= NK_VARIABLE */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 298: /* signed ::= NK_INTEGER */
-{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 299: /* signed ::= NK_PLUS NK_INTEGER */
-{ yymsp[-1].minor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); }
- break;
- case 300: /* signed ::= NK_MINUS NK_INTEGER */
+ case 281: /* dnode_list ::= DNODE NK_INTEGER */
+{ yymsp[-1].minor.yy172 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
+ break;
+ case 283: /* cmd ::= DELETE FROM full_table_name where_clause_opt */
+{ pCxt->pRootNode = createDeleteStmt(pCxt, yymsp[-1].minor.yy272, yymsp[0].minor.yy272); }
+ break;
+ case 285: /* cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression */
+{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-4].minor.yy272, yymsp[-2].minor.yy172, yymsp[0].minor.yy272); }
+ break;
+ case 286: /* cmd ::= INSERT INTO full_table_name query_expression */
+{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-1].minor.yy272, NULL, yymsp[0].minor.yy272); }
+ break;
+ case 287: /* literal ::= NK_INTEGER */
+{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 288: /* literal ::= NK_FLOAT */
+{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 289: /* literal ::= NK_STRING */
+{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 290: /* literal ::= NK_BOOL */
+{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 291: /* literal ::= TIMESTAMP NK_STRING */
+{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); }
+ yymsp[-1].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 292: /* literal ::= duration_literal */
+ case 302: /* signed_literal ::= signed */ yytestcase(yyruleno==302);
+ case 322: /* expression ::= literal */ yytestcase(yyruleno==322);
+ case 323: /* expression ::= pseudo_column */ yytestcase(yyruleno==323);
+ case 324: /* expression ::= column_reference */ yytestcase(yyruleno==324);
+ case 325: /* expression ::= function_expression */ yytestcase(yyruleno==325);
+ case 326: /* expression ::= subquery */ yytestcase(yyruleno==326);
+ case 354: /* function_expression ::= literal_func */ yytestcase(yyruleno==354);
+ case 396: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==396);
+ case 400: /* boolean_primary ::= predicate */ yytestcase(yyruleno==400);
+ case 402: /* common_expression ::= expression */ yytestcase(yyruleno==402);
+ case 403: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==403);
+ case 406: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==406);
+ case 408: /* table_reference ::= table_primary */ yytestcase(yyruleno==408);
+ case 409: /* table_reference ::= joined_table */ yytestcase(yyruleno==409);
+ case 413: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==413);
+ case 463: /* query_expression_body ::= query_primary */ yytestcase(yyruleno==463);
+ case 466: /* query_primary ::= query_specification */ yytestcase(yyruleno==466);
+{ yylhsminor.yy272 = yymsp[0].minor.yy272; }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 293: /* literal ::= NULL */
+{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 294: /* literal ::= NK_QUESTION */
+{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 295: /* duration_literal ::= NK_VARIABLE */
+{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 296: /* signed ::= NK_INTEGER */
+{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 297: /* signed ::= NK_PLUS NK_INTEGER */
+{ yymsp[-1].minor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); }
+ break;
+ case 298: /* signed ::= NK_MINUS NK_INTEGER */
{
SToken t = yymsp[-1].minor.yy0;
t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
- yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t);
+ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t);
}
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ yymsp[-1].minor.yy272 = yylhsminor.yy272;
break;
- case 301: /* signed ::= NK_FLOAT */
-{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 299: /* signed ::= NK_FLOAT */
+{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
break;
- case 302: /* signed ::= NK_PLUS NK_FLOAT */
-{ yymsp[-1].minor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
+ case 300: /* signed ::= NK_PLUS NK_FLOAT */
+{ yymsp[-1].minor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
break;
- case 303: /* signed ::= NK_MINUS NK_FLOAT */
+ case 301: /* signed ::= NK_MINUS NK_FLOAT */
{
SToken t = yymsp[-1].minor.yy0;
t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
- yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t);
+ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t);
}
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ yymsp[-1].minor.yy272 = yylhsminor.yy272;
break;
- case 305: /* signed_literal ::= NK_STRING */
-{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 303: /* signed_literal ::= NK_STRING */
+{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
break;
- case 306: /* signed_literal ::= NK_BOOL */
-{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 304: /* signed_literal ::= NK_BOOL */
+{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
break;
- case 307: /* signed_literal ::= TIMESTAMP NK_STRING */
-{ yymsp[-1].minor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); }
+ case 305: /* signed_literal ::= TIMESTAMP NK_STRING */
+{ yymsp[-1].minor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); }
break;
- case 308: /* signed_literal ::= duration_literal */
- case 310: /* signed_literal ::= literal_func */ yytestcase(yyruleno==310);
- case 376: /* star_func_para ::= expression */ yytestcase(yyruleno==376);
- case 431: /* select_item ::= common_expression */ yytestcase(yyruleno==431);
- case 481: /* search_condition ::= common_expression */ yytestcase(yyruleno==481);
-{ yylhsminor.yy840 = releaseRawExprNode(pCxt, yymsp[0].minor.yy840); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 306: /* signed_literal ::= duration_literal */
+ case 308: /* signed_literal ::= literal_func */ yytestcase(yyruleno==308);
+ case 374: /* star_func_para ::= expression */ yytestcase(yyruleno==374);
+ case 429: /* select_item ::= common_expression */ yytestcase(yyruleno==429);
+ case 479: /* search_condition ::= common_expression */ yytestcase(yyruleno==479);
+{ yylhsminor.yy272 = releaseRawExprNode(pCxt, yymsp[0].minor.yy272); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
break;
- case 309: /* signed_literal ::= NULL */
-{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 307: /* signed_literal ::= NULL */
+{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
break;
- case 311: /* signed_literal ::= NK_QUESTION */
-{ yylhsminor.yy840 = createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 309: /* signed_literal ::= NK_QUESTION */
+{ yylhsminor.yy272 = createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
break;
- case 329: /* expression ::= NK_LP expression NK_RP */
- case 403: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==403);
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy840)); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 327: /* expression ::= NK_LP expression NK_RP */
+ case 401: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==401);
+{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy272)); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 330: /* expression ::= NK_PLUS expression */
+ case 328: /* expression ::= NK_PLUS expression */
{
- SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy840));
+ SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy272));
}
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ yymsp[-1].minor.yy272 = yylhsminor.yy272;
break;
- case 331: /* expression ::= NK_MINUS expression */
+ case 329: /* expression ::= NK_MINUS expression */
{
- SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy840), NULL));
+ SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy272), NULL));
}
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ yymsp[-1].minor.yy272 = yylhsminor.yy272;
break;
- case 332: /* expression ::= expression NK_PLUS expression */
+ case 330: /* expression ::= expression NK_PLUS expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 333: /* expression ::= expression NK_MINUS expression */
+ case 331: /* expression ::= expression NK_MINUS expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 334: /* expression ::= expression NK_STAR expression */
+ case 332: /* expression ::= expression NK_STAR expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 335: /* expression ::= expression NK_SLASH expression */
+ case 333: /* expression ::= expression NK_SLASH expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 336: /* expression ::= expression NK_REM expression */
+ case 334: /* expression ::= expression NK_REM expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_REM, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_REM, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 337: /* expression ::= column_reference NK_ARROW NK_STRING */
+ case 335: /* expression ::= column_reference NK_ARROW NK_STRING */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 338: /* expression ::= expression NK_BITAND expression */
+ case 336: /* expression ::= expression NK_BITAND expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 339: /* expression ::= expression NK_BITOR expression */
+ case 337: /* expression ::= expression NK_BITOR expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
- break;
- case 342: /* column_reference ::= column_name */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy617, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy617)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 343: /* column_reference ::= table_name NK_DOT column_name */
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617, createColumnNode(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617)); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
- break;
- case 344: /* pseudo_column ::= ROWTS */
- case 345: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==345);
- case 347: /* pseudo_column ::= QSTART */ yytestcase(yyruleno==347);
- case 348: /* pseudo_column ::= QEND */ yytestcase(yyruleno==348);
- case 349: /* pseudo_column ::= QDURATION */ yytestcase(yyruleno==349);
- case 350: /* pseudo_column ::= WSTART */ yytestcase(yyruleno==350);
- case 351: /* pseudo_column ::= WEND */ yytestcase(yyruleno==351);
- case 352: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==352);
- case 358: /* literal_func ::= NOW */ yytestcase(yyruleno==358);
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 346: /* pseudo_column ::= table_name NK_DOT TBNAME */
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy617)))); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
- break;
- case 353: /* function_expression ::= function_name NK_LP expression_list NK_RP */
- case 354: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==354);
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy617, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy617, yymsp[-1].minor.yy544)); }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
- break;
- case 355: /* function_expression ::= CAST NK_LP expression AS type_name NK_RP */
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), yymsp[-1].minor.yy784)); }
- yymsp[-5].minor.yy840 = yylhsminor.yy840;
- break;
- case 357: /* literal_func ::= noarg_func NK_LP NK_RP */
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy617, NULL)); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
- break;
- case 372: /* star_func_para_list ::= NK_STAR */
-{ yylhsminor.yy544 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy544 = yylhsminor.yy544;
- break;
- case 377: /* star_func_para ::= table_name NK_DOT NK_STAR */
- case 434: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==434);
-{ yylhsminor.yy840 = createColumnNode(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
- break;
- case 378: /* predicate ::= expression compare_op expression */
- case 383: /* predicate ::= expression in_op in_predicate_value */ yytestcase(yyruleno==383);
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 340: /* column_reference ::= column_name */
+{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy209, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy209)); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 341: /* column_reference ::= table_name NK_DOT column_name */
+{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209, createColumnNode(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209)); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 342: /* pseudo_column ::= ROWTS */
+ case 343: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==343);
+ case 345: /* pseudo_column ::= QSTART */ yytestcase(yyruleno==345);
+ case 346: /* pseudo_column ::= QEND */ yytestcase(yyruleno==346);
+ case 347: /* pseudo_column ::= QDURATION */ yytestcase(yyruleno==347);
+ case 348: /* pseudo_column ::= WSTART */ yytestcase(yyruleno==348);
+ case 349: /* pseudo_column ::= WEND */ yytestcase(yyruleno==349);
+ case 350: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==350);
+ case 356: /* literal_func ::= NOW */ yytestcase(yyruleno==356);
+{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 344: /* pseudo_column ::= table_name NK_DOT TBNAME */
+{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy209)))); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 351: /* function_expression ::= function_name NK_LP expression_list NK_RP */
+ case 352: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==352);
+{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy209, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy209, yymsp[-1].minor.yy172)); }
+ yymsp[-3].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 353: /* function_expression ::= CAST NK_LP expression AS type_name NK_RP */
+{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), yymsp[-1].minor.yy616)); }
+ yymsp[-5].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 355: /* literal_func ::= noarg_func NK_LP NK_RP */
+{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy209, NULL)); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 370: /* star_func_para_list ::= NK_STAR */
+{ yylhsminor.yy172 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy172 = yylhsminor.yy172;
+ break;
+ case 375: /* star_func_para ::= table_name NK_DOT NK_STAR */
+ case 432: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==432);
+{ yylhsminor.yy272 = createColumnNode(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 376: /* predicate ::= expression compare_op expression */
+ case 381: /* predicate ::= expression in_op in_predicate_value */ yytestcase(yyruleno==381);
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy198, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy392, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 379: /* predicate ::= expression BETWEEN expression AND expression */
+ case 377: /* predicate ::= expression BETWEEN expression AND expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy840), releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy272);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy272), releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
}
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+ yymsp[-4].minor.yy272 = yylhsminor.yy272;
break;
- case 380: /* predicate ::= expression NOT BETWEEN expression AND expression */
+ case 378: /* predicate ::= expression NOT BETWEEN expression AND expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy840), releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy272);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy272), releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
}
- yymsp[-5].minor.yy840 = yylhsminor.yy840;
+ yymsp[-5].minor.yy272 = yylhsminor.yy272;
break;
- case 381: /* predicate ::= expression IS NULL */
+ case 379: /* predicate ::= expression IS NULL */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), NULL));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), NULL));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 382: /* predicate ::= expression IS NOT NULL */
+ case 380: /* predicate ::= expression IS NOT NULL */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), NULL));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), NULL));
}
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ yymsp[-3].minor.yy272 = yylhsminor.yy272;
break;
- case 384: /* compare_op ::= NK_LT */
-{ yymsp[0].minor.yy198 = OP_TYPE_LOWER_THAN; }
+ case 382: /* compare_op ::= NK_LT */
+{ yymsp[0].minor.yy392 = OP_TYPE_LOWER_THAN; }
break;
- case 385: /* compare_op ::= NK_GT */
-{ yymsp[0].minor.yy198 = OP_TYPE_GREATER_THAN; }
+ case 383: /* compare_op ::= NK_GT */
+{ yymsp[0].minor.yy392 = OP_TYPE_GREATER_THAN; }
break;
- case 386: /* compare_op ::= NK_LE */
-{ yymsp[0].minor.yy198 = OP_TYPE_LOWER_EQUAL; }
+ case 384: /* compare_op ::= NK_LE */
+{ yymsp[0].minor.yy392 = OP_TYPE_LOWER_EQUAL; }
break;
- case 387: /* compare_op ::= NK_GE */
-{ yymsp[0].minor.yy198 = OP_TYPE_GREATER_EQUAL; }
+ case 385: /* compare_op ::= NK_GE */
+{ yymsp[0].minor.yy392 = OP_TYPE_GREATER_EQUAL; }
break;
- case 388: /* compare_op ::= NK_NE */
-{ yymsp[0].minor.yy198 = OP_TYPE_NOT_EQUAL; }
+ case 386: /* compare_op ::= NK_NE */
+{ yymsp[0].minor.yy392 = OP_TYPE_NOT_EQUAL; }
break;
- case 389: /* compare_op ::= NK_EQ */
-{ yymsp[0].minor.yy198 = OP_TYPE_EQUAL; }
+ case 387: /* compare_op ::= NK_EQ */
+{ yymsp[0].minor.yy392 = OP_TYPE_EQUAL; }
break;
- case 390: /* compare_op ::= LIKE */
-{ yymsp[0].minor.yy198 = OP_TYPE_LIKE; }
+ case 388: /* compare_op ::= LIKE */
+{ yymsp[0].minor.yy392 = OP_TYPE_LIKE; }
break;
- case 391: /* compare_op ::= NOT LIKE */
-{ yymsp[-1].minor.yy198 = OP_TYPE_NOT_LIKE; }
+ case 389: /* compare_op ::= NOT LIKE */
+{ yymsp[-1].minor.yy392 = OP_TYPE_NOT_LIKE; }
break;
- case 392: /* compare_op ::= MATCH */
-{ yymsp[0].minor.yy198 = OP_TYPE_MATCH; }
+ case 390: /* compare_op ::= MATCH */
+{ yymsp[0].minor.yy392 = OP_TYPE_MATCH; }
break;
- case 393: /* compare_op ::= NMATCH */
-{ yymsp[0].minor.yy198 = OP_TYPE_NMATCH; }
+ case 391: /* compare_op ::= NMATCH */
+{ yymsp[0].minor.yy392 = OP_TYPE_NMATCH; }
break;
- case 394: /* compare_op ::= CONTAINS */
-{ yymsp[0].minor.yy198 = OP_TYPE_JSON_CONTAINS; }
+ case 392: /* compare_op ::= CONTAINS */
+{ yymsp[0].minor.yy392 = OP_TYPE_JSON_CONTAINS; }
break;
- case 395: /* in_op ::= IN */
-{ yymsp[0].minor.yy198 = OP_TYPE_IN; }
+ case 393: /* in_op ::= IN */
+{ yymsp[0].minor.yy392 = OP_TYPE_IN; }
break;
- case 396: /* in_op ::= NOT IN */
-{ yymsp[-1].minor.yy198 = OP_TYPE_NOT_IN; }
+ case 394: /* in_op ::= NOT IN */
+{ yymsp[-1].minor.yy392 = OP_TYPE_NOT_IN; }
break;
- case 397: /* in_predicate_value ::= NK_LP literal_list NK_RP */
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy544)); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 395: /* in_predicate_value ::= NK_LP literal_list NK_RP */
+{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy172)); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 399: /* boolean_value_expression ::= NOT boolean_primary */
+ case 397: /* boolean_value_expression ::= NOT boolean_primary */
{
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy840), NULL));
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy272), NULL));
}
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ yymsp[-1].minor.yy272 = yylhsminor.yy272;
break;
- case 400: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */
+ case 398: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 401: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */
+ case 399: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
+ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ break;
+ case 405: /* from_clause_opt ::= FROM table_reference_list */
+ case 434: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==434);
+ case 457: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==457);
+{ yymsp[-1].minor.yy272 = yymsp[0].minor.yy272; }
break;
- case 409: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */
-{ yylhsminor.yy840 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy840, yymsp[0].minor.yy840, NULL); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 407: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */
+{ yylhsminor.yy272 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy272, yymsp[0].minor.yy272, NULL); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 412: /* table_primary ::= table_name alias_opt */
-{ yylhsminor.yy840 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy617, &yymsp[0].minor.yy617); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ case 410: /* table_primary ::= table_name alias_opt */
+{ yylhsminor.yy272 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy209, &yymsp[0].minor.yy209); }
+ yymsp[-1].minor.yy272 = yylhsminor.yy272;
break;
- case 413: /* table_primary ::= db_name NK_DOT table_name alias_opt */
-{ yylhsminor.yy840 = createRealTableNode(pCxt, &yymsp[-3].minor.yy617, &yymsp[-1].minor.yy617, &yymsp[0].minor.yy617); }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ case 411: /* table_primary ::= db_name NK_DOT table_name alias_opt */
+{ yylhsminor.yy272 = createRealTableNode(pCxt, &yymsp[-3].minor.yy209, &yymsp[-1].minor.yy209, &yymsp[0].minor.yy209); }
+ yymsp[-3].minor.yy272 = yylhsminor.yy272;
break;
- case 414: /* table_primary ::= subquery alias_opt */
-{ yylhsminor.yy840 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy840), &yymsp[0].minor.yy617); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ case 412: /* table_primary ::= subquery alias_opt */
+{ yylhsminor.yy272 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy272), &yymsp[0].minor.yy209); }
+ yymsp[-1].minor.yy272 = yylhsminor.yy272;
break;
- case 416: /* alias_opt ::= */
-{ yymsp[1].minor.yy617 = nil_token; }
+ case 414: /* alias_opt ::= */
+{ yymsp[1].minor.yy209 = nil_token; }
break;
- case 417: /* alias_opt ::= table_alias */
-{ yylhsminor.yy617 = yymsp[0].minor.yy617; }
- yymsp[0].minor.yy617 = yylhsminor.yy617;
+ case 415: /* alias_opt ::= table_alias */
+{ yylhsminor.yy209 = yymsp[0].minor.yy209; }
+ yymsp[0].minor.yy209 = yylhsminor.yy209;
break;
- case 418: /* alias_opt ::= AS table_alias */
-{ yymsp[-1].minor.yy617 = yymsp[0].minor.yy617; }
+ case 416: /* alias_opt ::= AS table_alias */
+{ yymsp[-1].minor.yy209 = yymsp[0].minor.yy209; }
break;
- case 419: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */
- case 420: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==420);
-{ yymsp[-2].minor.yy840 = yymsp[-1].minor.yy840; }
+ case 417: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */
+ case 418: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==418);
+{ yymsp[-2].minor.yy272 = yymsp[-1].minor.yy272; }
break;
- case 421: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */
-{ yylhsminor.yy840 = createJoinTableNode(pCxt, yymsp[-4].minor.yy708, yymsp[-5].minor.yy840, yymsp[-2].minor.yy840, yymsp[0].minor.yy840); }
- yymsp[-5].minor.yy840 = yylhsminor.yy840;
+ case 419: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */
+{ yylhsminor.yy272 = createJoinTableNode(pCxt, yymsp[-4].minor.yy156, yymsp[-5].minor.yy272, yymsp[-2].minor.yy272, yymsp[0].minor.yy272); }
+ yymsp[-5].minor.yy272 = yylhsminor.yy272;
break;
- case 422: /* join_type ::= */
-{ yymsp[1].minor.yy708 = JOIN_TYPE_INNER; }
+ case 420: /* join_type ::= */
+{ yymsp[1].minor.yy156 = JOIN_TYPE_INNER; }
break;
- case 423: /* join_type ::= INNER */
-{ yymsp[0].minor.yy708 = JOIN_TYPE_INNER; }
+ case 421: /* join_type ::= INNER */
+{ yymsp[0].minor.yy156 = JOIN_TYPE_INNER; }
break;
- case 424: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */
+ case 422: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */
{
- yymsp[-11].minor.yy840 = createSelectStmt(pCxt, yymsp[-10].minor.yy313, yymsp[-9].minor.yy544, yymsp[-8].minor.yy840);
- yymsp[-11].minor.yy840 = addWhereClause(pCxt, yymsp[-11].minor.yy840, yymsp[-7].minor.yy840);
- yymsp[-11].minor.yy840 = addPartitionByClause(pCxt, yymsp[-11].minor.yy840, yymsp[-6].minor.yy544);
- yymsp[-11].minor.yy840 = addWindowClauseClause(pCxt, yymsp[-11].minor.yy840, yymsp[-2].minor.yy840);
- yymsp[-11].minor.yy840 = addGroupByClause(pCxt, yymsp[-11].minor.yy840, yymsp[-1].minor.yy544);
- yymsp[-11].minor.yy840 = addHavingClause(pCxt, yymsp[-11].minor.yy840, yymsp[0].minor.yy840);
- yymsp[-11].minor.yy840 = addRangeClause(pCxt, yymsp[-11].minor.yy840, yymsp[-5].minor.yy840);
- yymsp[-11].minor.yy840 = addEveryClause(pCxt, yymsp[-11].minor.yy840, yymsp[-4].minor.yy840);
- yymsp[-11].minor.yy840 = addFillClause(pCxt, yymsp[-11].minor.yy840, yymsp[-3].minor.yy840);
+ yymsp[-11].minor.yy272 = createSelectStmt(pCxt, yymsp[-10].minor.yy293, yymsp[-9].minor.yy172, yymsp[-8].minor.yy272);
+ yymsp[-11].minor.yy272 = addWhereClause(pCxt, yymsp[-11].minor.yy272, yymsp[-7].minor.yy272);
+ yymsp[-11].minor.yy272 = addPartitionByClause(pCxt, yymsp[-11].minor.yy272, yymsp[-6].minor.yy172);
+ yymsp[-11].minor.yy272 = addWindowClauseClause(pCxt, yymsp[-11].minor.yy272, yymsp[-2].minor.yy272);
+ yymsp[-11].minor.yy272 = addGroupByClause(pCxt, yymsp[-11].minor.yy272, yymsp[-1].minor.yy172);
+ yymsp[-11].minor.yy272 = addHavingClause(pCxt, yymsp[-11].minor.yy272, yymsp[0].minor.yy272);
+ yymsp[-11].minor.yy272 = addRangeClause(pCxt, yymsp[-11].minor.yy272, yymsp[-5].minor.yy272);
+ yymsp[-11].minor.yy272 = addEveryClause(pCxt, yymsp[-11].minor.yy272, yymsp[-4].minor.yy272);
+ yymsp[-11].minor.yy272 = addFillClause(pCxt, yymsp[-11].minor.yy272, yymsp[-3].minor.yy272);
}
break;
- case 427: /* set_quantifier_opt ::= ALL */
-{ yymsp[0].minor.yy313 = false; }
+ case 425: /* set_quantifier_opt ::= ALL */
+{ yymsp[0].minor.yy293 = false; }
break;
- case 430: /* select_item ::= NK_STAR */
-{ yylhsminor.yy840 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 428: /* select_item ::= NK_STAR */
+{ yylhsminor.yy272 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy272 = yylhsminor.yy272;
break;
- case 432: /* select_item ::= common_expression column_alias */
-{ yylhsminor.yy840 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy840), &yymsp[0].minor.yy617); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ case 430: /* select_item ::= common_expression column_alias */
+{ yylhsminor.yy272 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy272), &yymsp[0].minor.yy209); }
+ yymsp[-1].minor.yy272 = yylhsminor.yy272;
break;
- case 433: /* select_item ::= common_expression AS column_alias */
-{ yylhsminor.yy840 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), &yymsp[0].minor.yy617); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 431: /* select_item ::= common_expression AS column_alias */
+{ yylhsminor.yy272 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), &yymsp[0].minor.yy209); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 438: /* partition_by_clause_opt ::= PARTITION BY expression_list */
- case 455: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==455);
- case 471: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==471);
-{ yymsp[-2].minor.yy544 = yymsp[0].minor.yy544; }
+ case 436: /* partition_by_clause_opt ::= PARTITION BY expression_list */
+ case 453: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==453);
+ case 469: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==469);
+{ yymsp[-2].minor.yy172 = yymsp[0].minor.yy172; }
break;
- case 440: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
-{ yymsp[-5].minor.yy840 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), releaseRawExprNode(pCxt, yymsp[-1].minor.yy840)); }
+ case 438: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
+{ yymsp[-5].minor.yy272 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), releaseRawExprNode(pCxt, yymsp[-1].minor.yy272)); }
break;
- case 441: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */
-{ yymsp[-3].minor.yy840 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy840)); }
+ case 439: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */
+{ yymsp[-3].minor.yy272 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy272)); }
break;
- case 442: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */
-{ yymsp[-5].minor.yy840 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), NULL, yymsp[-1].minor.yy840, yymsp[0].minor.yy840); }
+ case 440: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */
+{ yymsp[-5].minor.yy272 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), NULL, yymsp[-1].minor.yy272, yymsp[0].minor.yy272); }
break;
- case 443: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */
-{ yymsp[-7].minor.yy840 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy840), releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), yymsp[-1].minor.yy840, yymsp[0].minor.yy840); }
+ case 441: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */
+{ yymsp[-7].minor.yy272 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy272), releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), yymsp[-1].minor.yy272, yymsp[0].minor.yy272); }
break;
- case 445: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */
- case 463: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==463);
-{ yymsp[-3].minor.yy840 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy840); }
+ case 443: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */
+ case 461: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==461);
+{ yymsp[-3].minor.yy272 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy272); }
break;
- case 447: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */
-{ yymsp[-3].minor.yy840 = createFillNode(pCxt, yymsp[-1].minor.yy816, NULL); }
+ case 445: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */
+{ yymsp[-3].minor.yy272 = createFillNode(pCxt, yymsp[-1].minor.yy186, NULL); }
break;
- case 448: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */
-{ yymsp[-5].minor.yy840 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy544)); }
+ case 446: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */
+{ yymsp[-5].minor.yy272 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy172)); }
break;
- case 449: /* fill_mode ::= NONE */
-{ yymsp[0].minor.yy816 = FILL_MODE_NONE; }
+ case 447: /* fill_mode ::= NONE */
+{ yymsp[0].minor.yy186 = FILL_MODE_NONE; }
break;
- case 450: /* fill_mode ::= PREV */
-{ yymsp[0].minor.yy816 = FILL_MODE_PREV; }
+ case 448: /* fill_mode ::= PREV */
+{ yymsp[0].minor.yy186 = FILL_MODE_PREV; }
break;
- case 451: /* fill_mode ::= NULL */
-{ yymsp[0].minor.yy816 = FILL_MODE_NULL; }
+ case 449: /* fill_mode ::= NULL */
+{ yymsp[0].minor.yy186 = FILL_MODE_NULL; }
break;
- case 452: /* fill_mode ::= LINEAR */
-{ yymsp[0].minor.yy816 = FILL_MODE_LINEAR; }
+ case 450: /* fill_mode ::= LINEAR */
+{ yymsp[0].minor.yy186 = FILL_MODE_LINEAR; }
break;
- case 453: /* fill_mode ::= NEXT */
-{ yymsp[0].minor.yy816 = FILL_MODE_NEXT; }
+ case 451: /* fill_mode ::= NEXT */
+{ yymsp[0].minor.yy186 = FILL_MODE_NEXT; }
break;
- case 456: /* group_by_list ::= expression */
-{ yylhsminor.yy544 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); }
- yymsp[0].minor.yy544 = yylhsminor.yy544;
+ case 454: /* group_by_list ::= expression */
+{ yylhsminor.yy172 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); }
+ yymsp[0].minor.yy172 = yylhsminor.yy172;
break;
- case 457: /* group_by_list ::= group_by_list NK_COMMA expression */
-{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-2].minor.yy544, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); }
- yymsp[-2].minor.yy544 = yylhsminor.yy544;
+ case 455: /* group_by_list ::= group_by_list NK_COMMA expression */
+{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-2].minor.yy172, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); }
+ yymsp[-2].minor.yy172 = yylhsminor.yy172;
break;
- case 461: /* range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP */
-{ yymsp[-5].minor.yy840 = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), releaseRawExprNode(pCxt, yymsp[-1].minor.yy840)); }
+ case 459: /* range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP */
+{ yymsp[-5].minor.yy272 = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), releaseRawExprNode(pCxt, yymsp[-1].minor.yy272)); }
break;
- case 464: /* query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */
+ case 462: /* query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */
{
- yylhsminor.yy840 = addOrderByClause(pCxt, yymsp[-3].minor.yy840, yymsp[-2].minor.yy544);
- yylhsminor.yy840 = addSlimitClause(pCxt, yylhsminor.yy840, yymsp[-1].minor.yy840);
- yylhsminor.yy840 = addLimitClause(pCxt, yylhsminor.yy840, yymsp[0].minor.yy840);
+ yylhsminor.yy272 = addOrderByClause(pCxt, yymsp[-3].minor.yy272, yymsp[-2].minor.yy172);
+ yylhsminor.yy272 = addSlimitClause(pCxt, yylhsminor.yy272, yymsp[-1].minor.yy272);
+ yylhsminor.yy272 = addLimitClause(pCxt, yylhsminor.yy272, yymsp[0].minor.yy272);
}
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ yymsp[-3].minor.yy272 = yylhsminor.yy272;
break;
- case 466: /* query_expression_body ::= query_expression_body UNION ALL query_expression_body */
-{ yylhsminor.yy840 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy840, yymsp[0].minor.yy840); }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ case 464: /* query_expression_body ::= query_expression_body UNION ALL query_expression_body */
+{ yylhsminor.yy272 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy272, yymsp[0].minor.yy272); }
+ yymsp[-3].minor.yy272 = yylhsminor.yy272;
break;
- case 467: /* query_expression_body ::= query_expression_body UNION query_expression_body */
-{ yylhsminor.yy840 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy840, yymsp[0].minor.yy840); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 465: /* query_expression_body ::= query_expression_body UNION query_expression_body */
+{ yylhsminor.yy272 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy272, yymsp[0].minor.yy272); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 469: /* query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */
+ case 467: /* query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */
{
- yymsp[-5].minor.yy840 = addOrderByClause(pCxt, yymsp[-4].minor.yy840, yymsp[-3].minor.yy544);
- yymsp[-5].minor.yy840 = addSlimitClause(pCxt, yymsp[-5].minor.yy840, yymsp[-2].minor.yy840);
- yymsp[-5].minor.yy840 = addLimitClause(pCxt, yymsp[-5].minor.yy840, yymsp[-1].minor.yy840);
+ yymsp[-5].minor.yy272 = addOrderByClause(pCxt, yymsp[-4].minor.yy272, yymsp[-3].minor.yy172);
+ yymsp[-5].minor.yy272 = addSlimitClause(pCxt, yymsp[-5].minor.yy272, yymsp[-2].minor.yy272);
+ yymsp[-5].minor.yy272 = addLimitClause(pCxt, yymsp[-5].minor.yy272, yymsp[-1].minor.yy272);
}
break;
- case 473: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */
- case 477: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==477);
-{ yymsp[-1].minor.yy840 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); }
+ case 471: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */
+ case 475: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==475);
+{ yymsp[-1].minor.yy272 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); }
break;
- case 474: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
- case 478: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==478);
-{ yymsp[-3].minor.yy840 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); }
+ case 472: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
+ case 476: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==476);
+{ yymsp[-3].minor.yy272 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); }
break;
- case 475: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- case 479: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==479);
-{ yymsp[-3].minor.yy840 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); }
+ case 473: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
+ case 477: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==477);
+{ yymsp[-3].minor.yy272 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); }
break;
- case 480: /* subquery ::= NK_LP query_expression NK_RP */
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy840); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 478: /* subquery ::= NK_LP query_expression NK_RP */
+{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy272); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 484: /* sort_specification ::= expression ordering_specification_opt null_ordering_opt */
-{ yylhsminor.yy840 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), yymsp[-1].minor.yy204, yymsp[0].minor.yy277); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 482: /* sort_specification ::= expression ordering_specification_opt null_ordering_opt */
+{ yylhsminor.yy272 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), yymsp[-1].minor.yy818, yymsp[0].minor.yy493); }
+ yymsp[-2].minor.yy272 = yylhsminor.yy272;
break;
- case 485: /* ordering_specification_opt ::= */
-{ yymsp[1].minor.yy204 = ORDER_ASC; }
+ case 483: /* ordering_specification_opt ::= */
+{ yymsp[1].minor.yy818 = ORDER_ASC; }
break;
- case 486: /* ordering_specification_opt ::= ASC */
-{ yymsp[0].minor.yy204 = ORDER_ASC; }
+ case 484: /* ordering_specification_opt ::= ASC */
+{ yymsp[0].minor.yy818 = ORDER_ASC; }
break;
- case 487: /* ordering_specification_opt ::= DESC */
-{ yymsp[0].minor.yy204 = ORDER_DESC; }
+ case 485: /* ordering_specification_opt ::= DESC */
+{ yymsp[0].minor.yy818 = ORDER_DESC; }
break;
- case 488: /* null_ordering_opt ::= */
-{ yymsp[1].minor.yy277 = NULL_ORDER_DEFAULT; }
+ case 486: /* null_ordering_opt ::= */
+{ yymsp[1].minor.yy493 = NULL_ORDER_DEFAULT; }
break;
- case 489: /* null_ordering_opt ::= NULLS FIRST */
-{ yymsp[-1].minor.yy277 = NULL_ORDER_FIRST; }
+ case 487: /* null_ordering_opt ::= NULLS FIRST */
+{ yymsp[-1].minor.yy493 = NULL_ORDER_FIRST; }
break;
- case 490: /* null_ordering_opt ::= NULLS LAST */
-{ yymsp[-1].minor.yy277 = NULL_ORDER_LAST; }
+ case 488: /* null_ordering_opt ::= NULLS LAST */
+{ yymsp[-1].minor.yy493 = NULL_ORDER_LAST; }
break;
default:
break;
diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp
index b376c33d1aca8951ed31297cd12a1843ebf47462..cd7a9d549a1faab8994f71e7bf659c3a45f2cc01 100644
--- a/source/libs/parser/test/mockCatalog.cpp
+++ b/source/libs/parser/test/mockCatalog.cpp
@@ -137,7 +137,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) {
}
{
ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS, TSDB_SYSTEM_TABLE, 1)
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STREAMS, TSDB_SYSTEM_TABLE, 1)
.addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
builder.done();
}
@@ -149,7 +149,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) {
}
{
ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_SUBSCRIPTIONS, TSDB_SYSTEM_TABLE, 1)
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SUBSCRIPTIONS, TSDB_SYSTEM_TABLE, 1)
.addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
builder.done();
}
diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp
index 9bca6cae0a41a145237b1035c5dd1edb4fdf0cd9..8455da12d776b2c1519aae1b4c176ce263873a46 100644
--- a/source/libs/parser/test/parInitialCTest.cpp
+++ b/source/libs/parser/test/parInitialCTest.cpp
@@ -111,9 +111,9 @@ TEST_F(ParserInitialCTest, createDatabase) {
expect.numOfVgroups = TSDB_DEFAULT_VN_PER_DB;
expect.numOfStables = TSDB_DEFAULT_DB_SINGLE_STABLE;
expect.schemaless = TSDB_DEFAULT_DB_SCHEMALESS;
- expect.walRetentionPeriod = TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD;
- expect.walRetentionSize = TSDB_DEFAULT_DB_WAL_RETENTION_SIZE;
- expect.walRollPeriod = TSDB_DEFAULT_DB_WAL_ROLL_PERIOD;
+ expect.walRetentionPeriod = TSDB_REP_DEF_DB_WAL_RET_PERIOD;
+ expect.walRetentionSize = TSDB_REP_DEF_DB_WAL_RET_SIZE;
+ expect.walRollPeriod = TSDB_REP_DEF_DB_WAL_ROLL_PERIOD;
expect.walSegmentSize = TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE;
};
@@ -266,6 +266,14 @@ TEST_F(ParserInitialCTest, createDatabase) {
"DURATION 100m "
"KEEP 1440m,300h,400d ");
clearCreateDbReq();
+
+ setCreateDbReqFunc("wxy_db", 1);
+ setDbReplicaFunc(3);
+ setDbWalRetentionPeriod(TSDB_REPS_DEF_DB_WAL_RET_PERIOD);
+ setDbWalRetentionSize(TSDB_REPS_DEF_DB_WAL_RET_SIZE);
+ setDbWalRollPeriod(TSDB_REPS_DEF_DB_WAL_ROLL_PERIOD);
+ run("CREATE DATABASE IF NOT EXISTS wxy_db REPLICA 3");
+ clearCreateDbReq();
}
TEST_F(ParserInitialCTest, createDatabaseSemanticCheck) {
@@ -568,15 +576,13 @@ TEST_F(ParserInitialCTest, createStream) {
memset(&expect, 0, sizeof(SCMCreateStreamReq));
};
- auto setCreateStreamReqFunc = [&](const char* pStream, const char* pSrcDb, const char* pSql,
- const char* pDstStb = nullptr, int8_t igExists = 0,
- int8_t triggerType = STREAM_TRIGGER_AT_ONCE, int64_t maxDelay = 0,
- int64_t watermark = 0, int8_t igExpired = STREAM_DEFAULT_IGNORE_EXPIRED) {
+ auto setCreateStreamReqFunc = [&](const char* pStream, const char* pSrcDb, const char* pSql, const char* pDstStb,
+ int8_t igExists = 0, int8_t triggerType = STREAM_TRIGGER_AT_ONCE,
+ int64_t maxDelay = 0, int64_t watermark = 0,
+ int8_t igExpired = STREAM_DEFAULT_IGNORE_EXPIRED) {
snprintf(expect.name, sizeof(expect.name), "0.%s", pStream);
snprintf(expect.sourceDB, sizeof(expect.sourceDB), "0.%s", pSrcDb);
- if (NULL != pDstStb) {
- snprintf(expect.targetStbFullName, sizeof(expect.targetStbFullName), "0.test.%s", pDstStb);
- }
+ snprintf(expect.targetStbFullName, sizeof(expect.targetStbFullName), "0.test.%s", pDstStb);
expect.igExists = igExists;
expect.sql = strdup(pSql);
expect.triggerType = triggerType;
@@ -603,15 +609,6 @@ TEST_F(ParserInitialCTest, createStream) {
tFreeSCMCreateStreamReq(&req);
});
- setCreateStreamReqFunc("s1", "test", "create stream s1 as select count(*) from t1 interval(10s)");
- run("CREATE STREAM s1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)");
- clearCreateStreamReq();
-
- setCreateStreamReqFunc("s1", "test", "create stream if not exists s1 as select count(*) from t1 interval(10s)",
- nullptr, 1);
- run("CREATE STREAM IF NOT EXISTS s1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)");
- clearCreateStreamReq();
-
setCreateStreamReqFunc("s1", "test", "create stream s1 into st1 as select count(*) from t1 interval(10s)", "st1");
run("CREATE STREAM s1 INTO st1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)");
clearCreateStreamReq();
@@ -629,7 +626,8 @@ TEST_F(ParserInitialCTest, createStream) {
TEST_F(ParserInitialCTest, createStreamSemanticCheck) {
useDb("root", "test");
- run("CREATE STREAM s1 AS SELECT PERCENTILE(c1, 30) FROM t1 INTERVAL(10S)", TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC);
+ run("CREATE STREAM s1 INTO st1 AS SELECT PERCENTILE(c1, 30) FROM t1 INTERVAL(10S)",
+ TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC);
}
TEST_F(ParserInitialCTest, createTable) {
diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp
index 98281b7bf070095b4bb23326b156d5e8764690de..360b904c170e50682b17d9c99a8ec1cd679a6db0 100644
--- a/source/libs/parser/test/parTestUtil.cpp
+++ b/source/libs/parser/test/parTestUtil.cpp
@@ -207,6 +207,7 @@ class ParserTestBaseImpl {
pCxt->db = caseEnv_.db_.c_str();
pCxt->pUser = caseEnv_.user_.c_str();
pCxt->isSuperUser = caseEnv_.user_ == "root";
+ pCxt->enableSysInfo = true;
pCxt->pSql = stmtEnv_.sql_.c_str();
pCxt->sqlLen = stmtEnv_.sql_.length();
pCxt->pMsg = stmtEnv_.msgBuf_.data();
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index 71f084d41226fee17bd0b8c0d63f69ad07ca3a20..bf72f5210577d6f43f8ae97d098091b3020aeb16 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -44,12 +44,15 @@ static void setColumnInfo(SFunctionNode* pFunc, SColumnNode* pCol) {
pCol->colType = COLUMN_TYPE_TBNAME;
break;
case FUNCTION_TYPE_WSTART:
+ pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
+ pCol->colType = COLUMN_TYPE_WINDOW_START;
+ break;
case FUNCTION_TYPE_WEND:
pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
- pCol->colType = COLUMN_TYPE_WINDOW_PC;
+ pCol->colType = COLUMN_TYPE_WINDOW_END;
break;
case FUNCTION_TYPE_WDURATION:
- pCol->colType = COLUMN_TYPE_WINDOW_PC;
+ pCol->colType = COLUMN_TYPE_WINDOW_DURATION;
break;
case FUNCTION_TYPE_GROUP_KEY:
pCol->colType = COLUMN_TYPE_GROUP_KEY;
@@ -194,28 +197,21 @@ static EScanType getScanType(SLogicPlanContext* pCxt, SNodeList* pScanPseudoCols
return SCAN_TYPE_TABLE;
}
-static SNode* createPrimaryKeyCol(uint64_t tableId) {
+static SNode* createFirstCol(uint64_t tableId, const SSchema* pSchema) {
SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
if (NULL == pCol) {
return NULL;
}
- pCol->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP;
- pCol->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes;
+ pCol->node.resType.type = pSchema->type;
+ pCol->node.resType.bytes = pSchema->bytes;
pCol->tableId = tableId;
- pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
+ pCol->colId = pSchema->colId;
pCol->colType = COLUMN_TYPE_COLUMN;
- strcpy(pCol->colName, "#primarykey");
+ strcpy(pCol->colName, pSchema->name);
return (SNode*)pCol;
}
-static int32_t addPrimaryKeyCol(uint64_t tableId, SNodeList** pCols) {
- if (NULL == *pCols) {
- *pCols = nodesMakeList();
- if (NULL == *pCols) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
- }
-
+static int32_t addPrimaryKeyCol(uint64_t tableId, const SSchema* pSchema, SNodeList** pCols) {
bool found = false;
SNode* pCol = NULL;
FOREACH(pCol, *pCols) {
@@ -226,13 +222,25 @@ static int32_t addPrimaryKeyCol(uint64_t tableId, SNodeList** pCols) {
}
if (!found) {
- if (TSDB_CODE_SUCCESS != nodesListStrictAppend(*pCols, createPrimaryKeyCol(tableId))) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
+ return nodesListMakeStrictAppend(pCols, createFirstCol(tableId, pSchema));
}
return TSDB_CODE_SUCCESS;
}
+static int32_t addSystableFirstCol(uint64_t tableId, const SSchema* pSchema, SNodeList** pCols) {
+ if (LIST_LENGTH(*pCols) > 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+ return nodesListMakeStrictAppend(pCols, createFirstCol(tableId, pSchema));
+}
+
+static int32_t addDefaultScanCol(const STableMeta* pMeta, SNodeList** pCols) {
+ if (TSDB_SYSTEM_TABLE == pMeta->tableType) {
+ return addSystableFirstCol(pMeta->uid, pMeta->schema, pCols);
+ }
+ return addPrimaryKeyCol(pMeta->uid, pMeta->schema, pCols);
+}
+
static int32_t makeScanLogicNode(SLogicPlanContext* pCxt, SRealTableNode* pRealTable, bool hasRepeatScanFuncs,
SLogicNode** pLogicNode) {
SScanLogicNode* pScan = (SScanLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_SCAN);
@@ -296,8 +304,8 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
pScan->hasNormalCols = true;
}
- if (TSDB_CODE_SUCCESS == code && SCAN_TYPE_SYSTEM_TABLE != pScan->scanType) {
- code = addPrimaryKeyCol(pScan->tableId, &pScan->pScanCols);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = addDefaultScanCol(pRealTable->pMeta, &pScan->pScanCols);
}
// set output
@@ -784,7 +792,8 @@ static int32_t createWindowLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSele
static EDealRes needFillValueImpl(SNode* pNode, void* pContext) {
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
SColumnNode* pCol = (SColumnNode*)pNode;
- if (COLUMN_TYPE_WINDOW_PC != pCol->colType && COLUMN_TYPE_GROUP_KEY != pCol->colType) {
+ if (COLUMN_TYPE_WINDOW_START != pCol->colType && COLUMN_TYPE_WINDOW_END != pCol->colType &&
+ COLUMN_TYPE_WINDOW_DURATION != pCol->colType && COLUMN_TYPE_GROUP_KEY != pCol->colType) {
*(bool*)pContext = true;
return DEAL_RES_END;
}
@@ -1002,7 +1011,8 @@ static int32_t createPartitionLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pS
int32_t code =
nodesCollectColumns(pSelect, SQL_CLAUSE_PARTITION_BY, NULL, COLLECT_COL_TYPE_ALL, &pPartition->node.pTargets);
if (TSDB_CODE_SUCCESS == code && NULL == pPartition->node.pTargets) {
- code = nodesListMakeStrictAppend(&pPartition->node.pTargets, nodesCloneNode(nodesListGetNode(pCxt->pCurrRoot->pTargets, 0)));
+ code = nodesListMakeStrictAppend(&pPartition->node.pTargets,
+ nodesCloneNode(nodesListGetNode(pCxt->pCurrRoot->pTargets, 0)));
}
if (TSDB_CODE_SUCCESS == code) {
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c
index 45ab3903a9e9eb6df844244b6fc7cd8d009ebd47..b160f45479a8e11a160fc092b7af536c4165436a 100644
--- a/source/libs/planner/src/planOptimizer.c
+++ b/source/libs/planner/src/planOptimizer.c
@@ -16,6 +16,7 @@
#include "filter.h"
#include "functionMgt.h"
#include "planInt.h"
+#include "tglobal.h"
#include "ttime.h"
#define OPTIMIZE_FLAG_MASK(n) (1 << n)
@@ -1084,7 +1085,7 @@ static int32_t sortPriKeyOptGetSequencingNodesImpl(SLogicNode* pNode, bool* pNot
switch (nodeType(pNode)) {
case QUERY_NODE_LOGIC_PLAN_SCAN: {
SScanLogicNode* pScan = (SScanLogicNode*)pNode;
- if (NULL != pScan->pGroupTags) {
+ if (NULL != pScan->pGroupTags || TSDB_SYSTEM_TABLE == pScan->tableType) {
*pNotOptimize = true;
return TSDB_CODE_SUCCESS;
}
@@ -1665,7 +1666,10 @@ static bool eliminateProjOptMayBeOptimized(SLogicNode* pNode) {
return false;
}
- if (QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(pNode) || 1 != LIST_LENGTH(pNode->pChildren)) {
+ // Super table scan requires project operator to merge packets to improve performance.
+ if (QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(pNode) || 1 != LIST_LENGTH(pNode->pChildren) ||
+ (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(nodesListGetNode(pNode->pChildren, 0)) &&
+ TSDB_SUPER_TABLE == ((SScanLogicNode*)nodesListGetNode(pNode->pChildren, 0))->tableType)) {
return false;
}
@@ -2407,7 +2411,7 @@ static const SOptimizeRule optimizeRuleSet[] = {
static const int32_t optimizeRuleNum = (sizeof(optimizeRuleSet) / sizeof(SOptimizeRule));
static void dumpLogicSubplan(const char* pRuleName, SLogicSubplan* pSubplan) {
- if (0 == (qDebugFlag & DEBUG_DEBUG)) {
+ if (!tsQueryPlannerTrace) {
return;
}
char* pStr = NULL;
diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c
index c7eb6f7b5e61fdd1d4c29cc88a8b980bc1efdf79..0cbb833a4d4506b5123b45a0184bbc6023b53c2a 100644
--- a/source/libs/planner/src/planPhysiCreater.c
+++ b/source/libs/planner/src/planPhysiCreater.c
@@ -576,6 +576,7 @@ static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan*
pScan->showRewrite = pScanLogicNode->showRewrite;
pScan->accountId = pCxt->pPlanCxt->acctId;
+ pScan->sysInfo = pCxt->pPlanCxt->sysInfo;
if (0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_TABLES) ||
0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_TABLE_DISTRIBUTED) ||
0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_TAGS)) {
@@ -1323,7 +1324,8 @@ static int32_t createSortPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren
static int32_t createPartitionPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,
SPartitionLogicNode* pPartLogicNode, SPhysiNode** pPhyNode) {
SPartitionPhysiNode* pPart =
- (SPartitionPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pPartLogicNode, QUERY_NODE_PHYSICAL_PLAN_PARTITION);
+ (SPartitionPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pPartLogicNode,
+ pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION : QUERY_NODE_PHYSICAL_PLAN_PARTITION);
if (NULL == pPart) {
return TSDB_CODE_OUT_OF_MEMORY;
}
diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c
index c582994b7c319778477238ab26b99ce844cb8c1c..beb938b161ca1656f09d15c559351aa2e081df2a 100644
--- a/source/libs/planner/src/planSpliter.c
+++ b/source/libs/planner/src/planSpliter.c
@@ -1427,7 +1427,7 @@ static const SSplitRule splitRuleSet[] = {
static const int32_t splitRuleNum = (sizeof(splitRuleSet) / sizeof(SSplitRule));
static void dumpLogicSubplan(const char* pRuleName, SLogicSubplan* pSubplan) {
- if (0 == (qDebugFlag & DEBUG_DEBUG)) {
+ if (!tsQueryPlannerTrace) {
return;
}
char* pStr = NULL;
diff --git a/source/libs/planner/src/planner.c b/source/libs/planner/src/planner.c
index c1296982e0217ae9b3c2e67b210f1922492cf547..baa1d1074c7d4bea0df280649777db4a659247cb 100644
--- a/source/libs/planner/src/planner.c
+++ b/source/libs/planner/src/planner.c
@@ -17,9 +17,10 @@
#include "planInt.h"
#include "scalar.h"
+#include "tglobal.h"
static void dumpQueryPlan(SQueryPlan* pPlan) {
- if (0 == (qDebugFlag & DEBUG_DEBUG)) {
+ if (!tsQueryPlannerTrace) {
return;
}
char* pStr = NULL;
diff --git a/source/libs/planner/test/planOtherTest.cpp b/source/libs/planner/test/planOtherTest.cpp
index 7107f8b3c94c616ae9db90132a59f2804b542aca..350ccd0d927c9773059cfb2c027a0ca2292e4d13 100644
--- a/source/libs/planner/test/planOtherTest.cpp
+++ b/source/libs/planner/test/planOtherTest.cpp
@@ -37,9 +37,9 @@ TEST_F(PlanOtherTest, createStream) {
TEST_F(PlanOtherTest, createStreamUseSTable) {
useDb("root", "test");
- run("CREATE STREAM IF NOT EXISTS s1 as SELECT COUNT(*) FROM st1 INTERVAL(10s)");
+ run("CREATE STREAM IF NOT EXISTS s1 into st1 as SELECT COUNT(*) FROM st1 INTERVAL(10s)");
- run("CREATE STREAM IF NOT EXISTS s1 as SELECT COUNT(*) FROM st1 PARTITION BY TBNAME INTERVAL(10s)");
+ run("CREATE STREAM IF NOT EXISTS s1 into st1 as SELECT COUNT(*) FROM st1 PARTITION BY TBNAME INTERVAL(10s)");
}
TEST_F(PlanOtherTest, createSmaIndex) {
diff --git a/source/libs/planner/test/planSysTbTest.cpp b/source/libs/planner/test/planSysTbTest.cpp
index 921f86f09a41d36448ab0d435ab6a439645b9bfc..6b40e381cc18cb75cc9271352cd654d31a74242b 100644
--- a/source/libs/planner/test/planSysTbTest.cpp
+++ b/source/libs/planner/test/planSysTbTest.cpp
@@ -32,3 +32,9 @@ TEST_F(PlanSysTableTest, informationSchema) {
run("SELECT * FROM information_schema.ins_databases WHERE name = 'information_schema'");
}
+
+TEST_F(PlanSysTableTest, withAgg) {
+ useDb("root", "information_schema");
+
+ run("SELECT COUNT(1) FROM ins_users");
+}
diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp
index 5fc8b3cf302612c9b8528a8380475b32a79a8824..96f7d29230bafc94639be35fcc56550c029ffbac 100644
--- a/source/libs/planner/test/planTestUtil.cpp
+++ b/source/libs/planner/test/planTestUtil.cpp
@@ -278,12 +278,12 @@ class PlannerTestBaseImpl {
}
void dump(DumpModule module) {
+ cout << "========================================== " << sqlNo_ << " sql : [" << stmtEnv_.sql_ << "]" << endl;
+
if (DUMP_MODULE_NOTHING == module) {
return;
}
- cout << "========================================== " << sqlNo_ << " sql : [" << stmtEnv_.sql_ << "]" << endl;
-
if (DUMP_MODULE_ALL == module || DUMP_MODULE_PARSER == module) {
if (res_.prepareAst_.empty()) {
cout << "+++++++++++++++++++++syntax tree : " << endl;
@@ -343,6 +343,7 @@ class PlannerTestBaseImpl {
cxt.pMsg = stmtEnv_.msgBuf_.data();
cxt.msgLen = stmtEnv_.msgBuf_.max_size();
cxt.svrVer = "3.0.0.0";
+ cxt.enableSysInfo = true;
if (prepare) {
SStmtCallback stmtCb = {0};
cxt.pStmtCb = &stmtCb;
diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c
index 5143aa4af1f90ba0e7a0ac2f37af6648ed68c685..d848016e46482614972d5e85469e4297136d6cc0 100644
--- a/source/libs/qcom/src/queryUtil.c
+++ b/source/libs/qcom/src/queryUtil.c
@@ -213,15 +213,25 @@ SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* nam
return s;
}
+void freeSTableMetaRspPointer(void *p) {
+ tFreeSTableMetaRsp(*(void**)p);
+ taosMemoryFreeClear(*(void**)p);
+}
+
void destroyQueryExecRes(SExecResult* pRes) {
if (NULL == pRes || NULL == pRes->res) {
return;
}
switch (pRes->msgType) {
+ case TDMT_VND_CREATE_TABLE: {
+ taosArrayDestroyEx((SArray*)pRes->res, freeSTableMetaRspPointer);
+ break;
+ }
+ case TDMT_MND_CREATE_STB:
case TDMT_VND_ALTER_TABLE:
case TDMT_MND_ALTER_STB: {
- tFreeSTableMetaRsp((STableMetaRsp*)pRes->res);
+ tFreeSTableMetaRsp(pRes->res);
taosMemoryFreeClear(pRes->res);
break;
}
diff --git a/source/libs/qcom/src/querymsg.c b/source/libs/qcom/src/querymsg.c
index ed8786170d0e37f677d1b731d08eafb511875023..e2d3ac1583926da6fe9d9aff82392c4fcc3c2d65 100644
--- a/source/libs/qcom/src/querymsg.c
+++ b/source/libs/qcom/src/querymsg.c
@@ -354,6 +354,19 @@ static int32_t queryConvertTableMetaMsg(STableMetaRsp *pMetaMsg) {
return TSDB_CODE_SUCCESS;
}
+int32_t queryCreateCTableMetaFromMsg(STableMetaRsp *msg, SCTableMeta *pMeta) {
+ pMeta->vgId = msg->vgId;
+ pMeta->tableType = msg->tableType;
+ pMeta->uid = msg->tuid;
+ pMeta->suid = msg->suid;
+
+ qDebug("ctable %s uid %" PRIx64 " meta returned, type %d vgId:%d db %s suid %" PRIx64 ,
+ msg->tbName, pMeta->uid, pMeta->tableType, pMeta->vgId, msg->dbFName, pMeta->suid);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isStb, STableMeta **pMeta) {
int32_t total = msg->numOfColumns + msg->numOfTags;
int32_t metaSize = sizeof(STableMeta) + sizeof(SSchema) * total;
diff --git a/source/libs/scalar/inc/filterInt.h b/source/libs/scalar/inc/filterInt.h
index 23693c785aa17921e5ba4420fe6477fa72b27392..e7695b2f04ea4fed2ebf9a77bf00717e1978003e 100644
--- a/source/libs/scalar/inc/filterInt.h
+++ b/source/libs/scalar/inc/filterInt.h
@@ -276,7 +276,7 @@ struct SFilterInfo {
#define FILTER_CLR_FLAG(st, f) st &= (~f)
#define SIMPLE_COPY_VALUES(dst, src) *((int64_t *)dst) = *((int64_t *)src)
-#define FILTER_PACKAGE_UNIT_HASH_KEY(v, optr, idx1, idx2) do { char *_t = (char *)v; _t[0] = optr; *(uint32_t *)(_t + 1) = idx1; *(uint32_t *)(_t + 3) = idx2; } while (0)
+#define FLT_PACKAGE_UNIT_HASH_KEY(v, op1, op2, lidx, ridx, ridx2) do { char *_t = (char *)(v); _t[0] = (op1); _t[1] = (op2); *(uint32_t *)(_t + 2) = (lidx); *(uint32_t *)(_t + 2 + sizeof(uint32_t)) = (ridx); } while (0)
#define FILTER_GREATER(cr,sflag,eflag) ((cr > 0) || ((cr == 0) && (FILTER_GET_FLAG(sflag,RANGE_FLG_EXCLUDE) || FILTER_GET_FLAG(eflag,RANGE_FLG_EXCLUDE))))
#define FILTER_COPY_RA(dst, src) do { (dst)->sflag = (src)->sflag; (dst)->eflag = (src)->eflag; (dst)->s = (src)->s; (dst)->e = (src)->e; } while (0)
@@ -350,6 +350,7 @@ struct SFilterInfo {
extern bool filterDoCompare(__compar_fn_t func, uint8_t optr, void *left, void *right);
extern __compar_fn_t filterGetCompFunc(int32_t type, int32_t optr);
+extern __compar_fn_t filterGetCompFuncEx(int32_t lType, int32_t rType, int32_t optr);
#ifdef __cplusplus
}
diff --git a/source/libs/scalar/inc/sclInt.h b/source/libs/scalar/inc/sclInt.h
index d423b92da7e83589aacc6d384c0e2cafa0949038..15e9026ddbc2eea8ad4e066519dd4bbea9767b7e 100644
--- a/source/libs/scalar/inc/sclInt.h
+++ b/source/libs/scalar/inc/sclInt.h
@@ -45,6 +45,9 @@ typedef struct SScalarCtx {
#define SCL_IS_CONST_CALC(_ctx) (NULL == (_ctx)->pBlockList)
//#define SCL_IS_NULL_VALUE_NODE(_node) ((QUERY_NODE_VALUE == nodeType(_node)) && (TSDB_DATA_TYPE_NULL == ((SValueNode *)_node)->node.resType.type) && (((SValueNode *)_node)->placeholderNo <= 0))
#define SCL_IS_NULL_VALUE_NODE(_node) ((QUERY_NODE_VALUE == nodeType(_node)) && (TSDB_DATA_TYPE_NULL == ((SValueNode *)_node)->node.resType.type))
+#define SCL_IS_COMPARISON_OPERATOR(_opType) ((_opType) >= OP_TYPE_GREATER_THAN && (_opType) < OP_TYPE_IS_NOT_UNKNOWN)
+#define SCL_DOWNGRADE_DATETYPE(_type) ((_type) == TSDB_DATA_TYPE_BIGINT || TSDB_DATA_TYPE_DOUBLE == (_type) || (_type) == TSDB_DATA_TYPE_UBIGINT)
+#define SCL_NO_NEED_CONVERT_COMPARISION(_ltype, _rtype, _optr) (IS_NUMERIC_TYPE(_ltype) && IS_NUMERIC_TYPE(_rtype) && ((_optr) >= OP_TYPE_GREATER_THAN && (_optr) <= OP_TYPE_NOT_EQUAL))
#define sclFatal(...) qFatal(__VA_ARGS__)
#define sclError(...) qError(__VA_ARGS__)
diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c
index 4377dbf14ec55dae53d41859af8480886f4cce51..9e676354374fce6c2e733ac8d42c45baef9bada8 100644
--- a/source/libs/scalar/src/filter.c
+++ b/source/libs/scalar/src/filter.c
@@ -132,6 +132,77 @@ __compar_fn_t gDataCompare[] = {compareInt32Val, compareInt8Val, compareInt16Val
compareChkNotInString, compareStrPatternNotMatch, compareWStrPatternNotMatch
};
+__compar_fn_t gInt8SignCompare[] = {
+ compareInt8Val, compareInt8Int16, compareInt8Int32, compareInt8Int64, compareInt8Float, compareInt8Double
+};
+__compar_fn_t gInt8UsignCompare[] = {
+ compareInt8Uint8, compareInt8Uint16, compareInt8Uint32, compareInt8Uint64
+};
+
+__compar_fn_t gInt16SignCompare[] = {
+ compareInt16Int8, compareInt16Val, compareInt16Int32, compareInt16Int64, compareInt16Float, compareInt16Double
+};
+__compar_fn_t gInt16UsignCompare[] = {
+ compareInt16Uint8, compareInt16Uint16, compareInt16Uint32, compareInt16Uint64
+};
+
+__compar_fn_t gInt32SignCompare[] = {
+ compareInt32Int8, compareInt32Int16, compareInt32Val, compareInt32Int64, compareInt32Float, compareInt32Double
+};
+__compar_fn_t gInt32UsignCompare[] = {
+ compareInt32Uint8, compareInt32Uint16, compareInt32Uint32, compareInt32Uint64
+};
+
+__compar_fn_t gInt64SignCompare[] = {
+ compareInt64Int8, compareInt64Int16, compareInt64Int32, compareInt64Val, compareInt64Float, compareInt64Double
+};
+__compar_fn_t gInt64UsignCompare[] = {
+ compareInt64Uint8, compareInt64Uint16, compareInt64Uint32, compareInt64Uint64
+};
+
+__compar_fn_t gFloatSignCompare[] = {
+ compareFloatInt8, compareFloatInt16, compareFloatInt32, compareFloatInt64, compareFloatVal, compareFloatDouble
+};
+__compar_fn_t gFloatUsignCompare[] = {
+ compareFloatUint8, compareFloatUint16, compareFloatUint32, compareFloatUint64
+};
+
+__compar_fn_t gDoubleSignCompare[] = {
+ compareDoubleInt8, compareDoubleInt16, compareDoubleInt32, compareDoubleInt64, compareDoubleFloat, compareDoubleVal
+};
+__compar_fn_t gDoubleUsignCompare[] = {
+ compareDoubleUint8, compareDoubleUint16, compareDoubleUint32, compareDoubleUint64
+};
+
+__compar_fn_t gUint8SignCompare[] = {
+ compareUint8Int8, compareUint8Int16, compareUint8Int32, compareUint8Int64, compareUint8Float, compareUint8Double
+};
+__compar_fn_t gUint8UsignCompare[] = {
+ compareUint8Val, compareUint8Uint16, compareUint8Uint32, compareUint8Uint64
+};
+
+__compar_fn_t gUint16SignCompare[] = {
+ compareUint16Int8, compareUint16Int16, compareUint16Int32, compareUint16Int64, compareUint16Float, compareUint16Double
+};
+__compar_fn_t gUint16UsignCompare[] = {
+ compareUint16Uint8, compareUint16Val, compareUint16Uint32, compareUint16Uint64
+};
+
+__compar_fn_t gUint32SignCompare[] = {
+ compareUint32Int8, compareUint32Int16, compareUint32Int32, compareUint32Int64, compareUint32Float, compareUint32Double
+};
+__compar_fn_t gUint32UsignCompare[] = {
+ compareUint32Uint8, compareUint32Uint16, compareUint32Val, compareUint32Uint64
+};
+
+__compar_fn_t gUint64SignCompare[] = {
+ compareUint64Int8, compareUint64Int16, compareUint64Int32, compareUint64Int64, compareUint64Float, compareUint64Double
+};
+__compar_fn_t gUint64UsignCompare[] = {
+ compareUint64Uint8, compareUint64Uint16, compareUint64Uint32, compareUint64Val
+};
+
+
int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
int8_t comparFn = 0;
@@ -257,6 +328,93 @@ __compar_fn_t filterGetCompFunc(int32_t type, int32_t optr) {
return gDataCompare[filterGetCompFuncIdx(type, optr)];
}
+__compar_fn_t filterGetCompFuncEx(int32_t lType, int32_t rType, int32_t optr) {
+ switch (lType) {
+ case TSDB_DATA_TYPE_TINYINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt8SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt8UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_SMALLINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt16SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt16UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_INT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt32SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt32UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_BIGINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt64SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt64UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_FLOAT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gFloatSignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gFloatUsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_DOUBLE: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gDoubleSignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gDoubleUsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UTINYINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint8SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint8UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_USMALLINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint16SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint16UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint32SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint32UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UBIGINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint64SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint64UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return NULL;
+}
static FORCE_INLINE int32_t filterCompareGroupCtx(const void *pLeft, const void *pRight) {
SFilterGroupCtx *left = *((SFilterGroupCtx**)pLeft), *right = *((SFilterGroupCtx**)pRight);
@@ -910,14 +1068,14 @@ int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *f
return TSDB_CODE_SUCCESS;
}
-int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint32_t *uidx) {
+int32_t filterAddUnitImpl(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint8_t optr2, SFilterFieldId *right2, uint32_t *uidx) {
if (FILTER_GET_FLAG(info->options, FLT_OPTION_NEED_UNIQE)) {
if (info->pctx.unitHash == NULL) {
info->pctx.unitHash = taosHashInit(FILTER_DEFAULT_GROUP_SIZE * FILTER_DEFAULT_UNIT_SIZE, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, false);
} else {
- int64_t v = 0;
- FILTER_PACKAGE_UNIT_HASH_KEY(&v, optr, left->idx, right ? right->idx : -1);
- void *hu = taosHashGet(info->pctx.unitHash, &v, sizeof(v));
+ char v[14] = {0};
+ FLT_PACKAGE_UNIT_HASH_KEY(&v, optr, optr2, left->idx, (right ? right->idx : -1), (right2 ? right2->idx : -1));
+ void *hu = taosHashGet(info->pctx.unitHash, v, sizeof(v));
if (hu) {
*uidx = *(uint32_t *)hu;
return TSDB_CODE_SUCCESS;
@@ -939,7 +1097,11 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
if (right) {
u->right = *right;
}
-
+ u->compare.optr2 = optr2;
+ if (right2) {
+ u->right2 = *right2;
+ }
+
if (u->right.type == FLD_TYPE_VALUE) {
SFilterField *val = FILTER_UNIT_RIGHT_FIELD(info, u);
assert(FILTER_GET_FLAG(val->flag, FLD_TYPE_VALUE));
@@ -960,9 +1122,9 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
*uidx = info->unitNum;
if (FILTER_GET_FLAG(info->options, FLT_OPTION_NEED_UNIQE)) {
- int64_t v = 0;
- FILTER_PACKAGE_UNIT_HASH_KEY(&v, optr, left->idx, right ? right->idx : -1);
- taosHashPut(info->pctx.unitHash, &v, sizeof(v), uidx, sizeof(*uidx));
+ char v[14] = {0};
+ FLT_PACKAGE_UNIT_HASH_KEY(&v, optr, optr2, left->idx, (right ? right->idx : -1), (right2 ? right2->idx : -1));
+ taosHashPut(info->pctx.unitHash, v, sizeof(v), uidx, sizeof(*uidx));
}
++info->unitNum;
@@ -971,6 +1133,9 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
}
+int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint32_t *uidx) {
+ return filterAddUnitImpl(info, optr, left, right, 0, NULL, uidx);
+}
int32_t filterAddUnitToGroup(SFilterGroup *group, uint32_t unitIdx) {
if (group->unitNum >= group->unitSize) {
@@ -1147,8 +1312,8 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan
SIMPLE_COPY_VALUES(data2, &ra->e);
filterAddField(dst, NULL, &data2, FLD_TYPE_VALUE, &right2, tDataTypes[type].bytes, true);
- filterAddUnit(dst, FILTER_GET_FLAG(ra->sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, &uidx);
- filterAddUnitRight(dst, FILTER_GET_FLAG(ra->eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, uidx);
+ filterAddUnitImpl(dst, FILTER_GET_FLAG(ra->sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right,
+ FILTER_GET_FLAG(ra->eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, &uidx);
filterAddUnitToGroup(g, uidx);
return TSDB_CODE_SUCCESS;
}
@@ -1222,8 +1387,8 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan
SIMPLE_COPY_VALUES(data2, &r->ra.e);
filterAddField(dst, NULL, &data2, FLD_TYPE_VALUE, &right2, tDataTypes[type].bytes, true);
- filterAddUnit(dst, FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, &uidx);
- filterAddUnitRight(dst, FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, uidx);
+ filterAddUnitImpl(dst, FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right,
+ FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, &uidx);
filterAddUnitToGroup(g, uidx);
}
@@ -2073,6 +2238,44 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t
return TSDB_CODE_SUCCESS;
}
+bool filterIsSameUnits(SFilterColInfo* pCol1, SFilterColInfo* pCol2) {
+ if (pCol1->type != pCol2->type) {
+ return false;
+ }
+
+ if (RANGE_TYPE_MR_CTX == pCol1->type) {
+ SFilterRangeCtx* pCtx1 = (SFilterRangeCtx*)pCol1->info;
+ SFilterRangeCtx* pCtx2 = (SFilterRangeCtx*)pCol2->info;
+
+ if ((pCtx1->isnull != pCtx2->isnull) || (pCtx1->notnull != pCtx2->notnull) || (pCtx1->isrange != pCtx2->isrange)) {
+ return false;
+ }
+
+
+ SFilterRangeNode* pNode1 = pCtx1->rs;
+ SFilterRangeNode* pNode2 = pCtx2->rs;
+
+ while (true) {
+ if (NULL == pNode1 && NULL == pNode2) {
+ break;
+ }
+
+ if (NULL == pNode1 || NULL == pNode2) {
+ return false;
+ }
+
+ if (pNode1->ra.s != pNode2->ra.s || pNode1->ra.e != pNode2->ra.e || pNode1->ra.sflag != pNode2->ra.sflag || pNode1->ra.eflag != pNode2->ra.eflag) {
+ return false;
+ }
+
+ pNode1 = pNode1->next;
+ pNode2 = pNode2->next;
+ }
+ }
+
+ return true;
+}
+
void filterCheckColConflict(SFilterGroupCtx* gRes1, SFilterGroupCtx* gRes2, bool *conflict) {
uint32_t idx1 = 0, idx2 = 0, m = 0, n = 0;
bool equal = false;
@@ -2098,6 +2301,11 @@ void filterCheckColConflict(SFilterGroupCtx* gRes1, SFilterGroupCtx* gRes2, bool
return;
}
+ if (!filterIsSameUnits(&gRes1->colInfo[idx1], &gRes2->colInfo[idx2])) {
+ *conflict = true;
+ return;
+ }
+
// for long in operation
if (gRes1->colInfo[idx1].optr == OP_TYPE_EQUAL && gRes2->colInfo[idx2].optr == OP_TYPE_EQUAL) {
SFilterRangeCtx* ctx = gRes1->colInfo[idx1].info;
@@ -2711,17 +2919,22 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3
for (uint32_t g = 0; g < info->groupNum; ++g) {
SFilterGroup *group = &info->groups[g];
+      // first is block unit num for a group, following append unitNum blkUnitIdx for this group
*unitNum = group->unitNum;
all = 0;
empty = 0;
+ // save group idx start pointer
+ uint32_t * pGroupIdx = unitIdx;
for (uint32_t u = 0; u < group->unitNum; ++u) {
uint32_t uidx = group->unitIdxs[u];
if (info->blkUnitRes[uidx] == 1) {
+ // blkUnitRes == 1 is always true, so need not compare every time, delete this unit from group
--(*unitNum);
all = 1;
continue;
} else if (info->blkUnitRes[uidx] == -1) {
+        // blkUnitRes == -1 is always false, so the group is always false; need to delete this group from blkGroupNum
*unitNum = 0;
empty = 1;
break;
@@ -2731,6 +2944,9 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3
}
if (*unitNum == 0) {
+      // if unit num is zero, reset unitIdx to the start of this group
+ unitIdx = pGroupIdx;
+
--info->blkGroupNum;
assert(empty || all);
diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c
index 6634a29f4091773c89988940c9ab6ed5de2487da..cd1f6624bdf83e4fe143c1a648e5e30947bcdd65 100644
--- a/source/libs/scalar/src/scalar.c
+++ b/source/libs/scalar/src/scalar.c
@@ -9,6 +9,7 @@
#include "scalar.h"
#include "tudf.h"
#include "ttime.h"
+#include "tcompare.h"
int32_t scalarGetOperatorParamNum(EOperatorType type) {
if (OP_TYPE_IS_NULL == type || OP_TYPE_IS_NOT_NULL == type || OP_TYPE_IS_TRUE == type || OP_TYPE_IS_NOT_TRUE == type
@@ -219,6 +220,82 @@ void sclFreeParamList(SScalarParam *param, int32_t paramNum) {
taosMemoryFree(param);
}
+void sclDowngradeValueType(SValueNode *valueNode) {
+ switch (valueNode->node.resType.type) {
+ case TSDB_DATA_TYPE_BIGINT: {
+ int8_t i8 = valueNode->datum.i;
+ if (i8 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_TINYINT;
+ *(int8_t*)&valueNode->typeData = i8;
+ break;
+ }
+ int16_t i16 = valueNode->datum.i;
+ if (i16 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_SMALLINT;
+ *(int16_t*)&valueNode->typeData = i16;
+ break;
+ }
+ int32_t i32 = valueNode->datum.i;
+ if (i32 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_INT;
+ *(int32_t*)&valueNode->typeData = i32;
+ break;
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UBIGINT:{
+ uint8_t u8 = valueNode->datum.i;
+ if (u8 == valueNode->datum.i) {
+ int8_t i8 = valueNode->datum.i;
+ if (i8 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_TINYINT;
+ *(int8_t*)&valueNode->typeData = i8;
+ } else {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_UTINYINT;
+ *(uint8_t*)&valueNode->typeData = u8;
+ }
+ break;
+ }
+ uint16_t u16 = valueNode->datum.i;
+ if (u16 == valueNode->datum.i) {
+ int16_t i16 = valueNode->datum.i;
+ if (i16 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_SMALLINT;
+ *(int16_t*)&valueNode->typeData = i16;
+ } else {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_USMALLINT;
+ *(uint16_t*)&valueNode->typeData = u16;
+ }
+ break;
+ }
+ uint32_t u32 = valueNode->datum.i;
+ if (u32 == valueNode->datum.i) {
+ int32_t i32 = valueNode->datum.i;
+ if (i32 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_INT;
+ *(int32_t*)&valueNode->typeData = i32;
+ } else {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_UINT;
+ *(uint32_t*)&valueNode->typeData = u32;
+ }
+ break;
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_DOUBLE: {
+ float f = valueNode->datum.d;
+ if (FLT_EQUAL(f, valueNode->datum.d)) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_FLOAT;
+ *(float*)&valueNode->typeData = f;
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+}
+
int32_t sclInitParam(SNode* node, SScalarParam *param, SScalarCtx *ctx, int32_t *rowNum) {
switch (nodeType(node)) {
case QUERY_NODE_LEFT_VALUE: {
@@ -675,6 +752,10 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
return DEAL_RES_ERROR;
}
}
+
+ if (SCL_IS_COMPARISON_OPERATOR(node->opType) && SCL_DOWNGRADE_DATETYPE(valueNode->node.resType.type)) {
+ sclDowngradeValueType(valueNode);
+ }
}
if (node->pRight && (QUERY_NODE_VALUE == nodeType(node->pRight))) {
@@ -692,6 +773,10 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
return DEAL_RES_ERROR;
}
}
+
+ if (SCL_IS_COMPARISON_OPERATOR(node->opType) && SCL_DOWNGRADE_DATETYPE(valueNode->node.resType.type)) {
+ sclDowngradeValueType(valueNode);
+ }
}
if (node->pRight && (QUERY_NODE_NODE_LIST == nodeType(node->pRight))) {
diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c
index aaa70ef5ae5f8ab00ce88b56433885cd00004893..a003315fcabeab38f49ae3a6056e25dff10e4e16 100644
--- a/source/libs/scalar/src/sclvector.c
+++ b/source/libs/scalar/src/sclvector.c
@@ -909,11 +909,11 @@ int32_t vectorConvertImpl(const SScalarParam* pIn, SScalarParam* pOut, int32_t*
int8_t gConvertTypes[TSDB_DATA_TYPE_BLOB+1][TSDB_DATA_TYPE_BLOB+1] = {
/* NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON VARB DECI BLOB */
/*NULL*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-/*BOOL*/ 0, 0, 0, 3, 4, 5, 6, 7, 7, 9, 7, 0, 12, 13, 14, 0, 7, 0, 0,
+/*BOOL*/ 0, 0, 2, 3, 4, 5, 6, 7, 7, 9, 7, 11, 12, 13, 14, 0, 7, 0, 0,
/*TINY*/ 0, 0, 0, 3, 4, 5, 6, 7, 7, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0,
/*SMAL*/ 0, 0, 0, 0, 4, 5, 6, 7, 7, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0,
/*INT */ 0, 0, 0, 0, 0, 5, 6, 7, 7, 9, 7, 4, 4, 5, 7, 0, 7, 0, 0,
-/*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 7, 0, 7, 5, 5, 5, 7, 0, 7, 0, 0,
+/*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 7, 9, 7, 5, 5, 5, 7, 0, 7, 0, 0,
/*FLOA*/ 0, 0, 0, 0, 0, 0, 0, 7, 7, 6, 7, 6, 6, 6, 6, 0, 7, 0, 0,
/*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 0, 7, 0, 0,
/*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 0, 0, 0,
@@ -1681,10 +1681,14 @@ void vectorBitOr(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut,
void vectorCompareImpl(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord, int32_t optr) {
int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : TMAX(pLeft->numOfRows, pRight->numOfRows) - 1;
int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1;
-
- __compar_fn_t fp = filterGetCompFunc(GET_PARAM_TYPE(pLeft), optr);
- if(terrno != TSDB_CODE_SUCCESS){
- return;
+ int32_t lType = GET_PARAM_TYPE(pLeft);
+ int32_t rType = GET_PARAM_TYPE(pRight);
+ __compar_fn_t fp = NULL;
+
+ if (lType == rType) {
+ fp = filterGetCompFunc(lType, optr);
+ } else {
+ fp = filterGetCompFuncEx(lType, rType, optr);
}
pOut->numOfRows = TMAX(pLeft->numOfRows, pRight->numOfRows);
@@ -1716,22 +1720,26 @@ void vectorCompareImpl(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *
void vectorCompare(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord, int32_t optr) {
SScalarParam pLeftOut = {0};
SScalarParam pRightOut = {0};
-
- vectorConvert(pLeft, pRight, &pLeftOut, &pRightOut);
-
SScalarParam *param1 = NULL;
SScalarParam *param2 = NULL;
- if (pLeftOut.columnData != NULL) {
- param1 = &pLeftOut;
- } else {
+ if (SCL_NO_NEED_CONVERT_COMPARISION(GET_PARAM_TYPE(pLeft), GET_PARAM_TYPE(pRight), optr)) {
param1 = pLeft;
- }
-
- if (pRightOut.columnData != NULL) {
- param2 = &pRightOut;
- } else {
param2 = pRight;
+ } else {
+ vectorConvert(pLeft, pRight, &pLeftOut, &pRightOut);
+
+ if (pLeftOut.columnData != NULL) {
+ param1 = &pLeftOut;
+ } else {
+ param1 = pLeft;
+ }
+
+ if (pRightOut.columnData != NULL) {
+ param2 = &pRightOut;
+ } else {
+ param2 = pRight;
+ }
}
vectorCompareImpl(param1, param2, pOut, _ord, optr);
diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h
index ce841ed83cc527849021517a6ed9c7a9c3a56f0c..957fd46ba5a767858a3bb5bbe50142b4f1c1ce47 100644
--- a/source/libs/scheduler/inc/schInt.h
+++ b/source/libs/scheduler/inc/schInt.h
@@ -283,7 +283,7 @@ typedef struct SSchJob {
} SSchJob;
typedef struct SSchTaskCtx {
- SSchJob *pJob;
+ int64_t jobRid;
SSchTask *pTask;
} SSchTaskCtx;
diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c
index ecd9daf1bcd3b83803754017aec27c1ebe62becf..5a64aaaebb3860d2c6729ac8eb1e00be0cc9cda1 100644
--- a/source/libs/scheduler/src/schRemote.c
+++ b/source/libs/scheduler/src/schRemote.c
@@ -102,15 +102,30 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa
tDecoderInit(&coder, msg, msgSize);
code = tDecodeSVCreateTbBatchRsp(&coder, &batchRsp);
if (TSDB_CODE_SUCCESS == code && batchRsp.nRsps > 0) {
+ SCH_LOCK(SCH_WRITE, &pJob->resLock);
+ if (NULL == pJob->execRes.res) {
+ pJob->execRes.res = taosArrayInit(batchRsp.nRsps, POINTER_BYTES);
+ pJob->execRes.msgType = TDMT_VND_CREATE_TABLE;
+ }
+
for (int32_t i = 0; i < batchRsp.nRsps; ++i) {
SVCreateTbRsp *rsp = batchRsp.pRsps + i;
+ if (rsp->pMeta) {
+ taosArrayPush((SArray*)pJob->execRes.res, &rsp->pMeta);
+ }
+
if (TSDB_CODE_SUCCESS != rsp->code) {
code = rsp->code;
- tDecoderClear(&coder);
- SCH_ERR_JRET(code);
}
}
+ SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
+
+ if (taosArrayGetSize((SArray*)pJob->execRes.res) <= 0) {
+ taosArrayDestroy((SArray*)pJob->execRes.res);
+ pJob->execRes.res = NULL;
+ }
}
+
tDecoderClear(&coder);
SCH_ERR_JRET(code);
}
diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c
index d16d15c1191a3360be19e6c485f13bc3ce66d0ef..9cab39c30122072207daa9e9639ab92645fc1633 100644
--- a/source/libs/scheduler/src/schTask.c
+++ b/source/libs/scheduler/src/schTask.c
@@ -821,7 +821,13 @@ int32_t schProcessOnTaskStatusRsp(SQueryNodeEpId *pEpId, SArray *pStatusList) {
int32_t schLaunchTaskImpl(void *param) {
SSchTaskCtx *pCtx = (SSchTaskCtx *)param;
- SSchJob *pJob = pCtx->pJob;
+ SSchJob *pJob = schAcquireJob(pCtx->jobRid);
+ if (NULL == pJob) {
+ taosMemoryFree(param);
+ qDebug("job refId 0x%" PRIx64 " already not exist", pCtx->jobRid);
+ SCH_RET(TSDB_CODE_SCH_JOB_IS_DROPPING);
+ }
+
SSchTask *pTask = pCtx->pTask;
int8_t status = 0;
int32_t code = 0;
@@ -880,6 +886,8 @@ _return:
}
}
+ schReleaseJob(pJob->refId);
+
SCH_RET(code);
}
@@ -890,7 +898,7 @@ int32_t schAsyncLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) {
SCH_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
- param->pJob = pJob;
+ param->jobRid = pJob->refId;
param->pTask = pTask;
if (pJob->taskNum >= SCH_MIN_AYSNC_EXEC_NUM) {
diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c
index c78ff0756f22e50e1f64a3f02526a04376eb9b08..9d4010f60e5fcb222e235181a2ce12b8d4dc4102 100644
--- a/source/libs/stream/src/streamDispatch.c
+++ b/source/libs/stream/src/streamDispatch.c
@@ -358,7 +358,7 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat
FAIL_SHUFFLE_DISPATCH:
if (pReqs) {
for (int32_t i = 0; i < vgSz; i++) {
- taosArrayDestroy(pReqs[i].data);
+ taosArrayDestroyP(pReqs[i].data, taosMemoryFree);
taosArrayDestroy(pReqs[i].dataLen);
}
taosMemoryFree(pReqs);
diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c
index 06ca26f0292df2447fa7c267a0d43e65f4117964..102bad742652005df440b5d4d7a87bcef34ba636 100644
--- a/source/libs/stream/src/streamExec.c
+++ b/source/libs/stream/src/streamExec.c
@@ -140,7 +140,6 @@ int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch)
return 0;
}
-// TODO: handle version
int32_t streamExecForAll(SStreamTask* pTask) {
while (1) {
int32_t batchCnt = 1;
diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c
index 5ff700546cf63acd3e0c4d0798383d6724947de1..1442ed2e0509e37d8b21806dc05343adcaa0f32c 100644
--- a/source/libs/stream/src/streamMeta.c
+++ b/source/libs/stream/src/streamMeta.c
@@ -14,7 +14,7 @@
*/
#include "executor.h"
-#include "tstream.h"
+#include "streamInc.h"
#include "ttimer.h"
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc) {
@@ -23,17 +23,23 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
- pMeta->path = strdup(path);
+ int32_t len = strlen(path) + 20;
+ char* streamPath = taosMemoryCalloc(1, len);
+ sprintf(streamPath, "%s/%s", path, "stream");
+ pMeta->path = strdup(streamPath);
if (tdbOpen(pMeta->path, 16 * 1024, 1, &pMeta->db) < 0) {
goto _err;
}
+ sprintf(streamPath, "%s/%s", pMeta->path, "checkpoints");
+ mkdir(streamPath, 0755);
+ taosMemoryFree(streamPath);
+
if (tdbTbOpen("task.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pTaskDb) < 0) {
goto _err;
}
- // open state storage backend
- if (tdbTbOpen("state.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pStateDb) < 0) {
+ if (tdbTbOpen("checkpoint.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pCheckpointDb) < 0) {
goto _err;
}
@@ -49,16 +55,13 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
pMeta->ahandle = ahandle;
pMeta->expandFunc = expandFunc;
- if (streamLoadTasks(pMeta) < 0) {
- goto _err;
- }
return pMeta;
_err:
if (pMeta->path) taosMemoryFree(pMeta->path);
if (pMeta->pTasks) taosHashCleanup(pMeta->pTasks);
- if (pMeta->pStateDb) tdbTbClose(pMeta->pStateDb);
if (pMeta->pTaskDb) tdbTbClose(pMeta->pTaskDb);
+ if (pMeta->pCheckpointDb) tdbTbClose(pMeta->pCheckpointDb);
if (pMeta->db) tdbClose(pMeta->db);
taosMemoryFree(pMeta);
return NULL;
@@ -67,7 +70,7 @@ _err:
void streamMetaClose(SStreamMeta* pMeta) {
tdbCommit(pMeta->db, &pMeta->txn);
tdbTbClose(pMeta->pTaskDb);
- tdbTbClose(pMeta->pStateDb);
+ tdbTbClose(pMeta->pCheckpointDb);
tdbClose(pMeta->db);
void* pIter = NULL;
@@ -262,6 +265,8 @@ int32_t streamLoadTasks(SStreamMeta* pMeta) {
}
}
+ tdbFree(pKey);
+ tdbFree(pVal);
if (tdbTbcClose(pCur) < 0) {
return -1;
}
diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c
index 263053778b1ae94de5a5353edf158e37604baf98..0505c3edd6dd8211792679b7164bcc001bde6c4e 100644
--- a/source/libs/stream/src/streamRecover.c
+++ b/source/libs/stream/src/streamRecover.c
@@ -176,6 +176,7 @@ int32_t tDecodeSStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamRecoverDownstrea
}
int32_t streamSaveStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) {
+#if 0
void* buf = NULL;
ASSERT(pTask->taskLevel == TASK_LEVEL__SINK);
@@ -224,10 +225,12 @@ int32_t streamSaveStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) {
FAIL:
if (buf) taosMemoryFree(buf);
return -1;
+#endif
return 0;
}
int32_t streamLoadStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) {
+#if 0
void* pVal = NULL;
int32_t vLen = 0;
if (tdbTbGet(pMeta->pStateDb, &pTask->taskId, sizeof(void*), &pVal, &vLen) < 0) {
@@ -241,7 +244,7 @@ int32_t streamLoadStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) {
pTask->nextCheckId = aggCheckpoint.checkpointId + 1;
pTask->checkpointInfo = aggCheckpoint.checkpointVer;
-
+#endif
return 0;
}
diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c
new file mode 100644
index 0000000000000000000000000000000000000000..dfd6f012cc4f64d252f75a20f761c6f87fc05b78
--- /dev/null
+++ b/source/libs/stream/src/streamState.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "executor.h"
+#include "streamInc.h"
+#include "tcommon.h"
+#include "ttimer.h"
+
+SStreamState* streamStateOpen(char* path, SStreamTask* pTask) {
+ SStreamState* pState = taosMemoryCalloc(1, sizeof(SStreamState));
+ if (pState == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
+ char statePath[300];
+ sprintf(statePath, "%s/%d", path, pTask->taskId);
+ if (tdbOpen(statePath, 4096, 256, &pState->db) < 0) {
+ goto _err;
+ }
+
+ // open state storage backend
+ if (tdbTbOpen("state.db", sizeof(SWinKey), -1, SWinKeyCmpr, pState->db, &pState->pStateDb) < 0) {
+ goto _err;
+ }
+
+ if (streamStateBegin(pState) < 0) {
+ goto _err;
+ }
+
+ pState->pOwner = pTask;
+
+ return pState;
+
+_err:
+ if (pState->pStateDb) tdbTbClose(pState->pStateDb);
+ if (pState->db) tdbClose(pState->db);
+ taosMemoryFree(pState);
+ return NULL;
+}
+
+void streamStateClose(SStreamState* pState) {
+ tdbCommit(pState->db, &pState->txn);
+ tdbTbClose(pState->pStateDb);
+ tdbClose(pState->db);
+
+ taosMemoryFree(pState);
+}
+
+int32_t streamStateBegin(SStreamState* pState) {
+ if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) <
+ 0) {
+ return -1;
+ }
+
+ if (tdbBegin(pState->db, &pState->txn) < 0) {
+ tdbTxnClose(&pState->txn);
+ return -1;
+ }
+ return 0;
+}
+
+int32_t streamStateCommit(SStreamState* pState) {
+ if (tdbCommit(pState->db, &pState->txn) < 0) {
+ return -1;
+ }
+ memset(&pState->txn, 0, sizeof(TXN));
+ if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) <
+ 0) {
+ return -1;
+ }
+ if (tdbBegin(pState->db, &pState->txn) < 0) {
+ return -1;
+ }
+ return 0;
+}
+
+int32_t streamStateAbort(SStreamState* pState) {
+ if (tdbAbort(pState->db, &pState->txn) < 0) {
+ return -1;
+ }
+ memset(&pState->txn, 0, sizeof(TXN));
+ if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) <
+ 0) {
+ return -1;
+ }
+ if (tdbBegin(pState->db, &pState->txn) < 0) {
+ return -1;
+ }
+ return 0;
+}
+
+int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) {
+ return tdbTbUpsert(pState->pStateDb, key, sizeof(SWinKey), value, vLen, &pState->txn);
+}
+int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
+ return tdbTbGet(pState->pStateDb, key, sizeof(SWinKey), pVal, pVLen);
+}
+
+int32_t streamStateDel(SStreamState* pState, const SWinKey* key) {
+ return tdbTbDelete(pState->pStateDb, key, sizeof(SWinKey), &pState->txn);
+}
+
+SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key) {
+ SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
+ if (pCur == NULL) return NULL;
+ tdbTbcOpen(pState->pStateDb, &pCur->pCur, NULL);
+
+ int32_t c;
+ tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c);
+ if (c != 0) {
+ taosMemoryFree(pCur);
+ return NULL;
+ }
+ return pCur;
+}
+
+int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) {
+ const SWinKey* pKTmp = NULL;
+ int32_t kLen;
+ if (tdbTbcGet(pCur->pCur, (const void**)&pKTmp, &kLen, pVal, pVLen) < 0) {
+ return -1;
+ }
+ *pKey = *pKTmp;
+ return 0;
+}
+
+int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur) {
+ //
+ return tdbTbcMoveToFirst(pCur->pCur);
+}
+
+int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur) {
+ //
+ return tdbTbcMoveToLast(pCur->pCur);
+}
+
+SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key) {
+ SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
+ if (pCur == NULL) {
+ return NULL;
+ }
+
+ int32_t c;
+ if (tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c) < 0) {
+ taosMemoryFree(pCur);
+ return NULL;
+ }
+ if (c > 0) return pCur;
+
+ if (tdbTbcMoveToNext(pCur->pCur) < 0) {
+ taosMemoryFree(pCur);
+ return NULL;
+ }
+
+ return pCur;
+}
+
+SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key) {
+ SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
+ if (pCur == NULL) {
+ return NULL;
+ }
+
+ int32_t c;
+ if (tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c) < 0) {
+ taosMemoryFree(pCur);
+ return NULL;
+ }
+ if (c < 0) return pCur;
+
+ if (tdbTbcMoveToPrev(pCur->pCur) < 0) {
+ taosMemoryFree(pCur);
+ return NULL;
+ }
+
+ return pCur;
+}
+
+int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur) {
+ //
+ return tdbTbcMoveToNext(pCur->pCur);
+}
+
+int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur) {
+ //
+ return tdbTbcMoveToPrev(pCur->pCur);
+}
+void streamStateFreeCur(SStreamStateCur* pCur) {
+ tdbTbcClose(pCur->pCur);
+ taosMemoryFree(pCur);
+}
+
+void streamFreeVal(void* val) { tdbFree(val); }
diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c
index 4009a47c65af469bc8a6f4fe5443411306e4ec2b..ce5917de296c317f739e79cb78cda21660769aa8 100644
--- a/source/libs/stream/src/streamTask.c
+++ b/source/libs/stream/src/streamTask.c
@@ -165,5 +165,8 @@ void tFreeSStreamTask(SStreamTask* pTask) {
if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
taosArrayDestroy(pTask->shuffleDispatcher.dbInfo.pVgroupInfos);
}
+
+ if (pTask->pState) streamStateClose(pTask->pState);
+
taosMemoryFree(pTask);
}
diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c
index d053662bd30287d5d9589a3881c8588fd3eb82ec..332f7ad2fd7be60f532b1394eb2d72adf985b82a 100644
--- a/source/libs/stream/src/streamUpdate.c
+++ b/source/libs/stream/src/streamUpdate.c
@@ -170,8 +170,17 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
if (ts < maxTs - pInfo->watermark) {
// this window has been closed.
if (pInfo->pCloseWinSBF) {
- return tScalableBfPut(pInfo->pCloseWinSBF, &ts, sizeof(TSKEY));
+ res = tScalableBfPut(pInfo->pCloseWinSBF, &ts, sizeof(TSKEY));
+ if (res == TSDB_CODE_SUCCESS) {
+ return false;
+ } else {
+ qDebug("===stream===Update close window sbf. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
+ maxTs, *pMapMaxTs, ts);
+ return true;
+ }
}
+ qDebug("===stream===Update close window. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
+ maxTs, *pMapMaxTs, ts);
return true;
}
@@ -193,7 +202,7 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
}
if (ts < pInfo->minTS) {
- qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
+ qDebug("===stream===Update min ts. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
maxTs, *pMapMaxTs, ts);
return true;
} else if (res == TSDB_CODE_SUCCESS) {
diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c
index 07c4fa8429dc539609d3ae788caab3352b0a3e60..3bda9bcd51a1fe41fbeb09a1e4a39c3a53f1cd74 100644
--- a/source/libs/sync/src/syncIndexMgr.c
+++ b/source/libs/sync/src/syncIndexMgr.c
@@ -163,6 +163,7 @@ int64_t syncIndexMgrGetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pR
}
}
ASSERT(0);
+ return -1;
}
void syncIndexMgrSetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t recvTime) {
@@ -190,6 +191,7 @@ int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRa
}
}
ASSERT(0);
+ return -1;
}
// for debug -------------------
@@ -245,4 +247,5 @@ SyncTerm syncIndexMgrGetTerm(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftI
}
}
ASSERT(0);
+ return -1;
}
\ No newline at end of file
diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c
index 5489a107e76082106961a0ed107413e5ec9b4a64..0be3392a9a52b69e29cbcedcb910cdbc0f9a6234 100644
--- a/source/libs/sync/src/syncSnapshot.c
+++ b/source/libs/sync/src/syncSnapshot.c
@@ -583,7 +583,7 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap
&(pReceiver->snapshot));
if (code != 0) {
syncNodeErrorLog(pReceiver->pSyncNode, "snapshot stop writer true error");
- ASSERT(0);
+ // ASSERT(0);
return -1;
}
pReceiver->pWriter = NULL;
diff --git a/source/libs/sync/src/syncTimeout.c b/source/libs/sync/src/syncTimeout.c
index af15c377fbc36ae523776824962f282462ff2bc9..c3c8131cbb31c3d4ac0b9fb59afc7bc751096329 100644
--- a/source/libs/sync/src/syncTimeout.c
+++ b/source/libs/sync/src/syncTimeout.c
@@ -91,16 +91,16 @@ int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
} else if (pMsg->timeoutType == SYNC_TIMEOUT_ELECTION) {
if (atomic_load_64(&ths->electTimerLogicClockUser) <= pMsg->logicClock) {
++(ths->electTimerCounter);
- sInfo("vgId:%d, sync timeout, type:election count:%d, electTimerLogicClockUser:%ld", ths->vgId,
- ths->electTimerCounter, ths->electTimerLogicClockUser);
+ sTrace("vgId:%d, sync timer, type:election count:%d, electTimerLogicClockUser:%ld", ths->vgId,
+ ths->electTimerCounter, ths->electTimerLogicClockUser);
syncNodeElect(ths);
}
} else if (pMsg->timeoutType == SYNC_TIMEOUT_HEARTBEAT) {
if (atomic_load_64(&ths->heartbeatTimerLogicClockUser) <= pMsg->logicClock) {
++(ths->heartbeatTimerCounter);
- sInfo("vgId:%d, sync timeout, type:replicate count:%d, heartbeatTimerLogicClockUser:%ld", ths->vgId,
- ths->heartbeatTimerCounter, ths->heartbeatTimerLogicClockUser);
+ sTrace("vgId:%d, sync timer, type:replicate count:%d, heartbeatTimerLogicClockUser:%ld", ths->vgId,
+ ths->heartbeatTimerCounter, ths->heartbeatTimerLogicClockUser);
syncNodeReplicate(ths, true);
}
} else {
diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c
index 7a44edb12cddf5a386e3b77031920559d8b0a5e9..1480920f908e34bb7be5b95affe64619ac042289 100644
--- a/source/libs/tdb/src/db/tdbBtree.c
+++ b/source/libs/tdb/src/db/tdbBtree.c
@@ -509,7 +509,7 @@ static int tdbBtreeBalanceDeeper(SBTree *pBt, SPage *pRoot, SPage **ppChild, TXN
static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTxn) {
int ret;
- int nOlds;
+ int nOlds, pageIdx;
SPage *pOlds[3] = {0};
SCell *pDivCell[3] = {0};
int szDivCell[3];
@@ -782,6 +782,11 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
pBt);
tdbPageInsertCell(pParent, sIdx++, pNewCell, szNewCell, 0);
tdbOsFree(pNewCell);
+
+ if (TDB_CELLDECODER_FREE_VAL(&cd)) {
+ tdbFree(cd.pVal);
+ cd.pVal = NULL;
+ }
}
// move to next new page
@@ -844,13 +849,11 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
}
}
- // TODO: here is not corrent for drop case
- for (int i = 0; i < nNews; i++) {
- if (i < nOlds) {
- tdbPagerReturnPage(pBt->pPager, pOlds[i], pTxn);
- } else {
- tdbPagerReturnPage(pBt->pPager, pNews[i], pTxn);
- }
+ for (pageIdx = 0; pageIdx < nOlds; ++pageIdx) {
+ tdbPagerReturnPage(pBt->pPager, pOlds[pageIdx], pTxn);
+ }
+ for (; pageIdx < nNews; ++pageIdx) {
+ tdbPagerReturnPage(pBt->pPager, pNews[pageIdx], pTxn);
}
return 0;
@@ -934,6 +937,8 @@ static int tdbFetchOvflPage(SPgno *pPgno, SPage **ppOfp, TXN *pTxn, SBTree *pBt)
return -1;
}
+ tdbPCacheRelease(pBt->pPager->pCache, *ppOfp, pTxn);
+
return ret;
}
@@ -1277,6 +1282,8 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
nLeft -= bytes;
memcpy(&pgno, ofpCell + bytes, sizeof(pgno));
+
+ tdbPCacheRelease(pBt->pPager->pCache, ofp, pTxn);
}
} else {
int nLeftKey = kLen;
@@ -1336,6 +1343,8 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
memcpy(&pgno, ofpCell + bytes, sizeof(pgno));
+ tdbPCacheRelease(pBt->pPager->pCache, ofp, pTxn);
+
nLeftKey -= bytes;
nLeft -= bytes;
}
@@ -1374,6 +1383,8 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
memcpy(&pgno, ofpCell + vLen - nLeft + bytes, sizeof(pgno));
+ tdbPCacheRelease(pBt->pPager->pCache, ofp, pTxn);
+
nLeft -= bytes;
}
}
@@ -1401,7 +1412,7 @@ static int tdbBtreeDecodeCell(SPage *pPage, const SCell *pCell, SCellDecoder *pD
pDecoder->pgno = 0;
TDB_CELLDECODER_SET_FREE_NIL(pDecoder);
- tdbDebug("tdb btc decoder set nil: %p/0x%x ", pDecoder, pDecoder->freeKV);
+ // tdbTrace("tdb btc decoder set nil: %p/0x%x ", pDecoder, pDecoder->freeKV);
// 1. Decode header part
if (!leaf) {
diff --git a/source/libs/tdb/src/db/tdbPCache.c b/source/libs/tdb/src/db/tdbPCache.c
index ab9b21dc3fc01158ef5c504d69530b30eab6d79a..62541585911a5dfdc84c0d2fb84724c83efc5475 100644
--- a/source/libs/tdb/src/db/tdbPCache.c
+++ b/source/libs/tdb/src/db/tdbPCache.c
@@ -98,6 +98,7 @@ SPage *tdbPCacheFetch(SPCache *pCache, const SPgid *pPgid, TXN *pTxn) {
// printf("thread %" PRId64 " fetch page %d pgno %d pPage %p nRef %d\n", taosGetSelfPthreadId(), pPage->id,
// TDB_PAGE_PGNO(pPage), pPage, nRef);
+ tdbDebug("pcache/fetch page %p/%d/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id, nRef);
return pPage;
}
@@ -111,6 +112,7 @@ void tdbPCacheRelease(SPCache *pCache, SPage *pPage, TXN *pTxn) {
tdbPCacheLock(pCache);
nRef = tdbUnrefPage(pPage);
+ tdbDebug("pcache/release page %p/%d/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id, nRef);
if (nRef == 0) {
// test the nRef again to make sure
// it is safe th handle the page
@@ -145,7 +147,7 @@ static SPage *tdbPCacheFetchImpl(SPCache *pCache, const SPgid *pPgid, TXN *pTxn)
// 1. Search the hash table
pPage = pCache->pgHash[tdbPCachePageHash(pPgid) % pCache->nHash];
while (pPage) {
- if (memcmp(pPage->pgid.fileid, pPgid->fileid, TDB_FILE_ID_LEN) == 0 && pPage->pgid.pgno == pPgid->pgno) break;
+ if (pPage->pgid.pgno == pPgid->pgno && memcmp(pPage->pgid.fileid, pPgid->fileid, TDB_FILE_ID_LEN) == 0) break;
pPage = pPage->pHashNext;
}
@@ -212,7 +214,8 @@ static SPage *tdbPCacheFetchImpl(SPCache *pCache, const SPgid *pPgid, TXN *pTxn)
pPage->pPager = pPageH->pPager;
memcpy(pPage->pData, pPageH->pData, pPage->pageSize);
- tdbDebug("pcache/pPageH: %p %d %p %p", pPageH, pPageH->pPageHdr - pPageH->pData, pPageH->xCellSize, pPage);
+ tdbDebug("pcache/pPageH: %p %d %p %p %d", pPageH, pPageH->pPageHdr - pPageH->pData, pPageH->xCellSize, pPage,
+ TDB_PAGE_PGNO(pPageH));
tdbPageInit(pPage, pPageH->pPageHdr - pPageH->pData, pPageH->xCellSize);
pPage->kLen = pPageH->kLen;
pPage->vLen = pPageH->vLen;
@@ -243,7 +246,7 @@ static void tdbPCachePinPage(SPCache *pCache, SPage *pPage) {
pCache->nRecyclable--;
// printf("pin page %d pgno %d pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage);
- tdbTrace("pin page %d", pPage->id);
+ tdbDebug("pcache/pin page %p/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id);
}
}
@@ -264,15 +267,14 @@ static void tdbPCacheUnpinPage(SPCache *pCache, SPage *pPage) {
pCache->nRecyclable++;
// printf("unpin page %d pgno %d pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage);
- tdbTrace("unpin page %d", pPage->id);
+ tdbDebug("pcache/unpin page %p/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id);
}
static void tdbPCacheRemovePageFromHash(SPCache *pCache, SPage *pPage) {
- SPage **ppPage;
- uint32_t h;
+ uint32_t h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash;
- h = tdbPCachePageHash(&(pPage->pgid));
- for (ppPage = &(pCache->pgHash[h % pCache->nHash]); (*ppPage) && *ppPage != pPage; ppPage = &((*ppPage)->pHashNext))
+ SPage **ppPage = &(pCache->pgHash[h]);
+ for (; (*ppPage) && *ppPage != pPage; ppPage = &((*ppPage)->pHashNext))
;
if (*ppPage) {
@@ -281,13 +283,11 @@ static void tdbPCacheRemovePageFromHash(SPCache *pCache, SPage *pPage) {
// printf("rmv page %d to hash, pgno %d, pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage);
}
- tdbTrace("remove page %d to hash", pPage->id);
+ tdbDebug("pcache/remove page %p/%d/%d from hash %" PRIu32, pPage, TDB_PAGE_PGNO(pPage), pPage->id, h);
}
static void tdbPCacheAddPageToHash(SPCache *pCache, SPage *pPage) {
- int h;
-
- h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash;
+ uint32_t h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash;
pPage->pHashNext = pCache->pgHash[h];
pCache->pgHash[h] = pPage;
@@ -295,7 +295,7 @@ static void tdbPCacheAddPageToHash(SPCache *pCache, SPage *pPage) {
pCache->nPage++;
// printf("add page %d to hash, pgno %d, pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage);
- tdbTrace("add page %d to hash", pPage->id);
+ tdbDebug("pcache/add page %p/%d/%d to hash %" PRIu32, pPage, TDB_PAGE_PGNO(pPage), pPage->id, h);
}
static int tdbPCacheOpenImpl(SPCache *pCache) {
diff --git a/source/libs/tdb/src/db/tdbPage.c b/source/libs/tdb/src/db/tdbPage.c
index 276b06b147586bbf18fe73f94cdb2592032d97e2..a3f376b929291780bdd57cbf99f5db6035e70aff 100644
--- a/source/libs/tdb/src/db/tdbPage.c
+++ b/source/libs/tdb/src/db/tdbPage.c
@@ -68,12 +68,15 @@ int tdbPageCreate(int pageSize, SPage **ppPage, void *(*xMalloc)(void *, size_t)
}
*ppPage = pPage;
+
+ tdbDebug("page/create: %p %p", pPage, xMalloc);
return 0;
}
int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg) {
u8 *ptr;
+ tdbDebug("page/destroy: %p %p", pPage, xFree);
ASSERT(xFree);
for (int iOvfl = 0; iOvfl < pPage->nOverflow; iOvfl++) {
@@ -87,6 +90,7 @@ int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg)
}
void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)) {
+ tdbDebug("page/zero: %p %" PRIu8 " %p", pPage, szAmHdr, xCellSize);
pPage->pPageHdr = pPage->pData + szAmHdr;
TDB_PAGE_NCELLS_SET(pPage, 0);
TDB_PAGE_CCELLS_SET(pPage, pPage->pageSize - sizeof(SPageFtr));
@@ -103,6 +107,7 @@ void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell
}
void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)) {
+ tdbDebug("page/init: %p %" PRIu8 " %p", pPage, szAmHdr, xCellSize);
pPage->pPageHdr = pPage->pData + szAmHdr;
pPage->pCellIdx = pPage->pPageHdr + TDB_PAGE_HDR_SIZE(pPage);
pPage->pFreeStart = pPage->pCellIdx + TDB_PAGE_OFFSET_SIZE(pPage) * TDB_PAGE_NCELLS(pPage);
diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c
index 4de99e8b1bde34c7f6583d0aedc205074d7c1cca..f90c39278888c7838b0c4b1b5b434e4c06fb30a0 100644
--- a/source/libs/tdb/src/db/tdbPager.c
+++ b/source/libs/tdb/src/db/tdbPager.c
@@ -166,6 +166,7 @@ int tdbPagerWrite(SPager *pPager, SPage *pPage) {
// ref page one more time so the page will not be release
tdbRefPage(pPage);
+ tdbDebug("pcache/mdirty page %p/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id);
// Set page as dirty
pPage->isDirty = 1;
diff --git a/source/libs/tdb/src/inc/tdbInt.h b/source/libs/tdb/src/inc/tdbInt.h
index 49126b80b6e5dd11f30a7cddf581f42994db7bec..6a694cf8f1f8cce95e7fa9373e2aa2c01128a6d9 100644
--- a/source/libs/tdb/src/inc/tdbInt.h
+++ b/source/libs/tdb/src/inc/tdbInt.h
@@ -280,13 +280,13 @@ struct SPage {
static inline i32 tdbRefPage(SPage *pPage) {
i32 nRef = atomic_add_fetch_32(&((pPage)->nRef), 1);
- tdbTrace("ref page %d, nRef %d", pPage->id, nRef);
+ tdbTrace("ref page %p/%d, nRef %d", pPage, pPage->id, nRef);
return nRef;
}
static inline i32 tdbUnrefPage(SPage *pPage) {
i32 nRef = atomic_sub_fetch_32(&((pPage)->nRef), 1);
- tdbTrace("unref page %d, nRef %d", pPage->id, nRef);
+ tdbTrace("unref page %p/%d, nRef %d", pPage, pPage->id, nRef);
return nRef;
}
diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c
index 7cfb188ac9c688a073cea236d8019eb4ab17469c..275e7b42cc10ed2a77934da84ed544a1874e0b28 100644
--- a/source/libs/transport/src/thttp.c
+++ b/source/libs/transport/src/thttp.c
@@ -21,14 +21,16 @@
#include "taoserror.h"
#include "tlog.h"
+// clang-format on
#define HTTP_RECV_BUF_SIZE 1024
+
typedef struct SHttpClient {
uv_connect_t conn;
uv_tcp_t tcp;
uv_write_t req;
uv_buf_t* wbuf;
- char *rbuf;
+ char* rbuf;
char* addr;
uint16_t port;
} SHttpClient;
@@ -129,35 +131,36 @@ static void destroyHttpClient(SHttpClient* cli) {
taosMemoryFree(cli->rbuf);
taosMemoryFree(cli->addr);
taosMemoryFree(cli);
-
}
static void clientCloseCb(uv_handle_t* handle) {
SHttpClient* cli = handle->data;
destroyHttpClient(cli);
}
-static void clientAllocBuffCb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
- SHttpClient* cli = handle->data;
- buf->base = cli->rbuf;
- buf->len = HTTP_RECV_BUF_SIZE;
+static void clientAllocBuffCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
+ SHttpClient* cli = handle->data;
+ buf->base = cli->rbuf;
+ buf->len = HTTP_RECV_BUF_SIZE;
}
-static void clientRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t *buf) {
- SHttpClient* cli = handle->data;
+static void clientRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf) {
+ SHttpClient* cli = handle->data;
if (nread < 0) {
- uError("http-report read error:%s", uv_err_name(nread));
+ uError("http-report recv error:%s", uv_err_name(nread));
} else {
- uInfo("http-report succ to read %d bytes, just ignore it", nread);
+ uTrace("http-report succ to recv %d bytes, just ignore it", nread);
}
uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
-}
+}
static void clientSentCb(uv_write_t* req, int32_t status) {
SHttpClient* cli = req->data;
if (status != 0) {
terrno = TAOS_SYSTEM_ERROR(status);
uError("http-report failed to send data %s", uv_strerror(status));
+ uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
+ return;
} else {
- uInfo("http-report succ to send data");
+ uTrace("http-report succ to send data");
}
- uv_read_start((uv_stream_t *)&cli->tcp, clientAllocBuffCb, clientRecvCb);
+ uv_read_start((uv_stream_t*)&cli->tcp, clientAllocBuffCb, clientRecvCb);
}
static void clientConnCb(uv_connect_t* req, int32_t status) {
SHttpClient* cli = req->data;
@@ -174,7 +177,7 @@ static int32_t taosBuildDstAddr(const char* server, uint16_t port, struct sockad
uint32_t ip = taosGetIpv4FromFqdn(server);
if (ip == 0xffffffff) {
terrno = TAOS_SYSTEM_ERROR(errno);
- uError("http-report failed to get http server:%s ip since %s", server, terrstr());
+ uError("http-report failed to get http server:%s since %s", server, errno == 0 ? "invalid http server" : terrstr());
return -1;
}
char buf[128] = {0};
@@ -209,7 +212,7 @@ int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32
cli->tcp.data = cli;
cli->req.data = cli;
cli->wbuf = wb;
- cli->rbuf = taosMemoryCalloc(1, HTTP_RECV_BUF_SIZE);
+ cli->rbuf = taosMemoryCalloc(1, HTTP_RECV_BUF_SIZE);
cli->addr = tstrdup(server);
cli->port = port;
@@ -223,10 +226,10 @@ int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32
if (ret != 0) {
uError("http-report failed to connect to server, reason:%s, dst:%s:%d", uv_strerror(ret), cli->addr, cli->port);
destroyHttpClient(cli);
+ uv_stop(loop);
}
uv_run(loop, UV_RUN_DEFAULT);
uv_loop_close(loop);
return terrno;
}
-// clang-format on
diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c
index 0a0dcef378bde92a18b9455b203774a3c28aa428..9e0a8f2a10c282cc8ef20e59f89aed477d5c1eef 100644
--- a/source/libs/transport/src/trans.c
+++ b/source/libs/transport/src/trans.c
@@ -43,7 +43,7 @@ void* rpcOpen(const SRpcInit* pInit) {
return NULL;
}
if (pInit->label) {
- tstrncpy(pRpc->label, pInit->label, strlen(pInit->label) + 1);
+ tstrncpy(pRpc->label, pInit->label, TSDB_LABEL_LEN);
}
// register callback handle
pRpc->cfp = pInit->cfp;
diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c
index 7052b0b915137678d6aff528a26540a973cd74f5..41688c733079f12fbd04683183dd80db3b65606d 100644
--- a/source/libs/transport/src/transCli.c
+++ b/source/libs/transport/src/transCli.c
@@ -16,7 +16,7 @@
#include "transComm.h"
typedef struct SConnList {
- queue conn;
+ queue conns;
int32_t size;
} SConnList;
@@ -107,11 +107,11 @@ static void doCloseIdleConn(void* param);
static void cliReadTimeoutCb(uv_timer_t* handle);
// register timer in each thread to clear expire conn
// static void cliTimeoutCb(uv_timer_t* handle);
-// alloc buf for recv
+// alloc buffer for recv
static void cliAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf);
-// callback after read nbytes from socket
+// callback after recv nbytes from socket
static void cliRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf);
-// callback after write data to socket
+// callback after send data to socket
static void cliSendCb(uv_write_t* req, int status);
// callback after conn to server
static void cliConnCb(uv_connect_t* req, int status);
@@ -129,19 +129,14 @@ static SCliConn* cliCreateConn(SCliThrd* thrd);
static void cliDestroyConn(SCliConn* pConn, bool clear /*clear tcp handle or not*/);
static void cliDestroy(uv_handle_t* handle);
static void cliSend(SCliConn* pConn);
+static void cliDestroyConnMsgs(SCliConn* conn, bool destroy);
-static bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx) {
- if (code != 0) return false;
- if (pCtx->retryCnt == 0) return false;
- if (transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) return false;
- return true;
-}
+// cli util func
+static bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx);
+static void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr);
+
+static int32_t cliBuildExceptResp(SCliMsg* pMsg, STransMsg* resp);
-void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr);
-/*
- * set TCP connection timeout per-socket level
- */
-static int cliCreateSocket();
// process data read from server, add decompress etc later
static void cliHandleResp(SCliConn* conn);
// handle except about conn
@@ -169,15 +164,14 @@ static void destroyThrdObj(SCliThrd* pThrd);
static void cliWalkCb(uv_handle_t* handle, void* arg);
static void cliReleaseUnfinishedMsg(SCliConn* conn) {
- SCliMsg* pMsg = NULL;
for (int i = 0; i < transQueueSize(&conn->cliMsgs); i++) {
- pMsg = transQueueGet(&conn->cliMsgs, i);
- if (pMsg != NULL && pMsg->ctx != NULL) {
- if (conn->ctx.freeFunc != NULL) {
- conn->ctx.freeFunc(pMsg->ctx->ahandle);
+ SCliMsg* msg = transQueueGet(&conn->cliMsgs, i);
+ if (msg != NULL && msg->ctx != NULL) {
+ if (conn->ctx.freeFunc != NULL && msg->ctx->ahandle != NULL) {
+ conn->ctx.freeFunc(msg->ctx->ahandle);
}
}
- destroyCmsg(pMsg);
+ destroyCmsg(msg);
}
}
#define CLI_RELEASE_UV(loop) \
@@ -217,8 +211,10 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) {
} \
if (i == sz) { \
pMsg = NULL; \
+ tDebug("msg not found, %" PRIu64 "", ahandle); \
} else { \
pMsg = transQueueRm(&conn->cliMsgs, i); \
+ tDebug("msg found, %" PRIu64 "", ahandle); \
} \
} while (0)
#define CONN_GET_NEXT_SENDMSG(conn) \
@@ -470,8 +466,8 @@ void* createConnPool(int size) {
void* destroyConnPool(void* pool) {
SConnList* connList = taosHashIterate((SHashObj*)pool, NULL);
while (connList != NULL) {
- while (!QUEUE_IS_EMPTY(&connList->conn)) {
- queue* h = QUEUE_HEAD(&connList->conn);
+ while (!QUEUE_IS_EMPTY(&connList->conns)) {
+ queue* h = QUEUE_HEAD(&connList->conns);
SCliConn* c = QUEUE_DATA(h, SCliConn, q);
cliDestroyConn(c, true);
}
@@ -484,21 +480,21 @@ void* destroyConnPool(void* pool) {
static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port) {
char key[32] = {0};
CONN_CONSTRUCT_HASH_KEY(key, ip, port);
- SHashObj* pPool = pool;
- SConnList* plist = taosHashGet(pPool, key, strlen(key));
+
+ SConnList* plist = taosHashGet((SHashObj*)pool, key, strlen(key));
if (plist == NULL) {
SConnList list = {0};
- taosHashPut(pPool, key, strlen(key), (void*)&list, sizeof(list));
- plist = taosHashGet(pPool, key, strlen(key));
- QUEUE_INIT(&plist->conn);
+ taosHashPut((SHashObj*)pool, key, strlen(key), (void*)&list, sizeof(list));
+ plist = taosHashGet((SHashObj*)pool, key, strlen(key));
+ QUEUE_INIT(&plist->conns);
}
- if (QUEUE_IS_EMPTY(&plist->conn)) {
+ if (QUEUE_IS_EMPTY(&plist->conns)) {
return NULL;
}
plist->size -= 1;
- queue* h = QUEUE_HEAD(&plist->conn);
+ queue* h = QUEUE_HEAD(&plist->conns);
SCliConn* conn = QUEUE_DATA(h, SCliConn, q);
conn->status = ConnNormal;
QUEUE_REMOVE(&conn->q);
@@ -514,22 +510,21 @@ static void addConnToPool(void* pool, SCliConn* conn) {
if (conn->status == ConnInPool) {
return;
}
- SCliThrd* thrd = conn->hostThrd;
- CONN_HANDLE_THREAD_QUIT(thrd);
-
allocConnRef(conn, true);
+ SCliThrd* thrd = conn->hostThrd;
if (conn->timer != NULL) {
uv_timer_stop(conn->timer);
taosArrayPush(thrd->timerList, &conn->timer);
conn->timer->data = NULL;
conn->timer = NULL;
}
+ if (T_REF_VAL_GET(conn) > 1) {
+ transUnrefCliHandle(conn);
+ }
+
+ cliDestroyConnMsgs(conn, false);
- STrans* pTransInst = thrd->pTransInst;
- cliReleaseUnfinishedMsg(conn);
- transQueueClear(&conn->cliMsgs);
- transCtxCleanup(&conn->ctx);
conn->status = ConnInPool;
if (conn->list == NULL) {
@@ -540,18 +535,15 @@ static void addConnToPool(void* pool, SCliConn* conn) {
} else {
tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap);
}
- assert(conn->list != NULL);
- QUEUE_INIT(&conn->q);
- QUEUE_PUSH(&conn->list->conn, &conn->q);
+ QUEUE_PUSH(&conn->list->conns, &conn->q);
conn->list->size += 1;
- conn->task = NULL;
- assert(!QUEUE_IS_EMPTY(&conn->list->conn));
-
if (conn->list->size >= 50) {
STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg));
arg->param1 = conn;
arg->param2 = thrd;
+
+ STrans* pTransInst = thrd->pTransInst;
conn->task = transDQSched(thrd->timeoutQueue, doCloseIdleConn, arg, CONN_PERSIST_TIME(pTransInst->idleTime));
}
}
@@ -691,11 +683,10 @@ static void cliDestroy(uv_handle_t* handle) {
transRemoveExHandle(transGetRefMgt(), conn->refId);
taosMemoryFree(conn->ip);
- conn->stream->data = NULL;
taosMemoryFree(conn->stream);
- transCtxCleanup(&conn->ctx);
- cliReleaseUnfinishedMsg(conn);
- transQueueDestroy(&conn->cliMsgs);
+
+ cliDestroyConnMsgs(conn, true);
+
tTrace("%s conn %p destroy successfully", CONN_GET_INST_LABEL(conn), conn);
transReqQueueClear(&conn->wreqQueue);
transDestroyBuffer(&conn->readBuf);
@@ -738,8 +729,6 @@ static void cliSendCb(uv_write_t* req, int status) {
}
void cliSend(SCliConn* pConn) {
- CONN_HANDLE_BROKEN(pConn);
-
assert(!transQueueEmpty(&pConn->cliMsgs));
SCliMsg* pCliMsg = NULL;
@@ -756,8 +745,8 @@ void cliSend(SCliConn* pConn) {
pMsg->pCont = (void*)rpcMallocCont(0);
pMsg->contLen = 0;
}
- int msgLen = transMsgLenFromCont(pMsg->contLen);
+ int msgLen = transMsgLenFromCont(pMsg->contLen);
STransMsgHead* pHead = transHeadFromCont(pMsg->pCont);
pHead->ahandle = pCtx != NULL ? (uint64_t)pCtx->ahandle : 0;
pHead->noResp = REQUEST_NO_RESP(pMsg) ? 1 : 0;
@@ -769,8 +758,6 @@ void cliSend(SCliConn* pConn) {
pHead->traceId = pMsg->info.traceId;
pHead->magicNum = htonl(TRANS_MAGIC_NUM);
- uv_buf_t wb = uv_buf_init((char*)pHead, msgLen);
-
STraceId* trace = &pMsg->info.traceId;
tGDebug("%s conn %p %s is sent to %s, local info %s, len:%d", CONN_GET_INST_LABEL(pConn), pConn,
TMSG_INFO(pHead->msgType), pConn->dst, pConn->src, pMsg->contLen);
@@ -792,6 +779,8 @@ void cliSend(SCliConn* pConn) {
tGTrace("%s conn %p start timer for msg:%s", CONN_GET_INST_LABEL(pConn), pConn, TMSG_INFO(pMsg->msgType));
uv_timer_start((uv_timer_t*)pConn->timer, cliReadTimeoutCb, TRANS_READ_TIMEOUT, 0);
}
+
+ uv_buf_t wb = uv_buf_init((char*)pHead, msgLen);
uv_write_t* req = transReqQueuePush(&pConn->wreqQueue);
uv_write(req, (uv_stream_t*)pConn->stream, &wb, 1, cliSendCb);
return;
@@ -807,7 +796,6 @@ void cliConnCb(uv_connect_t* req, int status) {
cliHandleExcept(pConn);
return;
}
- // int addrlen = sizeof(pConn->addr);
struct sockaddr peername, sockname;
int addrlen = sizeof(peername);
@@ -840,7 +828,7 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) {
int64_t refId = (int64_t)(pMsg->msg.info.handle);
SExHandle* exh = transAcquireExHandle(transGetRefMgt(), refId);
if (exh == NULL) {
- tDebug("%" PRId64 " already release", refId);
+ tDebug("%" PRId64 " already released", refId);
destroyCmsg(pMsg);
return;
}
@@ -856,6 +844,9 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) {
return;
}
cliSend(conn);
+ } else {
+ tError("%s conn %p already released", CONN_GET_INST_LABEL(conn), conn);
+ destroyCmsg(pMsg);
}
}
static void cliHandleUpdate(SCliMsg* pMsg, SCliThrd* pThrd) {
@@ -905,6 +896,27 @@ void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr) {
}
}
}
+
+bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx) {
+ if (code != 0) return false;
+ if (pCtx->retryCnt == 0) return false;
+ if (transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) return false;
+ return true;
+}
+
+int32_t cliBuildExceptResp(SCliMsg* pMsg, STransMsg* pResp) {
+ if (pMsg == NULL) return -1;
+
+ memset(pResp, 0, sizeof(STransMsg));
+
+ pResp->code = TSDB_CODE_RPC_BROKEN_LINK;
+ pResp->msgType = pMsg->msg.msgType + 1;
+ pResp->info.ahandle = pMsg->ctx ? pMsg->ctx->ahandle : NULL;
+ pResp->info.traceId = pMsg->msg.info.traceId;
+
+ return 0;
+}
+
void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
STrans* pTransInst = pThrd->pTransInst;
STransConnCtx* pCtx = pMsg->ctx;
@@ -920,13 +932,8 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
SCliConn* conn = cliGetConn(pMsg, pThrd, &ignore);
if (ignore == true) {
// persist conn already release by server
- STransMsg resp = {0};
- resp.code = TSDB_CODE_RPC_BROKEN_LINK;
- resp.msgType = pMsg->msg.msgType + 1;
-
- resp.info.ahandle = pMsg && pMsg->ctx ? pMsg->ctx->ahandle : NULL;
- resp.info.traceId = pMsg->msg.info.traceId;
-
+ STransMsg resp;
+ cliBuildExceptResp(pMsg, &resp);
pTransInst->cfp(pTransInst->parent, &resp, NULL);
destroyCmsg(pMsg);
return;
@@ -991,9 +998,6 @@ static void cliAsyncCb(uv_async_t* handle) {
QUEUE_REMOVE(h);
SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q);
- if (pMsg == NULL) {
- continue;
- }
(*cliAsyncHandle[pMsg->type])(pMsg, pThrd);
count++;
}
@@ -1035,24 +1039,58 @@ static void cliPrepareCb(uv_prepare_t* handle) {
if (thrd->stopMsg != NULL) cliHandleQuit(thrd->stopMsg, thrd);
}
+void cliDestroyConnMsgs(SCliConn* conn, bool destroy) {
+ transCtxCleanup(&conn->ctx);
+ cliReleaseUnfinishedMsg(conn);
+ if (destroy == 1) {
+ transQueueDestroy(&conn->cliMsgs);
+ } else {
+ transQueueClear(&conn->cliMsgs);
+ }
+}
+
+void cliIteraConnMsgs(SCliConn* conn) {
+ SCliThrd* pThrd = conn->hostThrd;
+ STrans* pTransInst = pThrd->pTransInst;
+
+ for (int i = 0; i < transQueueSize(&conn->cliMsgs); i++) {
+ SCliMsg* cmsg = transQueueGet(&conn->cliMsgs, i);
+ if (cmsg->type == Release || REQUEST_NO_RESP(&cmsg->msg) || cmsg->msg.msgType == TDMT_SCH_DROP_TASK) {
+ continue;
+ }
+
+ STransMsg resp = {0};
+ if (-1 == cliBuildExceptResp(cmsg, &resp)) {
+ continue;
+ }
+ pTransInst->cfp(pTransInst->parent, &resp, NULL);
+
+ cmsg->ctx->ahandle = NULL;
+ }
+}
bool cliRecvReleaseReq(SCliConn* conn, STransMsgHead* pHead) {
if (pHead->release == 1 && (pHead->msgLen) == sizeof(*pHead)) {
uint64_t ahandle = pHead->ahandle;
+ tDebug("ahandle = %" PRIu64 "", ahandle);
SCliMsg* pMsg = NULL;
CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle);
+
transClearBuffer(&conn->readBuf);
transFreeMsg(transContFromHead((char*)pHead));
- if (transQueueSize(&conn->cliMsgs) > 0 && ahandle == 0) {
- SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, 0);
- if (cliMsg->type == Release) return true;
+
+ for (int i = 0; ahandle == 0 && i < transQueueSize(&conn->cliMsgs); i++) {
+ SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, i);
+ if (cliMsg->type == Release) {
+ assert(pMsg == NULL);
+ return true;
+ }
}
+
+ cliIteraConnMsgs(conn);
+
tDebug("%s conn %p receive release request, refId:%" PRId64 "", CONN_GET_INST_LABEL(conn), conn, conn->refId);
- if (T_REF_VAL_GET(conn) > 1) {
- transUnrefCliHandle(conn);
- }
destroyCmsg(pMsg);
- cliReleaseUnfinishedMsg(conn);
- transQueueClear(&conn->cliMsgs);
+
addConnToPool(((SCliThrd*)conn->hostThrd)->pool, conn);
return true;
}
diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c
index 447db7613656613255369230138979a7596754a9..7007079f8760c944c70bbfe0798fb0fc518cf958 100644
--- a/source/libs/transport/src/transSvr.c
+++ b/source/libs/transport/src/transSvr.c
@@ -276,14 +276,16 @@ void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) {
while (transReadComplete(pBuf)) {
tTrace("%s conn %p alread read complete packet", transLabel(pTransInst), conn);
if (true == pBuf->invalid || false == uvHandleReq(conn)) {
- tError("%s conn %p read invalid packet", transLabel(pTransInst), conn);
+ tError("%s conn %p read invalid packet, received from %s, local info:%s", transLabel(pTransInst), conn,
+ conn->dst, conn->src);
destroyConn(conn, true);
return;
}
}
return;
} else {
- tError("%s conn %p read invalid packet, exceed limit", transLabel(pTransInst), conn);
+      tError("%s conn %p read invalid packet, exceed limit, received from %s, local info:%s", transLabel(pTransInst),
+ conn, conn->dst, conn->src);
destroyConn(conn, true);
return;
}
@@ -490,7 +492,6 @@ void uvWorkerAsyncCb(uv_async_t* handle) {
// release handle to rpc init
if (msg->type == Quit) {
(*transAsyncHandle[msg->type])(msg, pThrd);
- continue;
} else {
STransMsg transMsg = msg->msg;
@@ -649,7 +650,7 @@ void uvOnAcceptCb(uv_stream_t* stream, int status) {
pObj->workerIdx = (pObj->workerIdx + 1) % pObj->numOfThreads;
- tTrace("new conntion accepted by main server, dispatch to %dth worker-thread", pObj->workerIdx);
+ tTrace("new connection accepted by main server, dispatch to %dth worker-thread", pObj->workerIdx);
uv_write2(wr, (uv_stream_t*)&(pObj->pipe[pObj->workerIdx][0]), &buf, 1, (uv_stream_t*)cli, uvOnPipeWriteCb);
} else {
@@ -769,7 +770,7 @@ static bool addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName) {
// conn set
QUEUE_INIT(&pThrd->conn);
- pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 1, pThrd, uvWorkerAsyncCb);
+ pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 5, pThrd, uvWorkerAsyncCb);
uv_pipe_connect(&pThrd->connect_req, pThrd->pipe, pipeName, uvOnPipeConnectionCb);
// uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb);
return true;
@@ -905,23 +906,30 @@ static void uvDestroyConn(uv_handle_t* handle) {
}
}
static void uvPipeListenCb(uv_stream_t* handle, int status) {
- ASSERT(status == 0);
+ if (status != 0) {
+ tError("server failed to init pipe");
+ return;
+ }
SServerObj* srv = container_of(handle, SServerObj, pipeListen);
uv_pipe_t* pipe = &(srv->pipe[srv->numOfWorkerReady][0]);
- ASSERT(0 == uv_pipe_init(srv->loop, pipe, 1));
- ASSERT(0 == uv_accept((uv_stream_t*)&srv->pipeListen, (uv_stream_t*)pipe));
- ASSERT(1 == uv_is_readable((uv_stream_t*)pipe));
- ASSERT(1 == uv_is_writable((uv_stream_t*)pipe));
- ASSERT(0 == uv_is_closing((uv_handle_t*)pipe));
+ int ret = uv_pipe_init(srv->loop, pipe, 1);
+ assert(ret == 0);
- srv->numOfWorkerReady++;
+ ret = uv_accept((uv_stream_t*)&srv->pipeListen, (uv_stream_t*)pipe);
+ assert(ret == 0);
- // ASSERT(0 == uv_listen((uv_stream_t*)&ctx.send.tcp, 512, uvOnAcceptCb));
+ ret = uv_is_readable((uv_stream_t*)pipe);
+ assert(ret == 1);
- // r = uv_read_start((uv_stream_t*)&ctx.channel, alloc_cb, read_cb);
- // ASSERT(r == 0);
+ ret = uv_is_writable((uv_stream_t*)pipe);
+ assert(ret == 1);
+
+ ret = uv_is_closing((uv_handle_t*)pipe);
+ assert(ret == 0);
+
+ srv->numOfWorkerReady++;
}
void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle) {
@@ -936,7 +944,9 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
srv->port = port;
uv_loop_init(srv->loop);
- assert(0 == uv_pipe_init(srv->loop, &srv->pipeListen, 0));
+ int ret = uv_pipe_init(srv->loop, &srv->pipeListen, 0);
+ assert(ret == 0);
+
#ifdef WINDOWS
char pipeName[64];
snprintf(pipeName, sizeof(pipeName), "\\\\?\\pipe\\trans.rpc.%p-" PRIu64, taosSafeRand(), GetCurrentProcessId());
@@ -945,8 +955,11 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
snprintf(pipeName, sizeof(pipeName), "%s%spipe.trans.rpc.%08X-" PRIu64, tsTempDir, TD_DIRSEP, taosSafeRand(),
taosGetSelfPthreadId());
#endif
- assert(0 == uv_pipe_bind(&srv->pipeListen, pipeName));
- assert(0 == uv_listen((uv_stream_t*)&srv->pipeListen, SOMAXCONN, uvPipeListenCb));
+ ret = uv_pipe_bind(&srv->pipeListen, pipeName);
+ assert(ret == 0);
+
+ ret = uv_listen((uv_stream_t*)&srv->pipeListen, SOMAXCONN, uvPipeListenCb);
+ assert(ret == 0);
for (int i = 0; i < srv->numOfThreads; i++) {
SWorkThrd* thrd = (SWorkThrd*)taosMemoryCalloc(1, sizeof(SWorkThrd));
diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c
index 0983d344c1be9f03ca3010d75c78059e76ac8cb7..93ced912f8e2358c2aab6f04957ce060cf61c924 100644
--- a/source/libs/wal/src/walMeta.c
+++ b/source/libs/wal/src/walMeta.c
@@ -121,7 +121,7 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) {
if (found == NULL) {
// file corrupted, no complete log
// TODO delete and search in previous files
- ASSERT(0);
+ /*ASSERT(0);*/
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
return -1;
}
diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c
index a5b5a2b7b4cac113978d8278ecf0a57686a67257..9db7d6c4554e3231399abd5ab470ea71f8207a37 100644
--- a/source/libs/wal/src/walRead.c
+++ b/source/libs/wal/src/walRead.c
@@ -372,7 +372,7 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) {
int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead) {
int64_t code;
- ASSERT(pRead->curVersion == pHead->head.version);
+// ASSERT(pRead->curVersion == pHead->head.version);
code = taosLSeekFile(pRead->pLogFile, pHead->head.bodyLen, SEEK_CUR);
if (code < 0) {
diff --git a/source/os/src/osDir.c b/source/os/src/osDir.c
index 30aaa01dae0bf26bb930271f056d77226e808a4d..3dfb1458ad2fc802af761f68a4fe4407098fff25 100644
--- a/source/os/src/osDir.c
+++ b/source/os/src/osDir.c
@@ -160,6 +160,66 @@ int32_t taosMulMkDir(const char *dirname) {
return code;
}
+int32_t taosMulModeMkDir(const char *dirname, int mode) {
+ if (dirname == NULL) return -1;
+ char temp[1024];
+ char *pos = temp;
+ int32_t code = 0;
+#ifdef WINDOWS
+ taosRealPath(dirname, temp, sizeof(temp));
+ if (temp[1] == ':') pos += 3;
+#else
+ strcpy(temp, dirname);
+#endif
+
+ if (taosDirExist(temp)) {
+ chmod(temp, mode);
+ return code;
+ }
+
+ if (strncmp(temp, TD_DIRSEP, 1) == 0) {
+ pos += 1;
+ } else if (strncmp(temp, "." TD_DIRSEP, 2) == 0) {
+ pos += 2;
+ }
+
+ for (; *pos != '\0'; pos++) {
+ if (*pos == TD_DIRSEP[0]) {
+ *pos = '\0';
+#ifdef WINDOWS
+ code = _mkdir(temp, mode);
+#else
+ code = mkdir(temp, mode);
+#endif
+ if (code < 0 && errno != EEXIST) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return code;
+ }
+ *pos = TD_DIRSEP[0];
+ }
+ }
+
+ if (*(pos - 1) != TD_DIRSEP[0]) {
+#ifdef WINDOWS
+ code = _mkdir(temp, mode);
+#else
+ code = mkdir(temp, mode);
+#endif
+ if (code < 0 && errno != EEXIST) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return code;
+ }
+ }
+
+ if (code < 0 && errno == EEXIST) {
+ chmod(temp, mode);
+ return 0;
+ }
+
+ chmod(temp, mode);
+ return code;
+}
+
void taosRemoveOldFiles(const char *dirname, int32_t keepDays) {
TdDirPtr pDir = taosOpenDir(dirname);
if (pDir == NULL) return;
diff --git a/source/util/src/talgo.c b/source/util/src/talgo.c
index 5353cd9bfec94b460fc1f5c3d9ad657ead6ad76b..699f0db7a193b1e0390efd12de6f639de5b69f86 100644
--- a/source/util/src/talgo.c
+++ b/source/util/src/talgo.c
@@ -201,6 +201,7 @@ void *taosbsearch(const void *key, const void *base, int32_t nmemb, int32_t size
return (c > 0) ? p : (midx > 0 ? p - size : NULL);
} else {
ASSERT(0);
+ return NULL;
}
}
diff --git a/source/util/src/tcache.c b/source/util/src/tcache.c
index dd61f7d22548a223bb33f01fb40b9dcd7423e5af..f9f42aa103d2b7ccbb95c60e5ae7e45c95e51699 100644
--- a/source/util/src/tcache.c
+++ b/source/util/src/tcache.c
@@ -702,7 +702,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj) {
taosMsleep(50);
}
- uInfo("cache:%s will be cleaned up", pCacheObj->name);
+ uTrace("cache:%s will be cleaned up", pCacheObj->name);
doCleanupDataCache(pCacheObj);
}
diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c
index fe3065b2b78a46a85d6dc04b90fcff4e0fe80f03..cbda4e46557e7931d1ce5dea31c2baa4f2d6ddef 100644
--- a/source/util/src/tcompare.c
+++ b/source/util/src/tcompare.c
@@ -186,15 +186,16 @@ int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) {
int32_t len1 = varDataLen(pLeft);
int32_t len2 = varDataLen(pRight);
- if (len1 != len2) {
- return len1 > len2 ? 1 : -1;
- } else {
- int32_t ret = strncmp(varDataVal(pLeft), varDataVal(pRight), len1);
- if (ret == 0) {
+ int32_t minLen = TMIN(len1, len2);
+ int32_t ret = strncmp(varDataVal(pLeft), varDataVal(pRight), minLen);
+ if (ret == 0) {
+ if (len1 == len2) {
return 0;
} else {
- return ret > 0 ? 1 : -1;
+ return len1 > len2 ? 1 : -1;
}
+ } else {
+ return ret > 0 ? 1 : -1;
}
}
@@ -243,9 +244,760 @@ int32_t compareJsonVal(const void *pLeft, const void *pRight) {
return 0;
}else{
assert(0);
+ return 0;
}
}
+int32_t compareInt8Int16(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Int32(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Int64(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Float(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Double(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Uint8(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Uint16(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Uint32(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Uint64(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Int8(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Int32(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Int64(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Float(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Double(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Uint8(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Uint16(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Uint32(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Uint64(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+
+int32_t compareInt32Int8(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Int16(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Int64(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Float(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Double(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Uint8(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Uint16(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Uint32(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Uint64(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Int8(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Int16(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Int32(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Float(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Double(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint8(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint16(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint32(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint64(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatInt8(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatInt16(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatInt32(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatInt64(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatDouble(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+
+ if (isnan(left) && isnan(right)) {
+ return 0;
+ }
+
+ if (isnan(left)) {
+ return -1;
+ }
+
+ if (isnan(right)) {
+ return 1;
+ }
+
+ if (FLT_EQUAL(left, right)) {
+ return 0;
+ }
+ return FLT_GREATER(left, right) ? 1 : -1;
+}
+
+int32_t compareFloatUint8(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatUint16(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatUint32(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatUint64(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt8(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt16(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt32(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt64(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleFloat(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+
+ if (isnan(left) && isnan(right)) {
+ return 0;
+ }
+
+ if (isnan(left)) {
+ return -1;
+ }
+
+ if (isnan(right)) {
+ return 1;
+ }
+
+ if (FLT_EQUAL(left, right)) {
+ return 0;
+ }
+ return FLT_GREATER(left, right) ? 1 : -1;
+}
+
+int32_t compareDoubleUint8(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleUint16(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleUint32(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleUint64(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int8(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int16(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int32(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int64(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Float(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Double(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Uint16(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Uint32(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Uint64(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int8(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int16(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int32(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int64(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Float(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Double(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Uint8(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Uint32(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Uint64(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Int8(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Int16(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Int32(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Int64(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Float(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Double(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Uint8(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Uint16(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Uint64(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Int8(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Int16(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Int32(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Int64(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Float(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Double(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Uint8(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Uint16(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Uint32(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+
int32_t compareJsonValDesc(const void *pLeft, const void *pRight) {
return compareJsonVal(pRight, pLeft);
}
diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c
index e8f1f06ef10d6da3c2ae0c24b7626ccdc5b72aea..ba877915b13b6e522367637bd7713edc8feee0f3 100644
--- a/source/util/src/tcompression.c
+++ b/source/util/src/tcompression.c
@@ -83,8 +83,8 @@ int32_t tsCompressInit() {
if (lossyFloat == false && lossyDouble == false) return 0;
tdszInit(fPrecision, dPrecision, maxRange, curRange, Compressor);
- if (lossyFloat) uInfo("lossy compression float is opened. ");
- if (lossyDouble) uInfo("lossy compression double is opened. ");
+ if (lossyFloat) uTrace("lossy compression float is opened. ");
+ if (lossyDouble) uTrace("lossy compression double is opened. ");
return 1;
}
// exit call
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 662a3f0c88012191f3a7d76c78eb6d06a8b20292..3117152af6a3a5c4d1fb6ce08896924d3c0d6d1c 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -121,7 +121,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_CONN_KILLED, "Connection killed")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SQL_SYNTAX_ERROR, "Syntax error in SQL")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DB_NOT_SELECTED, "Database not specified or available")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TABLE_NAME, "Table does not exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too long, check maxSQLLength config")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too long")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_FILE_EMPTY, "File is empty")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_LINE_SYNTAX_ERROR, "Syntax error in Line")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_META_CACHED, "No table meta cached")
@@ -621,7 +621,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_INVALID_SCHEMA, "Rsma invalid schema
//index
TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding")
-TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Invalid index file")
+TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_INVALID_FILE, "Index file is invalid")
//tmq
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message")
diff --git a/source/util/src/thash.c b/source/util/src/thash.c
index aee84a0d55336c63840d1a5df887da7752592841..b69d8ea52866055668ce4937836c5eb46842f1c2 100644
--- a/source/util/src/thash.c
+++ b/source/util/src/thash.c
@@ -21,7 +21,7 @@
// the add ref count operation may trigger the warning if the reference count is greater than the MAX_WARNING_REF_COUNT
#define MAX_WARNING_REF_COUNT 10000
-#define HASH_MAX_CAPACITY (1024 * 1024 * 16)
+#define HASH_MAX_CAPACITY (1024 * 1024 * 1024)
#define HASH_DEFAULT_LOAD_FACTOR (0.75)
#define HASH_INDEX(v, c) ((v) & ((c)-1))
@@ -67,6 +67,7 @@ struct SHashObj {
bool enableUpdate; // enable update
SArray *pMemBlock; // memory block allocated for SHashEntry
_hash_before_fn_t callbackFp; // function invoked before return the value to caller
+ int64_t compTimes;
};
/*
@@ -146,6 +147,7 @@ static FORCE_INLINE SHashNode *doSearchInEntryList(SHashObj *pHashObj, SHashEntr
uint32_t hashVal) {
SHashNode *pNode = pe->next;
while (pNode) {
+ atomic_add_fetch_64(&pHashObj->compTimes, 1);
if ((pNode->keyLen == keyLen) && ((*(pHashObj->equalFp))(GET_HASH_NODE_KEY(pNode), key, keyLen) == 0) &&
pNode->removed == 0) {
assert(pNode->hashVal == hashVal);
@@ -250,11 +252,15 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTyp
// the max slots is not defined by user
pHashObj->capacity = taosHashCapacity((int32_t)capacity);
+ pHashObj->size = 0;
pHashObj->equalFp = memcmp;
pHashObj->hashFp = fn;
pHashObj->type = type;
+ pHashObj->lock = 0;
pHashObj->enableUpdate = update;
+ pHashObj->freeFp = NULL;
+ pHashObj->callbackFp = NULL;
ASSERT((pHashObj->capacity & (pHashObj->capacity - 1)) == 0);
@@ -327,7 +333,7 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, const vo
// disable resize
taosHashRLock(pHashObj);
- int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity);
+ uint32_t slot = HASH_INDEX(hashVal, pHashObj->capacity);
SHashEntry *pe = pHashObj->hashList[slot];
taosHashEntryWLock(pHashObj, pe);
@@ -882,3 +888,7 @@ void *taosHashAcquire(SHashObj *pHashObj, const void *key, size_t keyLen) {
}
void taosHashRelease(SHashObj *pHashObj, void *p) { taosHashCancelIterate(pHashObj, p); }
+
+int64_t taosHashGetCompTimes(SHashObj *pHashObj) { return atomic_load_64(&pHashObj->compTimes); }
+
+
diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c
index a2d65d6a542f72eae239f15c79be7c64c8df3bd1..46203658f15848fffca902606ba0df50647dac86 100644
--- a/source/util/src/tlog.c
+++ b/source/util/src/tlog.c
@@ -97,7 +97,7 @@ int32_t tqDebugFlag = 135;
int32_t fsDebugFlag = 135;
int32_t metaDebugFlag = 135;
int32_t udfDebugFlag = 135;
-int32_t smaDebugFlag = 135;
+int32_t smaDebugFlag = 131;
int32_t idxDebugFlag = 135;
int64_t dbgEmptyW = 0;
@@ -446,7 +446,10 @@ static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *b
}
if (dflag & DEBUG_SCREEN) {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
write(1, buffer, (uint32_t)len);
+#pragma GCC diagnostic pop
}
}
diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c
index 0e608d0da22da836f0a357c7bd4f9b194c11fd13..2767fed9373aa47ebdbea39b07f28c238db14c7d 100644
--- a/source/util/src/tpagedbuf.c
+++ b/source/util/src/tpagedbuf.c
@@ -33,7 +33,7 @@ struct SDiskbasedBuf {
int32_t pageSize; // current used page size
int32_t inMemPages; // numOfPages that are allocated in memory
SList* freePgList; // free page list
- SHashObj* groupSet; // id hash table, todo remove it
+ SArray* pIdList; // page id list
SHashObj* all;
SList* lruList;
void* emptyDummyIdList; // dummy id list
@@ -241,26 +241,7 @@ static int32_t loadPageFromDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) {
return 0;
}
-static SIDList addNewGroup(SDiskbasedBuf* pBuf, int32_t groupId) {
- assert(taosHashGet(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t)) == NULL);
-
- SArray* pa = taosArrayInit(1, POINTER_BYTES);
- int32_t ret = taosHashPut(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t), &pa, POINTER_BYTES);
- assert(ret == 0);
-
- return pa;
-}
-
-static SPageInfo* registerPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t pageId) {
- SIDList list = NULL;
-
- char** p = taosHashGet(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t));
- if (p == NULL) { // it is a new group id
- list = addNewGroup(pBuf, groupId);
- } else {
- list = (SIDList)(*p);
- }
-
+static SPageInfo* registerPage(SDiskbasedBuf* pBuf, int32_t pageId) {
pBuf->numOfPages += 1;
SPageInfo* ppi = taosMemoryMalloc(sizeof(SPageInfo));
@@ -273,7 +254,7 @@ static SPageInfo* registerPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t pag
ppi->pn = NULL;
ppi->dirty = false;
- return *(SPageInfo**)taosArrayPush(list, &ppi);
+ return *(SPageInfo**)taosArrayPush(pBuf->pIdList, &ppi);
}
static SListNode* getEldestUnrefedPage(SDiskbasedBuf* pBuf) {
@@ -293,22 +274,13 @@ static SListNode* getEldestUnrefedPage(SDiskbasedBuf* pBuf) {
}
}
- // int32_t pos = listNEles(pBuf->lruList);
- // SListIter iter1 = {0};
- // tdListInitIter(pBuf->lruList, &iter1, TD_LIST_BACKWARD);
- // SListNode* pn1 = NULL;
- // while((pn1 = tdListNext(&iter1)) != NULL) {
- // SPageInfo* pageInfo = *(SPageInfo**) pn1->data;
- // printf("page %d is used, dirty:%d, pos:%d\n", pageInfo->pageId, pageInfo->dirty, pos - 1);
- // pos -= 1;
- // }
-
return pn;
}
static char* evacOneDataPage(SDiskbasedBuf* pBuf) {
char* bufPage = NULL;
SListNode* pn = getEldestUnrefedPage(pBuf);
+ terrno = 0;
// all pages are referenced by user, try to allocate new space
if (pn == NULL) {
@@ -332,6 +304,7 @@ static char* evacOneDataPage(SDiskbasedBuf* pBuf) {
bufPage = flushPageToDisk(pBuf, d);
}
+ ASSERT((bufPage != NULL) || terrno != TSDB_CODE_SUCCESS);
return bufPage;
}
@@ -380,7 +353,8 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
// init id hash table
_hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT);
- pPBuf->groupSet = taosHashInit(10, fn, true, false);
+ pPBuf->pIdList = taosArrayInit(4, POINTER_BYTES);
+
pPBuf->assistBuf = taosMemoryMalloc(pPBuf->pageSize + 2); // EXTRA BYTES
pPBuf->all = taosHashInit(10, fn, true, false);
@@ -397,7 +371,7 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
return TSDB_CODE_SUCCESS;
}
-void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId) {
+void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) {
pBuf->statis.getPages += 1;
char* availablePage = NULL;
@@ -423,7 +397,7 @@ void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId) {
*pageId = (++pBuf->allocateId);
// register page id info
- pi = registerPage(pBuf, groupId, *pageId);
+ pi = registerPage(pBuf, *pageId);
// add to hash map
taosHashPut(pBuf->all, pageId, sizeof(int32_t), &pi, POINTER_BYTES);
@@ -524,19 +498,11 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, SPageInfo* pi) {
pBuf->statis.releasePages += 1;
}
-size_t getNumOfBufGroupId(const SDiskbasedBuf* pBuf) { return taosHashGetSize(pBuf->groupSet); }
-
size_t getTotalBufSize(const SDiskbasedBuf* pBuf) { return (size_t)pBuf->totalBufSize; }
-SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf, int32_t groupId) {
- assert(pBuf != NULL);
-
- char** p = taosHashGet(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t));
- if (p == NULL) { // it is a new group id
- return pBuf->emptyDummyIdList;
- } else {
- return (SArray*)(*p);
- }
+SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf) {
+ ASSERT(pBuf != NULL);
+ return pBuf->pIdList;
}
void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) {
@@ -576,26 +542,21 @@ void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) {
taosRemoveFile(pBuf->path);
taosMemoryFreeClear(pBuf->path);
- SArray** p = taosHashIterate(pBuf->groupSet, NULL);
- while (p) {
- size_t n = taosArrayGetSize(*p);
- for (int32_t i = 0; i < n; ++i) {
- SPageInfo* pi = taosArrayGetP(*p, i);
- taosMemoryFreeClear(pi->pData);
- taosMemoryFreeClear(pi);
- }
-
- taosArrayDestroy(*p);
- p = taosHashIterate(pBuf->groupSet, p);
+ size_t n = taosArrayGetSize(pBuf->pIdList);
+ for (int32_t i = 0; i < n; ++i) {
+ SPageInfo* pi = taosArrayGetP(pBuf->pIdList, i);
+ taosMemoryFreeClear(pi->pData);
+ taosMemoryFreeClear(pi);
}
+ taosArrayDestroy(pBuf->pIdList);
+
tdListFree(pBuf->lruList);
tdListFree(pBuf->freePgList);
taosArrayDestroy(pBuf->emptyDummyIdList);
taosArrayDestroy(pBuf->pFree);
- taosHashCleanup(pBuf->groupSet);
taosHashCleanup(pBuf->all);
taosMemoryFreeClear(pBuf->id);
@@ -659,32 +620,32 @@ void dBufPrintStatis(const SDiskbasedBuf* pBuf) {
pBuf->totalBufSize / 1024.0, pBuf->numOfPages, listNEles(pBuf->lruList) * pBuf->pageSize / 1024.0,
listNEles(pBuf->lruList), pBuf->fileSize / 1024.0, pBuf->pageSize / 1024.0f, pBuf->id);
- printf(
- "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f Kb\n",
- ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f, ps->loadPages,
- ps->loadBytes / (1024.0 * ps->loadPages));
+ if (ps->loadPages > 0) {
+ printf(
+ "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f Kb\n",
+ ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f,
+ ps->loadPages, ps->loadBytes / (1024.0 * ps->loadPages));
+ } else {
+ printf("no page loaded\n");
+ }
}
void clearDiskbasedBuf(SDiskbasedBuf* pBuf) {
- SArray** p = taosHashIterate(pBuf->groupSet, NULL);
- while (p) {
- size_t n = taosArrayGetSize(*p);
- for (int32_t i = 0; i < n; ++i) {
- SPageInfo* pi = taosArrayGetP(*p, i);
- taosMemoryFreeClear(pi->pData);
- taosMemoryFreeClear(pi);
- }
- taosArrayDestroy(*p);
- p = taosHashIterate(pBuf->groupSet, p);
+ size_t n = taosArrayGetSize(pBuf->pIdList);
+ for (int32_t i = 0; i < n; ++i) {
+ SPageInfo* pi = taosArrayGetP(pBuf->pIdList, i);
+ taosMemoryFreeClear(pi->pData);
+ taosMemoryFreeClear(pi);
}
+ taosArrayClear(pBuf->pIdList);
+
tdListEmpty(pBuf->lruList);
tdListEmpty(pBuf->freePgList);
taosArrayClear(pBuf->emptyDummyIdList);
taosArrayClear(pBuf->pFree);
- taosHashClear(pBuf->groupSet);
taosHashClear(pBuf->all);
pBuf->numOfPages = 0; // all pages are in buffer in the first place
diff --git a/source/util/src/version.c.in b/source/util/src/version.c.in
index be1a4a404875739cdef349a901e52e195c2a9cde..cb307b57fce37ba4243aea83995e66612f3c4371 100644
--- a/source/util/src/version.c.in
+++ b/source/util/src/version.c.in
@@ -1,4 +1,4 @@
-char version[12] = "${TD_VER_NUMBER}";
+char version[64] = "${TD_VER_NUMBER}";
char compatible_version[12] = "${TD_VER_COMPATIBLE}";
char gitinfo[48] = "${TD_VER_GIT}";
char buildinfo[64] = "Built at ${TD_VER_DATE}";
diff --git a/source/util/test/hashTest.cpp b/source/util/test/hashTest.cpp
index 99f5a761c5d0d3a489176749883da981c847011d..97e67ea36e7120b5e09f1097b5fb979b6fc12224 100644
--- a/source/util/test/hashTest.cpp
+++ b/source/util/test/hashTest.cpp
@@ -197,6 +197,201 @@ void acquireRleaseTest() {
taosMemoryFreeClear(data.p);
}
+void perfTest() {
+ SHashObj* hash1h = (SHashObj*) taosHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ SHashObj* hash1s = (SHashObj*) taosHashInit(1000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ SHashObj* hash10s = (SHashObj*) taosHashInit(10000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ SHashObj* hash100s = (SHashObj*) taosHashInit(100000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ SHashObj* hash1m = (SHashObj*) taosHashInit(1000000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ SHashObj* hash10m = (SHashObj*) taosHashInit(10000000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ SHashObj* hash100m = (SHashObj*) taosHashInit(100000000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+
+ char *name = (char*)taosMemoryCalloc(50000000, 9);
+ for (int64_t i = 0; i < 50000000; ++i) {
+ sprintf(name + i * 9, "t%08d", i);
+ }
+
+ for (int64_t i = 0; i < 50; ++i) {
+ taosHashPut(hash1h, name + i * 9, 9, &i, sizeof(i));
+ }
+
+ for (int64_t i = 0; i < 500; ++i) {
+ taosHashPut(hash1s, name + i * 9, 9, &i, sizeof(i));
+ }
+
+ for (int64_t i = 0; i < 5000; ++i) {
+ taosHashPut(hash10s, name + i * 9, 9, &i, sizeof(i));
+ }
+
+ for (int64_t i = 0; i < 50000; ++i) {
+ taosHashPut(hash100s, name + i * 9, 9, &i, sizeof(i));
+ }
+
+ for (int64_t i = 0; i < 500000; ++i) {
+ taosHashPut(hash1m, name + i * 9, 9, &i, sizeof(i));
+ }
+
+ for (int64_t i = 0; i < 5000000; ++i) {
+ taosHashPut(hash10m, name + i * 9, 9, &i, sizeof(i));
+ }
+
+ for (int64_t i = 0; i < 50000000; ++i) {
+ taosHashPut(hash100m, name + i * 9, 9, &i, sizeof(i));
+ }
+
+ int64_t start1h = taosGetTimestampMs();
+ int64_t start1hCt = taosHashGetCompTimes(hash1h);
+ for (int64_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(hash1h, name + (i % 50) * 9, 9));
+ }
+ int64_t end1h = taosGetTimestampMs();
+ int64_t end1hCt = taosHashGetCompTimes(hash1h);
+
+ int64_t start1s = taosGetTimestampMs();
+ int64_t start1sCt = taosHashGetCompTimes(hash1s);
+ for (int64_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(hash1s, name + (i % 500) * 9, 9));
+ }
+ int64_t end1s = taosGetTimestampMs();
+ int64_t end1sCt = taosHashGetCompTimes(hash1s);
+
+ int64_t start10s = taosGetTimestampMs();
+ int64_t start10sCt = taosHashGetCompTimes(hash10s);
+ for (int64_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(hash10s, name + (i % 5000) * 9, 9));
+ }
+ int64_t end10s = taosGetTimestampMs();
+ int64_t end10sCt = taosHashGetCompTimes(hash10s);
+
+ int64_t start100s = taosGetTimestampMs();
+ int64_t start100sCt = taosHashGetCompTimes(hash100s);
+ for (int64_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(hash100s, name + (i % 50000) * 9, 9));
+ }
+ int64_t end100s = taosGetTimestampMs();
+ int64_t end100sCt = taosHashGetCompTimes(hash100s);
+
+ int64_t start1m = taosGetTimestampMs();
+ int64_t start1mCt = taosHashGetCompTimes(hash1m);
+ for (int64_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(hash1m, name + (i % 500000) * 9, 9));
+ }
+ int64_t end1m = taosGetTimestampMs();
+ int64_t end1mCt = taosHashGetCompTimes(hash1m);
+
+ int64_t start10m = taosGetTimestampMs();
+ int64_t start10mCt = taosHashGetCompTimes(hash10m);
+ for (int64_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(hash10m, name + (i % 5000000) * 9, 9));
+ }
+ int64_t end10m = taosGetTimestampMs();
+ int64_t end10mCt = taosHashGetCompTimes(hash10m);
+
+ int64_t start100m = taosGetTimestampMs();
+ int64_t start100mCt = taosHashGetCompTimes(hash100m);
+ for (int64_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(hash100m, name + (i % 50000000) * 9, 9));
+ }
+ int64_t end100m = taosGetTimestampMs();
+ int64_t end100mCt = taosHashGetCompTimes(hash100m);
+
+
+ SArray *sArray[1000] = {0};
+ for (int64_t i = 0; i < 1000; ++i) {
+ sArray[i] = taosArrayInit(100000, 9);
+ }
+ int64_t cap = 4;
+ while (cap < 100000000) cap = (cap << 1u);
+
+ _hash_fn_t hashFp = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ int32_t slotR = cap / 1000 + 1;
+ for (int64_t i = 0; i < 10000000; ++i) {
+ char* p = name + (i % 50000000) * 9;
+ uint32_t v = (*hashFp)(p, 9);
+ taosArrayPush(sArray[(v%cap)/slotR], p);
+ }
+ SArray *slArray = taosArrayInit(100000000, 9);
+ for (int64_t i = 0; i < 1000; ++i) {
+ int32_t num = taosArrayGetSize(sArray[i]);
+ SArray* pArray = sArray[i];
+ for (int64_t m = 0; m < num; ++m) {
+ char* p = (char*)taosArrayGet(pArray, m);
+ ASSERT(taosArrayPush(slArray, p));
+ }
+ }
+ int64_t start100mS = taosGetTimestampMs();
+ int64_t start100mSCt = taosHashGetCompTimes(hash100m);
+ int32_t num = taosArrayGetSize(slArray);
+ for (int64_t i = 0; i < num; ++i) {
+ ASSERT(taosHashGet(hash100m, (char*)TARRAY_GET_ELEM(slArray, i), 9));
+ }
+ int64_t end100mS = taosGetTimestampMs();
+ int64_t end100mSCt = taosHashGetCompTimes(hash100m);
+ for (int64_t i = 0; i < 1000; ++i) {
+ taosArrayDestroy(sArray[i]);
+ }
+ taosArrayDestroy(slArray);
+
+ printf("1h \t %" PRId64 "ms,%" PRId64 "\n", end1h - start1h, end1hCt - start1hCt);
+ printf("1s \t %" PRId64 "ms,%" PRId64 "\n", end1s - start1s, end1sCt - start1sCt);
+ printf("10s \t %" PRId64 "ms,%" PRId64 "\n", end10s - start10s, end10sCt - start10sCt);
+ printf("100s \t %" PRId64 "ms,%" PRId64 "\n", end100s - start100s, end100sCt - start100sCt);
+ printf("1m \t %" PRId64 "ms,%" PRId64 "\n", end1m - start1m, end1mCt - start1mCt);
+ printf("10m \t %" PRId64 "ms,%" PRId64 "\n", end10m - start10m, end10mCt - start10mCt);
+ printf("100m \t %" PRId64 "ms,%" PRId64 "\n", end100m - start100m, end100mCt - start100mCt);
+ printf("100mS \t %" PRId64 "ms,%" PRId64 "\n", end100mS - start100mS, end100mSCt - start100mSCt);
+
+ taosHashCleanup(hash1h);
+ taosHashCleanup(hash1s);
+ taosHashCleanup(hash10s);
+ taosHashCleanup(hash100s);
+ taosHashCleanup(hash1m);
+ taosHashCleanup(hash10m);
+ taosHashCleanup(hash100m);
+
+ SHashObj *mhash[1000] = {0};
+ for (int64_t i = 0; i < 1000; ++i) {
+ mhash[i] = (SHashObj*) taosHashInit(100000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ }
+
+ for (int64_t i = 0; i < 50000000; ++i) {
+#if 0
+ taosHashPut(mhash[i%1000], name + i * 9, 9, &i, sizeof(i));
+#else
+ taosHashPut(mhash[i/50000], name + i * 9, 9, &i, sizeof(i));
+#endif
+ }
+
+ int64_t startMhashCt = 0;
+ for (int64_t i = 0; i < 1000; ++i) {
+ startMhashCt += taosHashGetCompTimes(mhash[i]);
+ }
+
+ int64_t startMhash = taosGetTimestampMs();
+#if 0
+ for (int32_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(mhash[i%1000], name + i * 9, 9));
+ }
+#else
+// for (int64_t i = 0; i < 10000000; ++i) {
+ for (int64_t i = 0; i < 50000000; i+=5) {
+ ASSERT(taosHashGet(mhash[i/50000], name + i * 9, 9));
+ }
+#endif
+ int64_t endMhash = taosGetTimestampMs();
+ int64_t endMhashCt = 0;
+ for (int64_t i = 0; i < 1000; ++i) {
+ printf(" %" PRId64 , taosHashGetCompTimes(mhash[i]));
+ endMhashCt += taosHashGetCompTimes(mhash[i]);
+ }
+ printf("\n100m \t %" PRId64 "ms,%" PRId64 "\n", endMhash - startMhash, endMhashCt - startMhashCt);
+
+ for (int64_t i = 0; i < 1000; ++i) {
+ taosHashCleanup(mhash[i]);
+ }
+}
+
+
}
int main(int argc, char** argv) {
@@ -210,4 +405,5 @@ TEST(testCase, hashTest) {
noLockPerformanceTest();
multithreadsTest();
acquireRleaseTest();
+ //perfTest();
}
diff --git a/source/util/test/pageBufferTest.cpp b/source/util/test/pageBufferTest.cpp
index eaf198a483aa5e3e90595d2417516aa53f754331..1a057c5875ee95de2fc3c457ca09314366fff48c 100644
--- a/source/util/test/pageBufferTest.cpp
+++ b/source/util/test/pageBufferTest.cpp
@@ -18,7 +18,7 @@ void simpleTest() {
int32_t pageId = 0;
int32_t groupId = 0;
- SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId));
ASSERT_TRUE(pBufPage != NULL);
ASSERT_EQ(getTotalBufSize(pBuf), 1024);
@@ -29,26 +29,26 @@ void simpleTest() {
releaseBufPage(pBuf, pBufPage);
- SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t == pBufPage1);
- SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t1 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t1 == pBufPage2);
- SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t2 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t2 == pBufPage3);
- SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t3 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t3 == pBufPage4);
releaseBufPage(pBuf, pBufPage2);
- SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t4 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t4 == pBufPage5);
@@ -64,7 +64,7 @@ void writeDownTest() {
int32_t groupId = 0;
int32_t nx = 12345;
- SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId));
ASSERT_TRUE(pBufPage != NULL);
*(int32_t*)(pBufPage->data) = nx;
@@ -73,22 +73,22 @@ void writeDownTest() {
setBufPageDirty(pBufPage, true);
releaseBufPage(pBuf, pBufPage);
- SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t1 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t1 == pBufPage1);
ASSERT_TRUE(pageId == 1);
- SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t2 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t2 == pBufPage2);
ASSERT_TRUE(pageId == 2);
- SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t3 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t3 == pBufPage3);
ASSERT_TRUE(pageId == 3);
- SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t4 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t4 == pBufPage4);
ASSERT_TRUE(pageId == 4);
@@ -113,32 +113,32 @@ void recyclePageTest() {
int32_t groupId = 0;
int32_t nx = 12345;
- SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId));
ASSERT_TRUE(pBufPage != NULL);
releaseBufPage(pBuf, pBufPage);
- SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t1 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t1 == pBufPage1);
ASSERT_TRUE(pageId == 1);
- SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t2 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t2 == pBufPage2);
ASSERT_TRUE(pageId == 2);
- SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t3 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t3 == pBufPage3);
ASSERT_TRUE(pageId == 3);
- SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t4 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t4 == pBufPage4);
ASSERT_TRUE(pageId == 4);
releaseBufPage(pBuf, t4);
- SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t5 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t5 == pBufPage5);
ASSERT_TRUE(pageId == 5);
diff --git a/tests/docs-examples-test/jdbc.sh b/tests/docs-examples-test/jdbc.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d71085a40306956ea8d25e9b575c97ae9945df76
--- /dev/null
+++ b/tests/docs-examples-test/jdbc.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+pgrep taosd || taosd >> /dev/null 2>&1 &
+pgrep taosadapter || taosadapter >> /dev/null 2>&1 &
+cd ../../docs/examples/java
+
+mvn clean test > jdbc-out.log 2>&1
+tail -n 20 jdbc-out.log
+
+cases=`grep 'Tests run' jdbc-out.log | awk 'END{print $3}'`
+totalJDBCCases=`echo ${cases/%,}`
+failed=`grep 'Tests run' jdbc-out.log | awk 'END{print $5}'`
+JDBCFailed=`echo ${failed/%,}`
+error=`grep 'Tests run' jdbc-out.log | awk 'END{print $7}'`
+JDBCError=`echo ${error/%,}`
+
+totalJDBCFailed=`expr $JDBCFailed + $JDBCError`
+totalJDBCSuccess=`expr $totalJDBCCases - $totalJDBCFailed`
+
+if [ "$totalJDBCSuccess" -gt "0" ]; then
+ echo -e "\n${GREEN} ### Total $totalJDBCSuccess JDBC case(s) succeed! ### ${NC}"
+fi
+
+if [ "$totalJDBCFailed" -ne "0" ]; then
+ echo -e "\n${RED} ### Total $totalJDBCFailed JDBC case(s) failed! ### ${NC}"
+ exit 8
+fi
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py
index a8117ec04c79aff5c00dcfa604c1124854473d30..7071adb3a95f1c240d22a7dad6354d1e08b28400 100644
--- a/tests/pytest/tools/taosdumpTest2.py
+++ b/tests/pytest/tools/taosdumpTest2.py
@@ -11,15 +11,19 @@
# -*- coding: utf-8 -*-
+from logging.config import dictConfig
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
+import string
+import random
class TDTestCase:
+
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
@@ -47,12 +51,19 @@ class TDTestCase:
return ""
return paths[0]
+ def generateString(self, length):
+ chars = string.ascii_uppercase + string.ascii_lowercase
+ v = ""
+ for i in range(length):
+ v += random.choice(chars)
+ return v
+
def run(self):
if not os.path.exists("./taosdumptest/tmp"):
os.makedirs("./taosdumptest/tmp")
else:
- os.system("rm -rf ./taosdumptest/tmp")
- os.makedirs("./taosdumptest/tmp")
+ print("directory exists")
+ os.system("rm -rf ./taosdumptest/tmp/*")
tdSql.prepare()
@@ -76,17 +87,19 @@ class TDTestCase:
tdLog.info("taosdump found in %s" % binPath)
os.system("rm ./taosdumptest/tmp/*.sql")
+ os.system("rm ./taosdumptest/tmp/*.avro*")
+ os.system("rm -rf ./taosdumptest/taosdump.*")
os.system(
- "%s --databases db -o ./taosdumptest/tmp -B 32766 -L 1048576" %
+ "%s --databases db -o ./taosdumptest/tmp " %
binPath)
tdSql.execute("drop database db")
- tdSql.query("select * from information_schema.ins_databases")
+ tdSql.query("show databases")
tdSql.checkRows(2)
- os.system("%s -i ./taosdumptest/tmp" % binPath)
+ os.system("%s -i ./taosdumptest/tmp -y" % binPath)
- tdSql.query("select * from information_schema.ins_databases")
+ tdSql.query("show databases")
tdSql.checkRows(3)
tdSql.checkData(2, 0, 'db')
@@ -105,17 +118,17 @@ class TDTestCase:
"create table stb(ts timestamp, c1 binary(16374), c2 binary(16374), c3 binary(16374)) tags(t1 nchar(256))")
tdSql.execute(
"insert into t1 using stb tags('t1') values(now, '%s', '%s', '%s')" %
- ("16374",
- "16374",
- "16374"))
+ (self.generateString(16374),
+ self.generateString(16374),
+ self.generateString(16374)))
-# sys.exit(0)
os.system("rm ./taosdumptest/tmp/*.sql")
os.system("rm ./taosdumptest/tmp/*.avro*")
+ os.system("rm -rf ./taosdumptest/tmp/taosdump.*")
os.system("%s -D test -o ./taosdumptest/tmp -y" % binPath)
tdSql.execute("drop database test")
- tdSql.query("select * from information_schema.ins_databases")
+ tdSql.query("show databases")
tdSql.checkRows(3)
os.system("%s -i ./taosdumptest/tmp -y" % binPath)
diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py
index 33ef92bf735a5211044ebd37c3c8300abd8843a8..9ffebcbdad5f0fa07e26f1bb4d249643ab7bbe42 100644
--- a/tests/pytest/util/common.py
+++ b/tests/pytest/util/common.py
@@ -199,22 +199,22 @@ class TDCom:
res = requests.post(url, sql.encode("utf-8"), headers = self.preDefine()[0])
return res
- def cleanTb(self, type="taosc"):
+ def cleanTb(self, type="taosc", dbname="db"):
'''
type is taosc or restful
'''
- query_sql = "show stables"
+ query_sql = f"show {dbname}.stables"
res_row_list = tdSql.query(query_sql, True)
stb_list = map(lambda x: x[0], res_row_list)
for stb in stb_list:
if type == "taosc":
- tdSql.execute(f'drop table if exists `{stb}`')
+ tdSql.execute(f'drop table if exists {dbname}.`{stb}`')
if not stb[0].isdigit():
- tdSql.execute(f'drop table if exists {stb}')
+ tdSql.execute(f'drop table if exists {dbname}.{stb}')
elif type == "restful":
- self.restApiPost(f"drop table if exists `{stb}`")
+ self.restApiPost(f"drop table if exists {dbname}.`{stb}`")
if not stb[0].isdigit():
- self.restApiPost(f"drop table if exists {stb}")
+ self.restApiPost(f"drop table if exists {dbname}.{stb}")
def dateToTs(self, datetime_input):
return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index e530695d1e53c4628fb28175b308b67d149c16a3..89b7fe00ebb0cf04b4570643966d553a4bccea9b 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -36,9 +36,9 @@ class TDSimClient:
"rpcDebugFlag": "143",
"tmrDebugFlag": "131",
"cDebugFlag": "143",
- "udebugFlag": "143",
- "jnidebugFlag": "143",
- "qdebugFlag": "143",
+ "uDebugFlag": "143",
+ "jniDebugFlag": "143",
+ "qDebugFlag": "143",
"supportVnodes": "1024",
"telemetryReporting": "0",
}
@@ -134,7 +134,6 @@ class TDDnode:
"uDebugFlag": "131",
"sDebugFlag": "143",
"wDebugFlag": "143",
- "qdebugFlag": "143",
"numOfLogLines": "100000000",
"statusInterval": "1",
"supportVnodes": "1024",
@@ -484,7 +483,7 @@ class TDDnode:
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
-
+
onlyKillOnceWindows = 0
while(processID):
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py
index 753c41e094701271ca3b49a53eabde1461bd1e08..b320cf5995fd0063352f0da7a2dc04933022a7d2 100644
--- a/tests/pytest/util/sql.py
+++ b/tests/pytest/util/sql.py
@@ -102,7 +102,7 @@ class TDSql:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
- raise Exception(repr(e))
+ raise Exception(repr(e))
i+=1
time.sleep(1)
pass
@@ -225,25 +225,21 @@ class TDSql:
# suppose user want to check nanosecond timestamp if a longer data passed
if (len(data) >= 28):
if pd.to_datetime(self.queryResult[row][col]) == pd.to_datetime(data):
- tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
else:
if self.queryResult[row][col] == _parse_datetime(data):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
return
if str(self.queryResult[row][col]) == str(data):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
return
+
elif isinstance(data, float):
if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001:
- tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001:
- tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
@@ -254,21 +250,7 @@ class TDSql:
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
- if data is None:
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- elif isinstance(data, str):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- elif isinstance(data, datetime.date):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- elif isinstance(data, float):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- else:
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
def getData(self, row, col):
self.checkRowCol(row, col)
@@ -307,7 +289,7 @@ class TDSql:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
- raise Exception(repr(e))
+ raise Exception(repr(e))
i+=1
time.sleep(1)
pass
@@ -329,7 +311,7 @@ class TDSql:
tdLog.exit("%s(%d) failed: sql:%s, col_name_list:%s != expect_col_name_list:%s" % args)
def __check_equal(self, elm, expect_elm):
- if not type(elm) in(list, tuple) and elm == expect_elm:
+ if elm == expect_elm:
return True
if type(elm) in(list, tuple) and type(expect_elm) in(list, tuple):
if len(elm) != len(expect_elm):
diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c
index ada2039460b431363555025ec7984f6b2f1b354a..f39d5e6528275900350ffaefbee18d43ce9a9e81 100644
--- a/tests/script/api/batchprepare.c
+++ b/tests/script/api/batchprepare.c
@@ -2598,7 +2598,6 @@ void runAll(TAOS *taos) {
printf("%s Begin\n", gCaseCtrl.caseCatalog);
runCaseList(taos);
-#if 0
strcpy(gCaseCtrl.caseCatalog, "Micro DB precision Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
gCaseCtrl.precision = TIME_PRECISION_MICRO;
@@ -2654,7 +2653,6 @@ void runAll(TAOS *taos) {
gCaseCtrl.bindColNum = 6;
runCaseList(taos);
gCaseCtrl.bindColNum = 0;
-#endif
/*
strcpy(gCaseCtrl.caseCatalog, "Bind Col Type Test");
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 97295d75e072c3c561f579bfc8cb2c15489da858..46bae734ea72901ef704969045186a10c52a9a72 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -344,6 +344,7 @@
# --- scalar ----
./test.sh -f tsim/scalar/in.sim
./test.sh -f tsim/scalar/scalar.sim
+./test.sh -f tsim/scalar/filter.sim
# ---- alter ----
./test.sh -f tsim/alter/cached_schema_after_alter.sim
diff --git a/tests/script/tmp/monitor.sim b/tests/script/tmp/monitor.sim
index 8eb787e95035a106e0c1141a9f8d0de6584c26c3..b410e1b6ad99e8bd83dcf7dd3cf0f3c4961d0ad4 100644
--- a/tests/script/tmp/monitor.sim
+++ b/tests/script/tmp/monitor.sim
@@ -4,6 +4,7 @@ system sh/cfg.sh -n dnode1 -c monitorfqdn -v localhost
system sh/cfg.sh -n dnode1 -c monitorport -v 80
system sh/cfg.sh -n dnode1 -c monitorInterval -v 1
system sh/cfg.sh -n dnode1 -c monitorComp -v 1
+system sh/cfg.sh -n dnode1 -c uptimeInterval -v 3
#system sh/cfg.sh -n dnode1 -c supportVnodes -v 128
#system sh/cfg.sh -n dnode1 -c telemetryReporting -v 1
@@ -14,13 +15,13 @@ system sh/cfg.sh -n dnode1 -c monitorComp -v 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print =============== select * from information_schema.ins_dnodes
+print =============== create database
sql create database db vgroups 2;
sql use db;
sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd";
print =============== create drop qnode 1
sql create qnode on dnode 1
-sql create snode on dnode 1
-sql create bnode on dnode 1
+#sql create snode on dnode 1
+#sql create bnode on dnode 1
diff --git a/tests/script/tsim/compute/interval.sim b/tests/script/tsim/compute/interval.sim
index 4e7960ac4ae958f2e594fbad3bb6f7b50b13ed94..dc11c20ec925be39d12d2a7d1e92bbcb1da830b1 100644
--- a/tests/script/tsim/compute/interval.sim
+++ b/tests/script/tsim/compute/interval.sim
@@ -101,7 +101,7 @@ $ms = 1601481600000 + $cc
$cc = 1 * 60000
$ms2 = 1601481600000 - $cc
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $tb where ts <= $ms and ts > $ms2 interval(1m) fill(value,0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $tb where ts <= $ms and ts > $ms2 interval(1m) fill(value,0,0,0,0,0)
print ===> $rows
if $rows < 30 then
print expect greater than 30, actual: $rows
@@ -180,7 +180,7 @@ $ms1 = 1601481600000 + $cc
$cc = 1 * 60000
$ms2 = 1601481600000 - $cc
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $mt where ts <= $ms1 and ts > $ms2 interval(1m) fill(value, 0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $mt where ts <= $ms1 and ts > $ms2 interval(1m) fill(value, 0,0,0,0,0)
if $rows < 30 then
return -1
endi
diff --git a/tests/script/tsim/db/basic2.sim b/tests/script/tsim/db/basic2.sim
index b7ac0b5edd8663f653cc9216bceb1eee6054331e..4f0ba4a13c18f29a758a92318c2a66c133fd28f3 100644
--- a/tests/script/tsim/db/basic2.sim
+++ b/tests/script/tsim/db/basic2.sim
@@ -4,7 +4,7 @@ system sh/exec.sh -n dnode1 -s start
sql connect
print =============== conflict stb
-sql create database db vgroups 1;
+sql create database db vgroups 4;
sql use db;
sql create table stb (ts timestamp, i int) tags (j int);
sql_error create table stb using stb tags (1);
@@ -16,6 +16,9 @@ sql_error create table ctb (ts timestamp, i int) tags (j int);
sql create table ntb (ts timestamp, i int);
sql_error create table ntb (ts timestamp, i int) tags (j int);
+sql drop table ntb
+sql create table ntb (ts timestamp, i int) tags (j int);
+
sql drop database db
print =============== create database d1
diff --git a/tests/script/tsim/parser/alter1.sim b/tests/script/tsim/parser/alter1.sim
index 9d0049e45e5437d9d6de814b744d8fce3ccd876e..369419dcd9cd91688f39c27dbd54c33ee0699ae8 100644
--- a/tests/script/tsim/parser/alter1.sim
+++ b/tests/script/tsim/parser/alter1.sim
@@ -130,4 +130,4 @@ endi
# return -1
#endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/binary_escapeCharacter.sim b/tests/script/tsim/parser/binary_escapeCharacter.sim
index 0b437d8b04a39a400b25368263f88c2b846c155a..5a9c0e7bb1d2b141639a1408ffcc4ae064dd78f8 100644
--- a/tests/script/tsim/parser/binary_escapeCharacter.sim
+++ b/tests/script/tsim/parser/binary_escapeCharacter.sim
@@ -101,4 +101,4 @@ sql_error insert into tb values(now, '\');
#sql_error insert into tb values(now, '\\\n');
sql insert into tb values(now, '\n');
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/col_arithmetic_operation.sim b/tests/script/tsim/parser/col_arithmetic_operation.sim
index f22beefdf88c3d90bff8554cc44b5768bfef3d1e..9a2ba34c85e552585770bb42913b8c83ddd58131 100644
--- a/tests/script/tsim/parser/col_arithmetic_operation.sim
+++ b/tests/script/tsim/parser/col_arithmetic_operation.sim
@@ -132,4 +132,4 @@ sql_error select max(c1-c2) from $tb
print =====================> td-1764
sql select sum(c1)/count(*), sum(c1) as b, count(*) as b from $stb interval(1y)
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/columnValue_bigint.sim b/tests/script/tsim/parser/columnValue_bigint.sim
index 2cf0151a053f3c62b7ab904156361c8705dc554e..0a024029a534232aa5829d0cf59807f8c65d71f2 100644
--- a/tests/script/tsim/parser/columnValue_bigint.sim
+++ b/tests/script/tsim/parser/columnValue_bigint.sim
@@ -373,7 +373,7 @@ sql_error insert into st_bigint_e7 values (now, "123abc")
sql_error insert into st_bigint_e9 values (now, abc)
sql_error insert into st_bigint_e10 values (now, "abc")
sql_error insert into st_bigint_e11 values (now, " ")
-sql insert into st_bigint_e12 values (now, '')
+sql_error insert into st_bigint_e12 values (now, '')
sql_error insert into st_bigint_e13 using mt_bigint tags (033) values (now, 9223372036854775808)
sql insert into st_bigint_e14 using mt_bigint tags (033) values (now, -9223372036854775808)
@@ -386,7 +386,7 @@ sql_error insert into st_bigint_e20 using mt_bigint tags (033) values (now, "123
sql_error insert into st_bigint_e22 using mt_bigint tags (033) values (now, abc)
sql_error insert into st_bigint_e23 using mt_bigint tags (033) values (now, "abc")
sql_error insert into st_bigint_e24 using mt_bigint tags (033) values (now, " ")
-sql insert into st_bigint_e25 using mt_bigint tags (033) values (now, '')
+sql_error insert into st_bigint_e25 using mt_bigint tags (033) values (now, '')
sql_error insert into st_bigint_e13_0 using mt_bigint tags (9223372036854775808) values (now, -033)
sql insert into st_bigint_e14_0 using mt_bigint tags (-9223372036854775808) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_double.sim b/tests/script/tsim/parser/columnValue_double.sim
index da09b77353fc12f237e4fb94dc99b69bd8bec9c1..bfcf338faca3669b18b2e821f10b5e3b4b0f328e 100644
--- a/tests/script/tsim/parser/columnValue_double.sim
+++ b/tests/script/tsim/parser/columnValue_double.sim
@@ -476,7 +476,7 @@ sql_error insert into st_double_e7 values (now, "123abc")
sql_error insert into st_double_e9 values (now, abc)
sql_error insert into st_double_e10 values (now, "abc")
sql_error insert into st_double_e11 values (now, " ")
-sql insert into st_double_e12 values (now, '')
+sql_error insert into st_double_e12 values (now, '')
sql_error insert into st_double_e13 using mt_double tags (033) values (now, 11.7976931348623157e+308)
sql_error insert into st_double_e14 using mt_double tags (033) values (now, -11.7976931348623157e+308)
@@ -489,7 +489,7 @@ sql_error insert into st_double_e20 using mt_double tags (033) values (now, "123
sql_error insert into st_double_e22 using mt_double tags (033) values (now, abc)
sql_error insert into st_double_e23 using mt_double tags (033) values (now, "abc")
sql_error insert into st_double_e24 using mt_double tags (033) values (now, " ")
-sql insert into st_double_e25_1 using mt_double tags (033) values (now, '')
+sql_error insert into st_double_e25_1 using mt_double tags (033) values (now, '')
sql_error insert into st_double_e13 using mt_double tags (31.7976931348623157e+308) values (now, -033)
sql_error insert into st_double_e14 using mt_double tags (-31.7976931348623157e+308) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_float.sim b/tests/script/tsim/parser/columnValue_float.sim
index 3e20e178c373b9bf55b4be0e666e27e3d5787447..b2db7dff2b875d8839e1bdf356c40ba4d6d9e916 100644
--- a/tests/script/tsim/parser/columnValue_float.sim
+++ b/tests/script/tsim/parser/columnValue_float.sim
@@ -506,7 +506,7 @@ sql_error insert into st_float_e7 values (now, "123abc")
sql_error insert into st_float_e9 values (now, abc)
sql_error insert into st_float_e10 values (now, "abc")
sql_error insert into st_float_e11 values (now, " ")
-sql insert into st_float_e12 values (now, '')
+sql_error insert into st_float_e12 values (now, '')
sql_error insert into st_float_e13 using mt_float tags (033) values (now, 3.50282347e+38)
sql_error insert into st_float_e14 using mt_float tags (033) values (now, -3.50282347e+38)
@@ -519,7 +519,7 @@ sql_error insert into st_float_e20 using mt_float tags (033) values (now, "123ab
sql_error insert into st_float_e22 using mt_float tags (033) values (now, abc)
sql_error insert into st_float_e23 using mt_float tags (033) values (now, "abc")
sql_error insert into st_float_e24 using mt_float tags (033) values (now, " ")
-sql insert into st_float_e25_1 using mt_float tags (033) values (now, '')
+sql_error insert into st_float_e25_1 using mt_float tags (033) values (now, '')
sql_error insert into st_float_e13 using mt_float tags (3.50282347e+38) values (now, -033)
sql_error insert into st_float_e14 using mt_float tags (-3.50282347e+38) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_int.sim b/tests/script/tsim/parser/columnValue_int.sim
index 009fbd1ede718245b6145b9a292d9c325d2a872c..4a3b8ebd0ba46a08ee737de44cf07c1cb10aa555 100644
--- a/tests/script/tsim/parser/columnValue_int.sim
+++ b/tests/script/tsim/parser/columnValue_int.sim
@@ -371,7 +371,7 @@ sql_error insert into st_int_e7 values (now, "123abc")
sql_error insert into st_int_e9 values (now, abc)
sql_error insert into st_int_e10 values (now, "abc")
sql_error insert into st_int_e11 values (now, " ")
-sql insert into st_int_e12 values (now, '')
+sql_error insert into st_int_e12 values (now, '')
sql_error insert into st_int_e13 using mt_int tags (033) values (now, 2147483648)
sql insert into st_int_e14 using mt_int tags (033) values (now, -2147483648)
@@ -384,7 +384,7 @@ sql_error insert into st_int_e20 using mt_int tags (033) values (now, "123abc")
sql_error insert into st_int_e22 using mt_int tags (033) values (now, abc)
sql_error insert into st_int_e23 using mt_int tags (033) values (now, "abc")
sql_error insert into st_int_e24 using mt_int tags (033) values (now, " ")
-sql insert into st_int_e25 using mt_int tags (033) values (now, '')
+sql_error insert into st_int_e25 using mt_int tags (033) values (now, '')
sql_error insert into st_int_e13 using mt_int tags (2147483648) values (now, -033)
sql insert into st_int_e14_1 using mt_int tags (-2147483648) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_smallint.sim b/tests/script/tsim/parser/columnValue_smallint.sim
index 0dcb0d85f4f9dad62dc71aa7fd2df916c6fd7a63..eb364f36302df811549a968eaf5bf05a823b84b3 100644
--- a/tests/script/tsim/parser/columnValue_smallint.sim
+++ b/tests/script/tsim/parser/columnValue_smallint.sim
@@ -374,7 +374,7 @@ sql_error insert into st_smallint_e7 values (now, "123abc")
sql_error insert into st_smallint_e9 values (now, abc)
sql_error insert into st_smallint_e10 values (now, "abc")
sql_error insert into st_smallint_e11 values (now, " ")
-sql insert into st_smallint_e12 values (now, '')
+sql_error insert into st_smallint_e12 values (now, '')
sql_error insert into st_smallint_e13 using mt_smallint tags (033) values (now, 32768)
sql insert into st_smallint_e14_1 using mt_smallint tags (033) values (now, -32768)
@@ -387,7 +387,7 @@ sql_error insert into st_smallint_e20 using mt_smallint tags (033) values (now,
sql_error insert into st_smallint_e22 using mt_smallint tags (033) values (now, abc)
sql_error insert into st_smallint_e23 using mt_smallint tags (033) values (now, "abc")
sql_error insert into st_smallint_e24 using mt_smallint tags (033) values (now, " ")
-sql insert into st_smallint_e25_1 using mt_smallint tags (033) values (now, '')
+sql_error insert into st_smallint_e25_1 using mt_smallint tags (033) values (now, '')
sql_error insert into st_smallint_e13 using mt_smallint tags (32768) values (now, -033)
sql insert into st_smallint_e14 using mt_smallint tags (-32768) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_tinyint.sim b/tests/script/tsim/parser/columnValue_tinyint.sim
index 62ae4e5228f94e466dd0bc44e681b7b4b940fcdb..d7938aa739fb3584c8fd549005bd02e44ac39bc0 100644
--- a/tests/script/tsim/parser/columnValue_tinyint.sim
+++ b/tests/script/tsim/parser/columnValue_tinyint.sim
@@ -372,7 +372,7 @@ sql_error insert into st_tinyint_e7 values (now, "123abc")
sql_error insert into st_tinyint_e9 values (now, abc)
sql_error insert into st_tinyint_e10 values (now, "abc")
sql_error insert into st_tinyint_e11 values (now, " ")
-sql insert into st_tinyint_e12 values (now, '')
+sql_error insert into st_tinyint_e12 values (now, '')
sql_error insert into st_tinyint_e13 using mt_tinyint tags (033) values (now, 128)
sql insert into st_tinyint_e14_1 using mt_tinyint tags (033) values (now, -128)
@@ -385,7 +385,7 @@ sql_error insert into st_tinyint_e20 using mt_tinyint tags (033) values (now, "1
sql_error insert into st_tinyint_e22 using mt_tinyint tags (033) values (now, abc)
sql_error insert into st_tinyint_e23 using mt_tinyint tags (033) values (now, "abc")
sql_error insert into st_tinyint_e24 using mt_tinyint tags (033) values (now, " ")
-sql insert into st_tinyint_e25_2 using mt_tinyint tags (033) values (now, '')
+sql_error insert into st_tinyint_e25_2 using mt_tinyint tags (033) values (now, '')
sql_error insert into st_tinyint_e13 using mt_tinyint tags (128) values (now, -033)
sql insert into st_tinyint_e14 using mt_tinyint tags (-128) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_unsign.sim b/tests/script/tsim/parser/columnValue_unsign.sim
index 85ff490bf4e520cdbbc0ed0008499af4425b2b93..7ae1b20eca18236c71277ae2c94a0976181a271a 100644
--- a/tests/script/tsim/parser/columnValue_unsign.sim
+++ b/tests/script/tsim/parser/columnValue_unsign.sim
@@ -129,4 +129,4 @@ if $rows != 1 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/fill.sim b/tests/script/tsim/parser/fill.sim
index f688d815e79fb76ce536fd75a1312230306dda41..4892345e12ed4b22a1c3d96ae2e6233e7e9fe642 100644
--- a/tests/script/tsim/parser/fill.sim
+++ b/tests/script/tsim/parser/fill.sim
@@ -47,31 +47,10 @@ $tsu = $tsu + $ts0
## fill syntax test
# number of fill values exceeds number of selected columns
-sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
-if $data11 != 6 then
- return -1
-endi
-if $data12 != 6 then
- return -1
-endi
-if $data13 != 6.00000 then
- return -1
-endi
-if $data14 != 6.000000000 then
- return -1
-endi
+sql_error select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
# number of fill values is smaller than number of selected columns
-sql select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
-if $data11 != 6 then
- return -1
-endi
-if $data12 != 6 then
- return -1
-endi
-if $data13 != 6.00000 then
- return -1
-endi
+sql_error select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
# unspecified filling method
sql_error select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6)
@@ -182,7 +161,7 @@ endi
# min_with_fill
print min_with_fill
-sql select _wstart, min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6)
if $rows != 9 then
return -1
endi
@@ -216,7 +195,7 @@ endi
# first_with_fill
print first_with_fill
-sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6')
if $rows != 9 then
return -1
endi
@@ -305,7 +284,7 @@ endi
# last_with_fill
print last_with_fill
-sql select _wstart, last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6')
if $rows != 9 then
return -1
endi
@@ -351,7 +330,7 @@ if $data11 != -1 then
endi
# fill_char_values_to_arithmetic_fields
-sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
+sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
# fill_multiple_columns
sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
@@ -376,37 +355,25 @@ endi
# fill_into_nonarithmetic_fieds
print select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-if $data01 != 1 then
- return -1
-endi
-if $data11 != NULL then
- return -1
-endi
+sql_error select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
# fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24
# fill values into binary or nchar columns will be set to NULL automatically Note:2018-10-24
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')
# fill nonarithmetic values into arithmetic fields
sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
-sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
+sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
print select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
-sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
-if $rows != 9 then
- return -1
-endi
-if $data01 != 1 then
- return -1
-endi
+sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
if $rows != 9 then
@@ -416,13 +383,7 @@ if $data01 != 1 then
return -1
endi
-sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
-if $rows != 9 then
- return -1
-endi
-if $data01 != 1 then
- return -1
-endi
+sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
## linear fill
# feature currently switched off 2018/09/29
@@ -859,8 +820,8 @@ sql insert into tm0 values('2020-1-1 1:3:8', 8);
sql insert into tm0 values('2020-1-1 1:3:9', 9);
sql insert into tm0 values('2020-1-1 1:4:10', 10);
-print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85);
-sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85);
+print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90);
+sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90);
if $rows != 8 then
return -1
endi
@@ -958,14 +919,14 @@ if $data12 != NULL then
return -1
endi
-sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 4:2:15' interval(500a) fill(value, 99,91,90,89,88,87,86,85) ;
+sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 4:2:15' interval(500a) fill(value, 99,91,90) ;
if $rows != 21749 then
print expect 21749, actual: $rows
return -1
endi
-print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85) ;
-sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85) ;
+print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89) ;
+sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89) ;
if $rows != 8 then
return -1
endi
diff --git a/tests/script/tsim/parser/fill_stb.sim b/tests/script/tsim/parser/fill_stb.sim
index 656b1ac94e8e0954e98b1d10692afc5d696bfd64..6c61631aa8b3a682b75317943ddeb3642720f588 100644
--- a/tests/script/tsim/parser/fill_stb.sim
+++ b/tests/script/tsim/parser/fill_stb.sim
@@ -279,7 +279,7 @@ endi
#endi
## linear fill
-sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(linear)
+sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(linear)
$val = $rowNum * 2
$val = $val - 1
$val = $val * $tbNum
diff --git a/tests/script/tsim/parser/fill_us.sim b/tests/script/tsim/parser/fill_us.sim
index 0a45c02f58a039baa22d5c71fff04d8e56a6fed6..f760ba3577281fa358e0da9180624b7de2e69b76 100644
--- a/tests/script/tsim/parser/fill_us.sim
+++ b/tests/script/tsim/parser/fill_us.sim
@@ -48,32 +48,11 @@ $tsu = $tsu + $ts0
## fill syntax test
# number of fill values exceeds number of selected columns
print select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
-sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
-if $data11 != 6 then
- return -1
-endi
-if $data12 != 6 then
- return -1
-endi
-if $data13 != 6.00000 then
- return -1
-endi
-if $data14 != 6.000000000 then
- return -1
-endi
+sql_error select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
# number of fill values is smaller than number of selected columns
print sql select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
-sql select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
-if $data11 != 6 then
- return -1
-endi
-if $data12 != 6 then
- return -1
-endi
-if $data13 != 6.00000 then
- return -1
-endi
+sql_error select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
# unspecified filling method
sql_error select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6)
@@ -185,7 +164,7 @@ endi
# min_with_fill
print min_with_fill
-sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6)
if $rows != 9 then
return -1
endi
@@ -219,7 +198,7 @@ endi
# first_with_fill
print first_with_fill
-sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6')
if $rows != 9 then
return -1
endi
@@ -308,7 +287,7 @@ endi
# last_with_fill
print last_with_fill
-sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6')
if $rows != 9 then
return -1
endi
@@ -353,7 +332,7 @@ if $data11 != -1 then
endi
# fill_char_values_to_arithmetic_fields
-sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
+sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
# fill_multiple_columns
sql_error select _wstart, sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
@@ -379,34 +358,24 @@ endi
# fill_into_nonarithmetic_fieds
-sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-#if $data11 != 20000000 then
-if $data11 != NULL then
- return -1
-endi
+sql_error select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
# fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24
# fill values into binary or nchar columns will be set to null automatically Note:2018-10-24
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')
# fill nonarithmetic values into arithmetic fields
sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
-sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
+sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
-sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
-if $rows != 9 then
- return -1
-endi
-if $data01 != 1 then
- return -1
-endi
+sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
if $rows != 9 then
@@ -416,14 +385,7 @@ if $data01 != 1 then
return -1
endi
-sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
-if $rows != 9 then
- return -1
-endi
-if $data01 != 1 then
- return -1
-endi
-
+sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
## linear fill
# feature currently switched off 2018/09/29
diff --git a/tests/script/tsim/parser/groupby.sim b/tests/script/tsim/parser/groupby.sim
index 12a698b1ccb2273d10c1831948103ab88f494d54..4ee9c530a79c72ccac12a99922af1eeefc7485ed 100644
--- a/tests/script/tsim/parser/groupby.sim
+++ b/tests/script/tsim/parser/groupby.sim
@@ -557,7 +557,7 @@ if $data10 != @{slop:0.000000, intercept:1.000000}@ then
return -1
endi
-if $data90 != @{slop:0.000000, intercept:9.000000}@ then
+if $data90 != @{slop:0.000000, intercept:17.000000}@ then
return -1
endi
diff --git a/tests/script/tsim/parser/import_file.sim b/tests/script/tsim/parser/import_file.sim
index e031e0249dd5a3b9efec7b9fed2505671f645e2c..37dc0c447623a8ea54f8d0e7228e38749e7a41be 100644
--- a/tests/script/tsim/parser/import_file.sim
+++ b/tests/script/tsim/parser/import_file.sim
@@ -69,4 +69,4 @@ endi
system rm -f $inFileName
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/repeatAlter.sim b/tests/script/tsim/parser/repeatAlter.sim
index d28a03e193a031ee95b5d237481de8ed31651877..b4012048cc314682e6bdb971a8e4a97fb1c2ca65 100644
--- a/tests/script/tsim/parser/repeatAlter.sim
+++ b/tests/script/tsim/parser/repeatAlter.sim
@@ -6,4 +6,4 @@ while $i <= $loops
$i = $i + 1
endw
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/select_from_cache_disk.sim b/tests/script/tsim/parser/select_from_cache_disk.sim
index 0983e36a3a579f88bdb429e9ad62a67c4fe6823b..3c0b13c6388c2386da011b2576262b65a6f018d5 100644
--- a/tests/script/tsim/parser/select_from_cache_disk.sim
+++ b/tests/script/tsim/parser/select_from_cache_disk.sim
@@ -60,4 +60,4 @@ if $data12 != 1 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/single_row_in_tb.sim b/tests/script/tsim/parser/single_row_in_tb.sim
index 1bd53ad24ef17c89bf5bfd1ddec9ed78b969cf9a..e7b4c9a871b4d8409a8a1624ff83b71fb77a77c2 100644
--- a/tests/script/tsim/parser/single_row_in_tb.sim
+++ b/tests/script/tsim/parser/single_row_in_tb.sim
@@ -33,4 +33,4 @@ print ================== server restart completed
run tsim/parser/single_row_in_tb_query.sim
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/single_row_in_tb_query.sim b/tests/script/tsim/parser/single_row_in_tb_query.sim
index 422756b798cbeb1b3e70d14d952457df0e54a202..37e193f9d202c0f94748342f3a67e1565e8490d3 100644
--- a/tests/script/tsim/parser/single_row_in_tb_query.sim
+++ b/tests/script/tsim/parser/single_row_in_tb_query.sim
@@ -195,4 +195,4 @@ endi
print ===============>safty check TD-4927
sql select first(ts, c1) from sr_stb where ts<1 group by t1;
-sql select first(ts, c1) from sr_stb where ts>0 and ts<1;
\ No newline at end of file
+sql select first(ts, c1) from sr_stb where ts>0 and ts<1;
diff --git a/tests/script/tsim/parser/slimit_query.sim b/tests/script/tsim/parser/slimit_query.sim
index 1e04a31099b0a9d948d1fd5fff229b0db940390c..acf0489d3c667834f630b41977240da86dcf4cfd 100644
--- a/tests/script/tsim/parser/slimit_query.sim
+++ b/tests/script/tsim/parser/slimit_query.sim
@@ -93,25 +93,25 @@ if $rows != 3 then
endi
### slimit + fill
-sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 5 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 0 offset 0
+sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 5 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 0 offset 0
if $rows != 0 then
return -1
endi
-sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 0
-print select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 0
+sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 0
+print select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 0
print $rows $data00 $data01 $data02 $data03
if $rows != 8 then
return -1
endi
# desc
-sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 0
+sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 0
if $rows != 8 then
return -1
endi
-sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 598
+sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 598
if $rows != 4 then
return -1
endi
diff --git a/tests/script/tsim/parser/timestamp_query.sim b/tests/script/tsim/parser/timestamp_query.sim
index 6e92dbcb3ab28518dc452e474aee955a3003c596..24058cbc84912033b41f49b3e05ee2fecbe4d221 100644
--- a/tests/script/tsim/parser/timestamp_query.sim
+++ b/tests/script/tsim/parser/timestamp_query.sim
@@ -28,7 +28,7 @@ sql select * from ts_stb0 where ts <> $ts0
##### select from supertable
$tb = $tbPrefix . 0
-sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, -1)
+sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, -1, -1)
$res = $rowNum * 2
$n = $res - 2
print ============>$n
@@ -47,7 +47,7 @@ if $data13 != 598.000000000 then
return -1
endi
-sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, NULL)
+sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, NULL, NULL)
if $data13 != 598.000000000 then
print expect 598.000000000, actual $data03
return -1
diff --git a/tests/script/tsim/query/complex_group.sim b/tests/script/tsim/query/complex_group.sim
index 3dad8059cd148504118d56a63f60b25247dc0fb6..d7d14c0ee82b3e10e06f509b4e6a7821be9c901f 100644
--- a/tests/script/tsim/query/complex_group.sim
+++ b/tests/script/tsim/query/complex_group.sim
@@ -454,4 +454,4 @@ if $rows != 1 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/complex_having.sim b/tests/script/tsim/query/complex_having.sim
index 9e28c3803e373e1d973b34c39573b4a7ec4f13f3..4c0af6d10c2d796638be619c6092618217b01257 100644
--- a/tests/script/tsim/query/complex_having.sim
+++ b/tests/script/tsim/query/complex_having.sim
@@ -365,4 +365,4 @@ if $rows != 1 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/complex_limit.sim b/tests/script/tsim/query/complex_limit.sim
index 2a90e7ff1d1f1a4ba25f79a94339219f3d4f5683..acb133f6504f8076161476cfcf6b8f73493157fc 100644
--- a/tests/script/tsim/query/complex_limit.sim
+++ b/tests/script/tsim/query/complex_limit.sim
@@ -508,4 +508,4 @@ if $rows != 1 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/complex_select.sim b/tests/script/tsim/query/complex_select.sim
index f4c9877bfd4c32622238cf21eafac8c35aaafa19..b7697e5cab0e654a40dd16f55f57cfbba4c5653e 100644
--- a/tests/script/tsim/query/complex_select.sim
+++ b/tests/script/tsim/query/complex_select.sim
@@ -558,4 +558,4 @@ if $data00 != 33 then
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/complex_where.sim b/tests/script/tsim/query/complex_where.sim
index bda1c036f02ded7953f8049a46318479b5feb106..847f67ed3461a88c16e1697386f8ee0d6f91d438 100644
--- a/tests/script/tsim/query/complex_where.sim
+++ b/tests/script/tsim/query/complex_where.sim
@@ -669,4 +669,4 @@ if $rows != 1 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/crash_sql.sim b/tests/script/tsim/query/crash_sql.sim
index 1d20491869db719c84065fb6a765268c7366c80b..79a9165e6602b1e8b1931e0f3ad9bf7d0168450f 100644
--- a/tests/script/tsim/query/crash_sql.sim
+++ b/tests/script/tsim/query/crash_sql.sim
@@ -76,7 +76,7 @@ sql insert into ct4 values ( '2022-05-21 01:01:01.000', NULL, NULL, NULL, NULL,
print ================ start query ======================
-print ================ SQL used to cause taosd or taos shell crash
+print ================ SQL used to cause taosd or TDengine CLI crash
sql_error select sum(c1) ,count(c1) from ct4 group by c1 having sum(c10) between 0 and 1 ;
-#system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+#system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/diff.sim b/tests/script/tsim/query/diff.sim
index f0d82b01e92bdffc06f951a5d3911ae4338037d9..badd139a9f7b25aa4192e3f97b0cefe825efc597 100644
--- a/tests/script/tsim/query/diff.sim
+++ b/tests/script/tsim/query/diff.sim
@@ -25,17 +25,17 @@ $i = 0
while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( $i )
-
+
$x = 0
while $x < $rowNum
$cc = $x * 60000
$ms = 1601481600000 + $cc
- sql insert into $tb values ($ms , $x )
+ sql insert into $tb values ($ms , $x )
$x = $x + 1
- endw
-
+ endw
+
$i = $i + 1
-endw
+endw
sleep 100
@@ -61,7 +61,7 @@ sql select _rowts, diff(tbcol) from $tb where ts > $ms
print ===> rows: $rows
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
-if $data11 != 1 then
+if $data11 != 1 then
return -1
endi
@@ -72,7 +72,7 @@ sql select _rowts, diff(tbcol) from $tb where ts <= $ms
print ===> rows: $rows
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
-if $data11 != 1 then
+if $data11 != 1 then
return -1
endi
@@ -82,7 +82,7 @@ sql select _rowts, diff(tbcol) as b from $tb
print ===> rows: $rows
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
-if $data11 != 1 then
+if $data11 != 1 then
return -1
endi
@@ -107,4 +107,4 @@ if $rows != 2 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/explain.sim b/tests/script/tsim/query/explain.sim
index 30a857815ceea75b399c1cf37c351ff80e37189d..2871252d91b822e02911931bf2c8a848472a5e9d 100644
--- a/tests/script/tsim/query/explain.sim
+++ b/tests/script/tsim/query/explain.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database db1 vgroups 3;
sql use db1;
sql select * from information_schema.ins_databases;
@@ -30,7 +30,7 @@ sql insert into tb4 values (now, 4, "Bitmap Heap Scan on tenk1 t1 (cost=5.07..2
#sql insert into tb4 values (now, 4, "Bitmap Heap Scan on tenk1 t1 (cost=5.07..229.20 rows=101 width=244) (actual time=0.080..0.526 rows=100 loops=1)");
-print ======== step2
+print ======== step2
sql explain select * from st1 where -2;
sql explain select ts from tb1;
sql explain select * from st1;
@@ -41,14 +41,14 @@ sql explain select count(*),sum(f1) from st1;
sql explain select count(*),sum(f1) from st1 group by f1;
#sql explain select count(f1) from tb1 interval(10s, 2s) sliding(3s) fill(prev);
-print ======== step3
+print ======== step3
sql explain verbose true select * from st1 where -2;
sql explain verbose true select ts from tb1 where f1 > 0;
sql explain verbose true select * from st1 where f1 > 0 and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00';
sql explain verbose true select count(*) from st1 partition by tbname slimit 1 soffset 2 limit 2 offset 1;
sql explain verbose true select * from information_schema.ins_stables where db_name='db2';
-print ======== step4
+print ======== step4
sql explain analyze select ts from st1 where -2;
sql explain analyze select ts from tb1;
sql explain analyze select ts from st1;
@@ -59,7 +59,7 @@ sql explain analyze select count(*),sum(f1) from tb1;
sql explain analyze select count(*),sum(f1) from st1;
sql explain analyze select count(*),sum(f1) from st1 group by f1;
-print ======== step5
+print ======== step5
sql explain analyze verbose true select ts from st1 where -2;
sql explain analyze verbose true select ts from tb1;
sql explain analyze verbose true select ts from st1;
@@ -87,12 +87,12 @@ sql explain analyze verbose true select count(f1) from st1 group by tbname;
#sql explain select * from tb1, tb2 where tb1.ts=tb2.ts;
#sql explain select * from st1, st2 where tb1.ts=tb2.ts;
#sql explain analyze verbose true select sum(a+b) from (select _rowts, min(f1) b,count(*) a from st1 where f1 > 0 interval(1a)) where a < 0 interval(1s);
-#sql explain select min(f1) from st1 interval(1m, 2a) sliding(30s);
+#sql explain select min(f1) from st1 interval(1m, 2a) sliding(30s);
#sql explain verbose true select count(*),sum(f1) from st1 where f1 > 0 and ts > '2021-10-31 00:00:00' group by f1 having sum(f1) > 0;
-#sql explain analyze select min(f1) from st1 interval(3m, 2a) sliding(1m);
+#sql explain analyze select min(f1) from st1 interval(3m, 2a) sliding(1m);
#sql explain analyze select count(f1) from tb1 interval(10s, 2s) sliding(3s) fill(prev);
#sql explain analyze verbose true select count(*),sum(f1) from st1 where f1 > 0 and ts > '2021-10-31 00:00:00' group by f1 having sum(f1) > 0;
-#sql explain analyze verbose true select min(f1) from st1 interval(3m, 2a) sliding(1m);
+#sql explain analyze verbose true select min(f1) from st1 interval(3m, 2a) sliding(1m);
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/interval.sim b/tests/script/tsim/query/interval.sim
index cc8a73daec1ad54fb1448480b0efd317bbd09be9..833da4a8ba2b3daf495167f06c99d222564a6bf3 100644
--- a/tests/script/tsim/query/interval.sim
+++ b/tests/script/tsim/query/interval.sim
@@ -177,4 +177,4 @@ print =============== clear
# return -1
#endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/scalarFunction.sim b/tests/script/tsim/query/scalarFunction.sim
index 103e66e54e674c10e3fbe3bd88e044ffe7d0041d..1b8115fec635832116f722ce1fb22810d817a0b7 100644
--- a/tests/script/tsim/query/scalarFunction.sim
+++ b/tests/script/tsim/query/scalarFunction.sim
@@ -33,7 +33,7 @@ print =============== create normal table
sql create table ntb (ts timestamp, c1 int, c2 float, c3 double)
sql show tables
-if $rows != 101 then
+if $rows != 101 then
return -1
endi
@@ -444,7 +444,7 @@ if $loop_test == 0 then
print =============== stop and restart taosd
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
-
+
$loop_cnt = 0
check_dnode_ready_0:
$loop_cnt = $loop_cnt + 1
@@ -462,7 +462,7 @@ if $loop_test == 0 then
goto check_dnode_ready_0
endi
- $loop_test = 1
+ $loop_test = 1
goto loop_test_pos
endi
diff --git a/tests/script/tsim/query/scalarNull.sim b/tests/script/tsim/query/scalarNull.sim
index ec95c94f23c12babb06b25b06ce140c9a4a5368a..6abe3d62d9b1aaf88872054c5bd040098400debb 100644
--- a/tests/script/tsim/query/scalarNull.sim
+++ b/tests/script/tsim/query/scalarNull.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database db1 vgroups 3;
sql use db1;
sql select * from information_schema.ins_databases;
diff --git a/tests/script/tsim/query/session.sim b/tests/script/tsim/query/session.sim
index 158448d76537947d1f6a0fb8d9569becc33fcdd8..b6eb4ed3aa2aae6873eed4fb0c8056c95ebe6bb6 100644
--- a/tests/script/tsim/query/session.sim
+++ b/tests/script/tsim/query/session.sim
@@ -35,8 +35,8 @@ sql INSERT INTO dev_001 VALUES('2020-05-13 13:00:00.001', 12)
sql INSERT INTO dev_001 VALUES('2020-05-14 13:00:00.001', 13)
sql INSERT INTO dev_001 VALUES('2020-05-15 14:00:00.000', 14)
sql INSERT INTO dev_001 VALUES('2020-05-20 10:00:00.000', 15)
-sql INSERT INTO dev_001 VALUES('2020-05-27 10:00:00.001', 16)
-
+sql INSERT INTO dev_001 VALUES('2020-05-27 10:00:00.001', 16)
+
sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.000', 1)
sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.005', 2)
sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.009', 3)
@@ -46,7 +46,7 @@ sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.036', 6)
sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.51', 7)
# vnode does not return the precision of the table
-print ====> create database d1 precision 'us'
+print ====> create database d1 precision 'us'
sql create database d1 precision 'us'
sql use d1
sql create table dev_001 (ts timestamp ,i timestamp ,j int)
@@ -54,7 +54,7 @@ sql insert into dev_001 values(1623046993681000,now,1)(1623046993681001,now+1s,2
sql create table secondts(ts timestamp,t2 timestamp,i int)
sql insert into secondts values(1623046993681000,now,1)(1623046993681001,now+1s,2)(1623046993681002,now+2s,3)(1623046993681004,now+5s,4)
-$loop_test = 0
+$loop_test = 0
loop_test_pos:
sql use $dbNamme
@@ -299,7 +299,7 @@ if $loop_test == 0 then
print =============== stop and restart taosd
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
-
+
$loop_cnt = 0
check_dnode_ready_0:
$loop_cnt = $loop_cnt + 1
@@ -317,7 +317,7 @@ if $loop_test == 0 then
goto check_dnode_ready_0
endi
- $loop_test = 1
+ $loop_test = 1
goto loop_test_pos
endi
diff --git a/tests/script/tsim/query/stddev.sim b/tests/script/tsim/query/stddev.sim
index d61c7273e19ebee84cd0117a9faf163c3a854005..b45c7d80a3edd8319f199e07fd607ab4f474df23 100644
--- a/tests/script/tsim/query/stddev.sim
+++ b/tests/script/tsim/query/stddev.sim
@@ -409,4 +409,4 @@ if $rows != 2 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/time_process.sim b/tests/script/tsim/query/time_process.sim
index b3c0e9561f149445a7ae75036736bbf6f8eaf4a4..83a64458465d6d978a38a206b2a7b223cb2bf45d 100644
--- a/tests/script/tsim/query/time_process.sim
+++ b/tests/script/tsim/query/time_process.sim
@@ -111,4 +111,4 @@ if $rows != 2 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim
index 7cc1403bcb215547209b1c41dcf0351f9fc80bfd..7f8b1044ef528a3a771946f878167b1123ddd9db 100644
--- a/tests/script/tsim/query/udf.sim
+++ b/tests/script/tsim/query/udf.sim
@@ -9,7 +9,7 @@ system sh/cfg.sh -n dnode1 -c udf -v 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1 udf
+print ======== step1 udf
system sh/compile_udf.sh
sql create database udf vgroups 3;
sql use udf;
diff --git a/tests/script/tsim/scalar/filter.sim b/tests/script/tsim/scalar/filter.sim
new file mode 100644
index 0000000000000000000000000000000000000000..923166227856189e91848150ed9e848f946b066d
--- /dev/null
+++ b/tests/script/tsim/scalar/filter.sim
@@ -0,0 +1,38 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ======== step1
+sql drop database if exists db1;
+sql create database db1 vgroups 3;
+sql use db1;
+sql create stable st1 (fts timestamp, fbool bool, ftiny tinyint, fsmall smallint, fint int, fbig bigint, futiny tinyint unsigned, fusmall smallint unsigned, fuint int unsigned, fubig bigint unsigned, ffloat float, fdouble double, fbin binary(10), fnchar nchar(10)) tags(tts timestamp, tbool bool, ttiny tinyint, tsmall smallint, tint int, tbig bigint, tutiny tinyint unsigned, tusmall smallint unsigned, tuint int unsigned, tubig bigint unsigned, tfloat float, tdouble double, tbin binary(10), tnchar nchar(10));
+sql create table tb1 using st1 tags('2022-07-10 16:31:00', true, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql create table tb2 using st1 tags('2022-07-10 16:32:00', false, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql create table tb3 using st1 tags('2022-07-10 16:33:00', true, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+
+sql insert into tb1 values ('2022-07-10 16:31:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql insert into tb1 values ('2022-07-10 16:31:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql insert into tb1 values ('2022-07-10 16:31:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+sql insert into tb1 values ('2022-07-10 16:31:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd');
+sql insert into tb1 values ('2022-07-10 16:31:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e');
+
+sql insert into tb2 values ('2022-07-10 16:32:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql insert into tb2 values ('2022-07-10 16:32:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql insert into tb2 values ('2022-07-10 16:32:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+sql insert into tb2 values ('2022-07-10 16:32:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd');
+sql insert into tb2 values ('2022-07-10 16:32:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e');
+
+sql insert into tb3 values ('2022-07-10 16:33:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql insert into tb3 values ('2022-07-10 16:33:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql insert into tb3 values ('2022-07-10 16:33:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+sql insert into tb3 values ('2022-07-10 16:33:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd');
+sql insert into tb3 values ('2022-07-10 16:33:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e');
+
+sql select * from st1 where (ttiny > 2 or ftiny < 5) and ftiny > 2;
+if $rows != 7 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/alter_comment.sim b/tests/script/tsim/stable/alter_comment.sim
index beb049985c90ca7f8c7521c004c950609cc05347..7c2d6edfcbe48e6e4afc55536e893cbc7d0dbc20 100644
--- a/tests/script/tsim/stable/alter_comment.sim
+++ b/tests/script/tsim/stable/alter_comment.sim
@@ -95,7 +95,7 @@ sql_error alter table db.stb add tag t1 int
sql_error alter table db.stb add tag t2 int
sql_error alter table db.stb add tag t3 int
sql alter table db.stb add tag t4 bigint
-sql alter table db.stb add tag c1 int
+sql alter table db.stb add tag c1 int
sql alter table db.stb add tag t5 binary(12)
sql select * from information_schema.ins_stables where db_name = 'db'
diff --git a/tests/script/tsim/stable/alter_count.sim b/tests/script/tsim/stable/alter_count.sim
index 83ea4b14fa733821316814dc6b4f47c7f239e1e8..4a2aeca029175c73a82d622b59777782f27639ab 100644
--- a/tests/script/tsim/stable/alter_count.sim
+++ b/tests/script/tsim/stable/alter_count.sim
@@ -5,8 +5,8 @@ print ========= start dnode1 as master
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
-sql create database d1 replica 1 duration 7 keep 50
+print ======== step1
+sql create database d1 replica 1 duration 7 keep 50
sql use d1
sql create table tb (ts timestamp, a int)
sql insert into tb values(now-28d, -28)
@@ -83,7 +83,7 @@ if $data00 != 3 then
endi
print ======== step8
-# sql alter table tb(ts timestamp, a int, b smallint, c tinyint, d int, e bigint, f float, g double, h binary(10) )
+# sql alter table tb(ts timestamp, a int, b smallint, c tinyint, d int, e bigint, f float, g double, h binary(10) )
sql alter table tb add column h binary(10)
sql insert into tb values(now-7d, -7, 18, 0, 0, 0, 0, 0, '0')
sql insert into tb values(now-6d, -6, 19, 1, 1, 1, 1, 1, '1')
@@ -260,4 +260,4 @@ if $data00 != 31 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/alter_import.sim b/tests/script/tsim/stable/alter_import.sim
index b968eb6a124a8f8d232f03090e4ce67b06be735e..7431ea698acbe5f504e5d8c6abf64c1877420fd5 100644
--- a/tests/script/tsim/stable/alter_import.sim
+++ b/tests/script/tsim/stable/alter_import.sim
@@ -5,7 +5,7 @@ print ========= start dnode1 as master
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database d1 replica 1 duration 7 keep 50
sql use d1
sql create table tb (ts timestamp, a int)
@@ -42,4 +42,4 @@ if $data00 != 6 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/alter_insert1.sim b/tests/script/tsim/stable/alter_insert1.sim
index bcea0b48c4032fa1d0ddd56a2c467559a39e8a77..0e5617e92d63a049e288c318e007bfdfd79e7b9b 100644
--- a/tests/script/tsim/stable/alter_insert1.sim
+++ b/tests/script/tsim/stable/alter_insert1.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database d3
sql use d3
sql create table tb (ts timestamp, a int)
@@ -1137,4 +1137,4 @@ if $data79 != null then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/alter_insert2.sim b/tests/script/tsim/stable/alter_insert2.sim
index faae24d32aee731b9bee8ca4e5b89816c58cfb1c..a6046f3dda81458c4f760fb0c48b1352e21105fe 100644
--- a/tests/script/tsim/stable/alter_insert2.sim
+++ b/tests/script/tsim/stable/alter_insert2.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database d4
sql use d4
sql create table tb (ts timestamp, a int, b smallint, c tinyint, d int, e bigint, f float, g double, h binary(10))
@@ -662,4 +662,4 @@ if $data62 != null then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/alter_metrics.sim b/tests/script/tsim/stable/alter_metrics.sim
index e32250de130210612a8f7bf70df7225da381c1ab..203f41e18bcf3148ab6b954288320b326bb3c07d 100644
--- a/tests/script/tsim/stable/alter_metrics.sim
+++ b/tests/script/tsim/stable/alter_metrics.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database d2
sql use d2
sql create table mt (ts timestamp, a int) TAGS (t int)
@@ -757,8 +757,8 @@ endi
print ======= over
sql drop database d2
sql select * from information_schema.ins_databases
-if $rows != 2 then
+if $rows != 2 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/column_add.sim b/tests/script/tsim/stable/column_add.sim
index c0f3b4f4907402a863e5c4b25bcb8c4ee3d6f46f..05189f6c7d9f6800e7d8229c29c9bcd7284d238f 100644
--- a/tests/script/tsim/stable/column_add.sim
+++ b/tests/script/tsim/stable/column_add.sim
@@ -116,7 +116,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != NULL then
return -1
endi
@@ -153,7 +153,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != NULL then
return -1
endi
@@ -299,4 +299,4 @@ if $rows != 10 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/column_modify.sim b/tests/script/tsim/stable/column_modify.sim
index e2752ccf951cef30587aa1f604f92cbbaa265b85..43284ba829ecd662872b24cefc13a51db28d025b 100644
--- a/tests/script/tsim/stable/column_modify.sim
+++ b/tests/script/tsim/stable/column_modify.sim
@@ -31,7 +31,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -92,7 +92,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -106,4 +106,4 @@ if $data[1][3] != 101 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/disk.sim b/tests/script/tsim/stable/disk.sim
index e0e51b2625d5d90640dc846cefa0d151d9e4efb5..8edd0a845ecf7ced9638b32640d3278e73c93835 100644
--- a/tests/script/tsim/stable/disk.sim
+++ b/tests/script/tsim/stable/disk.sim
@@ -188,4 +188,4 @@ if $rows != 2 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/refcount.sim b/tests/script/tsim/stable/refcount.sim
index a83c0ca53f3033513e48dd21252a9db53eab4774..35d8767efd55df3d6a0126c1d9fd0e33c0f41176 100644
--- a/tests/script/tsim/stable/refcount.sim
+++ b/tests/script/tsim/stable/refcount.sim
@@ -123,4 +123,4 @@ if $rows != 2 then
endi
print =============== step6
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_add.sim b/tests/script/tsim/stable/tag_add.sim
index 7ee9aee974681a21ba7b186cd3d84f9492f6523e..4f5f0e745234e39ecce1e24a0e918094db8676f9 100644
--- a/tests/script/tsim/stable/tag_add.sim
+++ b/tests/script/tsim/stable/tag_add.sim
@@ -139,7 +139,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -170,7 +170,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -190,4 +190,4 @@ if $rows != 7 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_drop.sim b/tests/script/tsim/stable/tag_drop.sim
index 7902358817c1ee9ba6038233a08810504be6fc70..b457bf195b8991721c59a581e6bd252b8f823906 100644
--- a/tests/script/tsim/stable/tag_drop.sim
+++ b/tests/script/tsim/stable/tag_drop.sim
@@ -165,7 +165,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -196,7 +196,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 201 then
return -1
endi
@@ -229,7 +229,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 201 then
return -1
endi
@@ -261,7 +261,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 301 then
return -1
endi
@@ -323,7 +323,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 302 then
return -1
endi
@@ -334,4 +334,4 @@ if $data[0][5] != 304 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_filter.sim b/tests/script/tsim/stable/tag_filter.sim
index f44142fbbffdb4f19b32afd51f6d75fa2e798e88..de2a87d6c4a710d8fcb2af773d20ac1bfc252f84 100644
--- a/tests/script/tsim/stable/tag_filter.sim
+++ b/tests/script/tsim/stable/tag_filter.sim
@@ -27,47 +27,47 @@ sql create table db.ctb6 using db.stb tags(6, "102")
sql insert into db.ctb6 values(now, 6, "2")
sql select * from db.stb where t1 = 1
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-sql select * from db.stb where t1 < 1
-if $rows != 0 then
+sql select * from db.stb where t1 < 1
+if $rows != 0 then
return -=1
endi
-sql select * from db.stb where t1 < 2
-if $rows != 1 then
+sql select * from db.stb where t1 < 2
+if $rows != 1 then
return -1
endi
-sql select * from db.stb where t1 <= 2
-if $rows != 2 then
+sql select * from db.stb where t1 <= 2
+if $rows != 2 then
return -1
endi
-sql select * from db.stb where t1 >= 1
-if $rows != 6 then
+sql select * from db.stb where t1 >= 1
+if $rows != 6 then
return -1
endi
-sql select * from db.stb where t1 > 1
-if $rows != 5 then
+sql select * from db.stb where t1 > 1
+if $rows != 5 then
return -1
endi
-sql select * from db.stb where t1 between 1 and 1
-if $rows != 1 then
+sql select * from db.stb where t1 between 1 and 1
+if $rows != 1 then
return -1
endi
-sql select * from db.stb where t1 between 1 and 6
-if $rows != 6 then
+sql select * from db.stb where t1 between 1 and 6
+if $rows != 6 then
return -1
endi
-sql select * from db.stb where t1 between 1 and 7
-if $rows != 6 then
+sql select * from db.stb where t1 between 1 and 7
+if $rows != 6 then
return -1
endi
@@ -88,25 +88,25 @@ sql insert into db.ctbBin2 values(now, 3, "2")
sql create table db.ctbBin3 using db.stbBin tags("d")
sql insert into db.ctbBin3 values(now, 4, "2")
-sql select * from db.stbBin where t1 = "a"
-if $rows != 1 then
+sql select * from db.stbBin where t1 = "a"
+if $rows != 1 then
return -1
endi
-sql select * from db.stbBin where t1 < "a"
-if $rows != 0 then
+sql select * from db.stbBin where t1 < "a"
+if $rows != 0 then
return -=1
endi
-sql select * from db.stbBin where t1 < "b"
-if $rows != 1 then
+sql select * from db.stbBin where t1 < "b"
+if $rows != 1 then
return -1
endi
-sql select * from db.stbBin where t1 between "a" and "e"
-if $rows != 4 then
+sql select * from db.stbBin where t1 between "a" and "e"
+if $rows != 4 then
return -1
endi
@@ -127,25 +127,25 @@ sql insert into db.ctbNc2 values(now, 3, "2")
sql create table db.ctbNc3 using db.stbNc tags("d")
sql insert into db.ctbNc3 values(now, 4, "2")
-sql select * from db.stbNc where t1 = "a"
-if $rows != 1 then
+sql select * from db.stbNc where t1 = "a"
+if $rows != 1 then
return -1
endi
-sql select * from db.stbNc where t1 < "a"
-if $rows != 0 then
+sql select * from db.stbNc where t1 < "a"
+if $rows != 0 then
return -=1
endi
-sql select * from db.stbNc where t1 < "b"
-if $rows != 1 then
+sql select * from db.stbNc where t1 < "b"
+if $rows != 1 then
return -1
endi
-sql select * from db.stbNc where t1 between "a" and "e"
-if $rows != 4 then
+sql select * from db.stbNc where t1 between "a" and "e"
+if $rows != 4 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_modify.sim b/tests/script/tsim/stable/tag_modify.sim
index 909ed7935944aa63f7776bbc75f27b7b156bf0fe..53e7227d1b43f32bbd14e719837b15c2b27e3ca5 100644
--- a/tests/script/tsim/stable/tag_modify.sim
+++ b/tests/script/tsim/stable/tag_modify.sim
@@ -28,7 +28,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -55,7 +55,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -120,4 +120,4 @@ if $data[4][2] != 5 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_rename.sim b/tests/script/tsim/stable/tag_rename.sim
index 5bdfa24990d6742d25dee2a45d5aefd94230f648..c85ed183de1946ea7b876f989473189e7834e4e6 100644
--- a/tests/script/tsim/stable/tag_rename.sim
+++ b/tests/script/tsim/stable/tag_rename.sim
@@ -28,7 +28,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -52,7 +52,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -117,4 +117,4 @@ if $data[4][2] != 4 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stream/basic1.sim b/tests/script/tsim/stream/basic1.sim
index 5392979c0a218884a50d0ebe9ddb39558e82304f..d9777d5133e8226364805d85fb0918caac763adb 100644
--- a/tests/script/tsim/stream/basic1.sim
+++ b/tests/script/tsim/stream/basic1.sim
@@ -462,10 +462,10 @@ if $data25 != 3 then
return -1
endi
-sql create database test2 vgroups 1
-sql select * from information_schema.ins_databases
+sql create database test2 vgroups 1;
+sql select * from information_schema.ins_databases;
-sql use test2
+sql use test2;
sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);
diff --git a/tests/script/tsim/stream/partitionbyColumn0.sim b/tests/script/tsim/stream/partitionbyColumn0.sim
new file mode 100644
index 0000000000000000000000000000000000000000..d91d4b7bf0c2f81cd00368b938898a15aec34091
--- /dev/null
+++ b/tests/script/tsim/stream/partitionbyColumn0.sim
@@ -0,0 +1,570 @@
+$loop_all = 0
+looptest:
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+sql drop stream if exists streams0;
+sql drop stream if exists streams1;
+sql drop stream if exists streams2;
+sql drop stream if exists streams3;
+sql drop stream if exists streams4;
+sql drop database if exists test;
+sql create database test vgroups 1;
+sql use test;
+sql create table t1(ts timestamp, a int, b int , c int, d double);
+sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3, _group_key(a) c4 from t1 partition by a interval(10s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+
+$loop_count = 0
+
+loop0:
+sleep 100
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop0
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop0
+endi
+
+
+sql insert into t1 values(1648791213000,1,2,3,1.0);
+
+$loop_count = 0
+
+loop1:
+sleep 100
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop1
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop1
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+
+$loop_count = 0
+
+loop2:
+sleep 100
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop2
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop2
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t1 values(1648791213001,2,2,3,1.0);
+sql insert into t1 values(1648791213002,2,2,3,1.0);
+sql insert into t1 values(1648791213002,1,2,3,1.0);
+
+$loop_count = 0
+
+loop3:
+sleep 100
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop3
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop3
+endi
+
+if $data11 != 2 then
+ print =====data11=$data11
+ goto loop3
+endi
+
+if $data12 != 2 then
+ print =====data12=$data12
+ goto loop3
+endi
+
+sql insert into t1 values(1648791223000,1,2,3,1.0);
+sql insert into t1 values(1648791223001,1,2,3,1.0);
+sql insert into t1 values(1648791223002,3,2,3,1.0);
+sql insert into t1 values(1648791223003,3,2,3,1.0);
+sql insert into t1 values(1648791213001,1,2,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+
+$loop_count = 0
+
+loop4:
+sleep 100
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop4
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop4
+endi
+
+if $data11 != 1 then
+ print =====data11=$data11
+ goto loop4
+endi
+
+if $data12 != 2 then
+ print =====data12=$data12
+ goto loop4
+endi
+
+if $data21 != 2 then
+ print =====data21=$data21
+ goto loop4
+endi
+
+if $data22 != 1 then
+ print =====data22=$data22
+ goto loop4
+endi
+
+if $data31 != 1 then
+ print =====data31=$data31
+ goto loop4
+endi
+
+if $data32 != 2 then
+ print =====data32=$data32
+ goto loop4
+endi
+
+if $data41 != 1 then
+ print =====data41=$data41
+ goto loop4
+endi
+
+if $data42 != 3 then
+ print =====data42=$data42
+ goto loop4
+endi
+
+sql drop stream if exists streams1;
+sql drop database if exists test1;
+sql create database test1 vgroups 1;
+sql use test1;
+sql create table t1(ts timestamp, a int, b int , c int, d double);
+sql create stream streams1 trigger at_once into streamt1 as select _wstart c1, count(*) c2, max(c) c3, _group_key(a+b) c4 from t1 partition by a+b interval(10s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,1,2,1,1.0);
+sql insert into t1 values(1648791213001,2,1,2,2.0);
+sql insert into t1 values(1648791213001,1,2,3,2.0);
+
+$loop_count = 0
+
+loop5:
+sleep 100
+sql select * from streamt1 order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop5
+endi
+
+sql insert into t1 values(1648791223000,1,2,4,2.0);
+sql insert into t1 values(1648791223001,1,2,5,2.0);
+sql insert into t1 values(1648791223002,1,2,5,2.0);
+sql insert into t1 values(1648791213001,1,1,6,2.0) (1648791223002,1,1,7,2.0);
+
+$loop_count = 0
+
+loop6:
+sleep 100
+sql select * from streamt1 order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop6
+endi
+
+if $data02 != 6 then
+ print =====data02=$data02
+ goto loop6
+endi
+
+if $data11 != 1 then
+ print =====data11=$data11
+ goto loop6
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop6
+endi
+
+if $data21 != 1 then
+ print =====data21=$data21
+ goto loop6
+endi
+
+if $data22 != 7 then
+ print =====data22=$data22
+ goto loop6
+endi
+
+if $data31 != 2 then
+ print =====data31=$data31
+ goto loop6
+endi
+
+if $data32 != 5 then
+ print =====data32=$data32
+ goto loop6
+endi
+
+sql drop stream if exists streams2;
+sql drop database if exists test2;
+sql create database test2 vgroups 4;
+sql use test2;
+sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+sql create stream streams2 trigger at_once into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL);
+
+$loop_count = 0
+
+loop7:
+sleep 100
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop7
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop7
+endi
+
+sql insert into t1 values(1648791213000,1,2,3,1.0);
+sql insert into t2 values(1648791213000,1,2,3,1.0);
+
+$loop_count = 0
+
+loop8:
+sleep 100
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop8
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop8
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t2 values(1648791213000,2,2,3,1.0);
+
+$loop_count = 0
+
+loop9:
+sleep 100
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop9
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop9
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t1 values(1648791213001,2,2,3,1.0);
+sql insert into t1 values(1648791213002,2,2,3,1.0);
+sql insert into t1 values(1648791213002,1,2,3,1.0);
+sql insert into t2 values(1648791213000,2,2,3,1.0);
+sql insert into t2 values(1648791213001,2,2,3,1.0);
+sql insert into t2 values(1648791213002,2,2,3,1.0);
+sql insert into t2 values(1648791213002,1,2,3,1.0);
+
+$loop_count = 0
+
+loop10:
+sleep 100
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop10
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop10
+endi
+
+if $data11 != 4 then
+ print =====data11=$data11
+ goto loop10
+endi
+
+if $data12 != 2 then
+ print =====data12=$data12
+ goto loop10
+endi
+
+sql insert into t1 values(1648791223000,1,2,3,1.0);
+sql insert into t1 values(1648791223001,1,2,3,1.0);
+sql insert into t1 values(1648791223002,3,2,3,1.0);
+sql insert into t1 values(1648791223003,3,2,3,1.0);
+sql insert into t1 values(1648791213001,1,2,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+sql insert into t2 values(1648791223000,1,2,3,1.0);
+sql insert into t2 values(1648791223001,1,2,3,1.0);
+sql insert into t2 values(1648791223002,3,2,3,1.0);
+sql insert into t2 values(1648791223003,3,2,3,1.0);
+sql insert into t2 values(1648791213001,1,2,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+
+$loop_count = 0
+
+loop11:
+sleep 100
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop11
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop11
+endi
+
+if $data11 != 4 then
+ print =====data11=$data11
+ goto loop11
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop11
+endi
+
+if $data21 != 2 then
+ print =====data21=$data21
+ goto loop11
+endi
+
+if $data22 != 2 then
+ print =====data22=$data22
+ goto loop11
+endi
+
+if $data31 != 2 then
+ print =====data31=$data31
+ goto loop11
+endi
+
+if $data32 != 3 then
+ print =====data32=$data32
+ goto loop11
+endi
+
+if $data41 != 4 then
+ print =====data41=$data41
+ goto loop11
+endi
+
+if $data42 != 1 then
+ print =====data42=$data42
+ goto loop11
+endi
+
+sql drop stream if exists streams4;
+sql drop database if exists test4;
+sql create database test4 vgroups 4;
+sql use test4;
+sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+sql create table t3 using st tags(2,2,2);
+sql create table t4 using st tags(2,2,2);
+sql create stream streams4 trigger at_once into test.streamt4 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s);
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t2 values(1648791213000,2,2,3,1.0);
+sql insert into t3 values(1648791213000,2,2,3,1.0);
+sql insert into t4 values(1648791213000,2,2,3,1.0);
+sql insert into t4 values(1648791213000,1,2,3,1.0);
+
+$loop_count = 0
+
+loop13:
+sleep 100
+sql select * from test.streamt4 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $rows != 2 then
+ print =====rows=$rows
+ goto loop13
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop13
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop13
+endi
+
+if $data11 != 3 then
+ print =====data11=$data11
+ goto loop13
+endi
+
+if $data12 != 2 then
+ print =====data12=$data12
+ goto loop13
+endi
+
+sql insert into t4 values(1648791213000,2,2,3,1.0);
+sql insert into t1 values(1648791233000,2,2,3,1.0);
+
+
+sql insert into t1 values(1648791213000,1,2,3,1.0);
+
+$loop_count = 0
+
+loop14:
+sleep 100
+sql select * from test.streamt4 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $rows != 3 then
+ print =====rows=$rows
+ goto loop14
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop14
+endi
+
+if $data11 != 3 then
+ print =====data11=$data11
+ goto loop14
+endi
+
+if $data21 != 1 then
+ print =====data21=$data21
+ goto loop14
+endi
+
+$loop_all = $loop_all + 1
+print ============loop_all=$loop_all
+
+system sh/stop_dnodes.sh
+
+#goto looptest
\ No newline at end of file
diff --git a/tests/script/tsim/stream/partitionbyColumn1.sim b/tests/script/tsim/stream/partitionbyColumn1.sim
new file mode 100644
index 0000000000000000000000000000000000000000..7f5c53ebe3855693e5974aa4c2945af1af235216
--- /dev/null
+++ b/tests/script/tsim/stream/partitionbyColumn1.sim
@@ -0,0 +1,546 @@
+$loop_all = 0
+looptest:
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+sql drop stream if exists streams0;
+sql drop stream if exists streams1;
+sql drop stream if exists streams2;
+sql drop stream if exists streams3;
+sql drop stream if exists streams4;
+sql drop database if exists test;
+sql create database test vgroups 1;
+sql use test;
+sql create table t1(ts timestamp, a int, b int , c int, d double);
+sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3, _group_key(a) c4 from t1 partition by a session(ts, 5s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+
+$loop_count = 0
+
+loop0:
+sleep 300
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop0
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop0
+endi
+
+
+sql insert into t1 values(1648791213000,1,2,3,1.0);
+
+loop1:
+sleep 300
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop1
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop1
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+
+loop2:
+sleep 300
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop2
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop2
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t1 values(1648791213001,2,2,3,1.0);
+sql insert into t1 values(1648791213002,2,2,3,1.0);
+sql insert into t1 values(1648791213002,1,2,3,1.0);
+
+loop3:
+sleep 300
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop3
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop3
+endi
+
+if $data11 != 1 then
+ print =====data11=$data11
+ goto loop3
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop3
+endi
+
+sql insert into t1 values(1648791223000,1,2,3,1.0);
+sql insert into t1 values(1648791223001,1,2,3,1.0);
+sql insert into t1 values(1648791223002,3,2,3,1.0);
+sql insert into t1 values(1648791223003,3,2,3,1.0);
+sql insert into t1 values(1648791213001,1,2,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+
+loop4:
+sleep 300
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop4
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop4
+endi
+
+if $data11 != 2 then
+ print =====data11=$data11
+ goto loop4
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop4
+endi
+
+if $data21 != 2 then
+ print =====data21=$data21
+ goto loop4
+endi
+
+if $data22 != 1 then
+ print =====data22=$data22
+ goto loop4
+endi
+
+if $data31 != 1 then
+ print =====data31=$data31
+ goto loop4
+endi
+
+if $data32 != 2 then
+ print =====data32=$data32
+ goto loop4
+endi
+
+if $data41 != 1 then
+ print =====data41=$data41
+ goto loop4
+endi
+
+if $data42 != 3 then
+ print =====data42=$data42
+ goto loop4
+endi
+
+sql drop database if exists test1;
+sql create database test1 vgroups 1;
+sql use test1;
+sql create table t1(ts timestamp, a int, b int , c int, d double);
+sql create stream streams1 trigger at_once into streamt1 as select _wstart c1, count(*) c2, max(c) c3, _group_key(a+b) c4 from t1 partition by a+b session(ts, 5s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,1,2,1,1.0);
+sql insert into t1 values(1648791213001,2,1,2,2.0);
+sql insert into t1 values(1648791213001,1,2,3,2.0);
+
+$loop_count = 0
+
+loop5:
+sleep 300
+sql select * from streamt1 order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop5
+endi
+
+sql insert into t1 values(1648791223000,1,2,4,2.0);
+sql insert into t1 values(1648791223001,1,2,5,2.0);
+sql insert into t1 values(1648791223002,1,2,5,2.0);
+sql insert into t1 values(1648791213001,1,1,6,2.0) (1648791223002,1,1,7,2.0);
+
+loop6:
+sleep 300
+sql select * from streamt1 order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop6
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop6
+endi
+
+if $data11 != 1 then
+ print =====data11=$data11
+ goto loop6
+endi
+
+if $data12 != 6 then
+ print =====data12=$data12
+ goto loop6
+endi
+
+if $data21 != 2 then
+ print =====data21=$data21
+ goto loop6
+endi
+
+if $data22 != 5 then
+ print =====data22=$data22
+ goto loop6
+endi
+
+if $data31 != 1 then
+ print =====data31=$data31
+ goto loop6
+endi
+
+if $data32 != 7 then
+ print =====data32=$data32
+ goto loop6
+endi
+
+sql drop database if exists test2;
+sql create database test2 vgroups 4;
+sql use test2;
+sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+sql create stream streams2 trigger at_once into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a session(ts, 5s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL);
+
+$loop_count = 0
+
+loop7:
+sleep 300
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop7
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop7
+endi
+
+sql insert into t1 values(1648791213000,1,2,3,1.0);
+sql insert into t2 values(1648791213000,1,2,3,1.0);
+
+loop8:
+sleep 300
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop8
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop8
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t2 values(1648791213000,2,2,3,1.0);
+loop9:
+sleep 300
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop9
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop9
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t1 values(1648791213001,2,2,3,1.0);
+sql insert into t1 values(1648791213002,2,2,3,1.0);
+sql insert into t1 values(1648791213002,1,2,3,1.0);
+sql insert into t2 values(1648791213000,2,2,3,1.0);
+sql insert into t2 values(1648791213001,2,2,3,1.0);
+sql insert into t2 values(1648791213002,2,2,3,1.0);
+sql insert into t2 values(1648791213002,1,2,3,1.0);
+
+loop10:
+sleep 300
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 4 then
+ print =====data01=$data01
+ goto loop10
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop10
+endi
+
+if $data11 != 2 then
+ print =====data11=$data11
+ goto loop10
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop10
+endi
+
+sql insert into t1 values(1648791223000,1,2,3,1.0);
+sql insert into t1 values(1648791223001,1,2,3,1.0);
+sql insert into t1 values(1648791223002,3,2,3,1.0);
+sql insert into t1 values(1648791223003,3,2,3,1.0);
+sql insert into t1 values(1648791213001,1,2,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+sql insert into t2 values(1648791223000,1,2,3,1.0);
+sql insert into t2 values(1648791223001,1,2,3,1.0);
+sql insert into t2 values(1648791223002,3,2,3,1.0);
+sql insert into t2 values(1648791223003,3,2,3,1.0);
+sql insert into t2 values(1648791213001,1,2,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+
+loop11:
+sleep 300
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop11
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop11
+endi
+
+if $data11 != 4 then
+ print =====data11=$data11
+ goto loop11
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop11
+endi
+
+if $data21 != 4 then
+ print =====data21=$data21
+ goto loop11
+endi
+
+if $data22 != 1 then
+ print =====data22=$data22
+ goto loop11
+endi
+
+if $data31 != 2 then
+ print =====data31=$data31
+ goto loop11
+endi
+
+if $data32 != 2 then
+ print =====data32=$data32
+ goto loop11
+endi
+
+if $data41 != 2 then
+ print =====data41=$data41
+ goto loop11
+endi
+
+if $data42 != 3 then
+ print =====data42=$data42
+ goto loop11
+endi
+
+sql drop database if exists test4;
+sql create database test4 vgroups 4;
+sql use test4;
+sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+sql create table t3 using st tags(2,2,2);
+sql create table t4 using st tags(2,2,2);
+sql create stream streams4 trigger at_once into test.streamt4 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a session(ts, 5s);
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t2 values(1648791213000,2,2,3,1.0);
+sql insert into t3 values(1648791213000,2,2,3,1.0);
+sql insert into t4 values(1648791213000,2,2,3,1.0);
+sql insert into t4 values(1648791213000,1,2,3,1.0);
+
+$loop_count = 0
+
+loop13:
+sleep 300
+sql select * from test.streamt4 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 2 then
+ print =====rows=$rows
+ goto loop13
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop13
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop13
+endi
+
+if $data11 != 3 then
+ print =====data11=$data11
+ goto loop13
+endi
+
+if $data12 != 2 then
+ print =====data12=$data12
+ goto loop13
+endi
+
+sql insert into t4 values(1648791213000,2,2,3,1.0);
+sql insert into t1 values(1648791233000,2,2,3,1.0);
+
+
+sql insert into t1 values(1648791213000,1,2,3,1.0);
+
+loop14:
+sleep 300
+sql select * from test.streamt4 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 3 then
+ print =====rows=$rows
+ goto loop14
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop14
+endi
+
+if $data11 != 3 then
+ print =====data11=$data11
+ goto loop14
+endi
+
+if $data21 != 1 then
+ print =====data21=$data21
+ goto loop14
+endi
+
+system sh/stop_dnodes.sh
+
+$loop_all = $loop_all + 1
+print ============loop_all=$loop_all
+
+#goto looptest
\ No newline at end of file
diff --git a/tests/script/tsim/stream/partitionbyColumn2.sim b/tests/script/tsim/stream/partitionbyColumn2.sim
new file mode 100644
index 0000000000000000000000000000000000000000..3d9acbcac50e360fc445b9ebe5fdbedec4009a7b
--- /dev/null
+++ b/tests/script/tsim/stream/partitionbyColumn2.sim
@@ -0,0 +1,269 @@
+$loop_all = 0
+looptest:
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+sql drop database if exists test;
+sql create database test vgroups 1;
+sql use test;
+sql create table t1(ts timestamp, a int, b int , c int, d double);
+sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3, _group_key(a) c4 from t1 partition by a state_window(b);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+
+$loop_count = 0
+
+loop0:
+sleep 300
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop0
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop0
+endi
+
+
+sql insert into t1 values(1648791213000,1,1,3,1.0);
+
+loop1:
+sleep 300
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop1
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop1
+endi
+
+sql insert into t1 values(1648791213000,2,1,3,1.0);
+
+loop2:
+sleep 300
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop2
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop2
+endi
+
+sql insert into t1 values(1648791213000,2,1,3,1.0);
+sql insert into t1 values(1648791213001,2,1,3,1.0);
+sql insert into t1 values(1648791213002,2,1,3,1.0);
+sql insert into t1 values(1648791213002,1,1,3,1.0);
+
+loop3:
+sleep 300
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop3
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop3
+endi
+
+if $data11 != 1 then
+ print =====data11=$data11
+ goto loop3
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop3
+endi
+
+sql insert into t1 values(1648791223000,1,2,3,1.0);
+sql insert into t1 values(1648791223001,1,2,3,1.0);
+sql insert into t1 values(1648791223002,3,2,3,1.0);
+sql insert into t1 values(1648791223003,3,2,3,1.0);
+sql insert into t1 values(1648791213001,1,1,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+
+loop4:
+sleep 300
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop4
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop4
+endi
+
+if $data11 != 2 then
+ print =====data11=$data11
+ goto loop4
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop4
+endi
+
+if $data21 != 2 then
+ print =====data21=$data21
+ goto loop4
+endi
+
+if $data22 != 1 then
+ print =====data22=$data22
+ goto loop4
+endi
+
+if $data31 != 1 then
+ print =====data31=$data31
+ goto loop4
+endi
+
+if $data32 != 2 then
+ print =====data32=$data32
+ goto loop4
+endi
+
+if $data41 != 1 then
+ print =====data41=$data41
+ goto loop4
+endi
+
+if $data42 != 3 then
+ print =====data42=$data42
+ goto loop4
+endi
+
+sql drop database if exists test1;
+sql create database test1 vgroups 1;
+sql use test1;
+sql create table t1(ts timestamp, a int, b int , c int, d int);
+sql create stream streams1 trigger at_once into streamt1 as select _wstart c1, count(*) c2, max(d) c3, _group_key(a+b) c4 from t1 partition by a+b state_window(c);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,1,2,1,1);
+sql insert into t1 values(1648791213001,2,1,1,2);
+sql insert into t1 values(1648791213001,1,2,1,3);
+
+$loop_count = 0
+
+loop5:
+sleep 300
+sql select * from streamt1 order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop5
+endi
+
+sql insert into t1 values(1648791223000,1,2,2,4);
+sql insert into t1 values(1648791223001,1,2,2,5);
+sql insert into t1 values(1648791223002,1,2,2,6);
+sql insert into t1 values(1648791213001,1,1,1,7) (1648791223002,1,1,2,8);
+
+loop6:
+sleep 300
+sql select * from streamt1 order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop6
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop6
+endi
+
+if $data11 != 1 then
+ print =====data11=$data11
+ goto loop6
+endi
+
+if $data12 != 7 then
+ print =====data12=$data12
+ goto loop6
+endi
+
+if $data21 != 2 then
+ print =====data21=$data21
+ goto loop6
+endi
+
+if $data22 != 5 then
+ print =====data22=$data22
+ goto loop6
+endi
+
+if $data31 != 1 then
+ print =====data31=$data31
+ goto loop6
+endi
+
+if $data32 != 8 then
+ print =====data32=$data32
+ goto loop6
+endi
+
+system sh/stop_dnodes.sh
+
+$loop_all = $loop_all + 1
+print ============loop_all=$loop_all
+
+#goto looptest
diff --git a/tests/script/tsim/sync/create-mnode.sim b/tests/script/tsim/sync/create-mnode.sim
new file mode 100644
index 0000000000000000000000000000000000000000..cfaafc8208e7e10f7b53be76a6eaa94e718efbbd
--- /dev/null
+++ b/tests/script/tsim/sync/create-mnode.sim
@@ -0,0 +1,20 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
+
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+
+sql connect
+sql create dnode $hostname port 7200
+sql create dnode $hostname port 7300
+sql create dnode $hostname port 7400
+
+sql create mnode on dnode 2
+sql create mnode on dnode 3
diff --git a/tests/script/tsim/user/privilege_sysinfo.sim b/tests/script/tsim/user/privilege_sysinfo.sim
index 25c1a84db699a8cdef5678abaf728f4a93690bde..86f95755d09f7e2f1c0654b94f63f471f9a074d0 100644
--- a/tests/script/tsim/user/privilege_sysinfo.sim
+++ b/tests/script/tsim/user/privilege_sysinfo.sim
@@ -8,7 +8,20 @@ sql create user sysinfo0 pass 'taosdata'
sql create user sysinfo1 pass 'taosdata'
sql alter user sysinfo0 sysinfo 0
sql alter user sysinfo1 sysinfo 1
+
sql create database db
+sql use db
+sql create table db.stb (ts timestamp, i int) tags (t int)
+sql create table db.ctb using db.stb tags (1)
+sql create table db.ntb (ts timestamp, i int)
+sql insert into db.ctb values (now, 1);
+sql insert into db.ntb values (now, 1);
+sql select * from db.stb
+sql select * from db.ctb
+sql select * from db.ntb
+
+sql create database d2
+sql GRANT all ON d2.* to sysinfo0;
print user sysinfo0 login
sql close
@@ -17,11 +30,31 @@ sql connect sysinfo0
print =============== check oper
sql_error create user u1 pass 'u1'
sql_error drop user sysinfo1
-sql_error alter user sysinfo1 pass '1'
sql_error alter user sysinfo0 pass '1'
+sql_error alter user sysinfo0 enable 0
+sql_error alter user sysinfo0 enable 1
+sql_error alter user sysinfo1 pass '1'
+sql_error alter user sysinfo1 enable 1
+sql_error alter user sysinfo1 enable 1
+sql_error GRANT read ON db.* to sysinfo0;
+sql_error GRANT read ON *.* to sysinfo0;
+sql_error REVOKE read ON db.* from sysinfo0;
+sql_error REVOKE read ON *.* from sysinfo0;
+sql_error GRANT write ON db.* to sysinfo0;
+sql_error GRANT write ON *.* to sysinfo0;
+sql_error REVOKE write ON db.* from sysinfo0;
+sql_error REVOKE write ON *.* from sysinfo0;
+sql_error REVOKE write ON *.* from sysinfo0;
sql_error create dnode $hostname port 7200
sql_error drop dnode 1
+sql_error alter dnode 1 'debugFlag 135'
+sql_error alter dnode 1 'dDebugFlag 131'
+sql_error alter dnode 1 'resetlog'
+sql_error alter dnode 1 'monitor' '1'
+sql_error alter dnode 1 'monitor' '0'
+sql_error alter dnode 1 'monitor 1'
+sql_error alter dnode 1 'monitor 0'
sql_error create qnode on dnode 1
sql_error drop qnode on dnode 1
@@ -44,20 +77,106 @@ sql_error create database d1
sql_error drop database db
sql_error use db
sql_error alter database db replica 1;
+sql_error alter database db keep 21
sql_error show db.vgroups
-sql select * from information_schema.ins_stables where db_name = 'db'
-sql select * from information_schema.ins_tables where db_name = 'db'
+
+sql_error create table db.stb1 (ts timestamp, i int) tags (t int)
+sql_error create table db.ctb1 using db.stb1 tags (1)
+sql_error create table db.ntb1 (ts timestamp, i int)
+sql_error insert into db.ctb values (now, 1);
+sql_error insert into db.ntb values (now, 1);
+sql_error select * from db.stb
+sql_error select * from db.ctb
+sql_error select * from db.ntb
+
+sql use d2
+sql create table d2.stb2 (ts timestamp, i int) tags (t int)
+sql create table d2.ctb2 using d2.stb2 tags (1)
+sql create table d2.ntb2 (ts timestamp, i int)
+sql insert into d2.ctb2 values (now, 1);
+sql insert into d2.ntb2 values (now, 1);
+sql select * from d2.stb2
+sql select * from d2.ctb2
+sql select * from d2.ntb2
print =============== check show
-sql select * from information_schema.ins_users
+sql_error show users
sql_error show cluster
-sql select * from information_schema.ins_dnodes
-sql select * from information_schema.ins_mnodes
+sql_error select * from information_schema.ins_dnodes
+sql_error select * from information_schema.ins_mnodes
sql_error show snodes
-sql select * from information_schema.ins_qnodes
+sql_error select * from information_schema.ins_qnodes
+sql_error show dnodes
+sql_error show snodes
+sql_error show qnodes
+sql_error show mnodes
sql_error show bnodes
+sql_error show db.vgroups
+sql_error show db.stables
+sql_error show db.tables
+sql_error show indexes from stb from db
+sql show databases
+sql_error show d2.vgroups
+sql show d2.stables
+sql show d2.tables
+sql show indexes from stb2 from d2
+#sql_error show create database db
+sql_error show create table db.stb;
+sql_error show create table db.ctb;
+sql_error show create table db.ntb;
+sql show streams
+sql show consumers
+sql show topics
+sql show subscriptions
+sql show functions
sql_error show grants
+sql show queries
+sql show connections
+sql show apps
+sql show transactions
+sql_error show create database d2
+sql show create table d2.stb2;
+sql show create table d2.ctb2;
+sql show create table d2.ntb2;
+sql_error show variables;
+sql show local variables;
sql_error show dnode 1 variables;
-sql show variables;
+sql_error show variables;
+
+
+print =============== check information_schema
+sql show databases
+if $rows != 3 then
+ return -1
+endi
+
+sql use information_schema;
+sql_error select * from information_schema.ins_dnodes
+sql_error select * from information_schema.ins_mnodes
+sql_error select * from information_schema.ins_modules
+sql_error select * from information_schema.ins_qnodes
+sql_error select * from information_schema.ins_cluster
+sql select * from information_schema.ins_databases
+sql select * from information_schema.ins_functions
+sql select * from information_schema.ins_indexes
+sql select * from information_schema.ins_stables
+sql select * from information_schema.ins_tables
+sql select * from information_schema.ins_tags
+sql select * from information_schema.ins_users
+sql select * from information_schema.ins_topics
+sql select * from information_schema.ins_subscriptions
+sql select * from information_schema.ins_streams
+sql_error select * from information_schema.ins_grants
+sql_error select * from information_schema.ins_vgroups
+sql_error select * from information_schema.ins_configs
+sql_error select * from information_schema.ins_dnode_variables
+
+print =============== check performance_schema
+sql use performance_schema;
+sql select * from performance_schema.perf_connections
+sql select * from performance_schema.perf_queries
+sql select * from performance_schema.perf_consumers
+sql select * from performance_schema.perf_trans
+sql select * from performance_schema.perf_apps
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
+#system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/valgrind/checkError6.sim b/tests/script/tsim/valgrind/checkError6.sim
index fcc5b04c907852f87c469a3dc9d32c5ba1295327..d85a1bebc898ca79a20e4c495081077a2b1a4249 100644
--- a/tests/script/tsim/valgrind/checkError6.sim
+++ b/tests/script/tsim/valgrind/checkError6.sim
@@ -67,17 +67,17 @@ sql select diff(tbcol) from tb1 where tbcol > 5 and tbcol < 20 order by ts
sql select first(tbcol), last(tbcol) as b from tb1 where ts <= 1601481840000 interval(1m)
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from tb1 where ts <= 1601481840000 partition by tgcol order by tgcol
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from tb1 where ts <= 1601481840000 partition by tgcol interval(1m)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
sql select last_row(*) from tb1 where tbcol > 5 and tbcol < 20
sql select _wstart, _wend, _wduration, _qstart, _qend, count(*) from tb1 interval(10s, 2s) sliding(10s)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) order by tgcol desc
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0) order by tgcol desc
sql select log(tbcol), abs(tbcol), pow(tbcol, 2), sqrt(tbcol), sin(tbcol), cos(tbcol), tan(tbcol), asin(tbcol), acos(tbcol), atan(tbcol), ceil(tbcol), floor(tbcol), round(tbcol), atan(tbcol) from tb1
sql select length("abcd1234"), char_length("abcd1234=-+*") from tb1
sql select tbcol4, length(tbcol4), lower(tbcol4), upper(tbcol4), ltrim(tbcol4), rtrim(tbcol4), concat(tbcol4, tbcol5), concat_ws('_', tbcol4, tbcol5), substr(tbcol4, 1, 4) from tb1
sql select * from tb1 where tbcol not in (1,2,3,null);
sql select * from tb1 where tbcol + 3 <> null;
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
sql select tbcol5 - tbcol3 from tb1
print =============== step4: stb
@@ -97,8 +97,8 @@ sql select first(tbcol), last(tbcol) as c from stb group by tgcol
sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 and tbcol2 is null partition by tgcol interval(1m)
sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m)
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from stb where ts <= 1601481840000 partition by tgcol interval(1m)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 and tgcol = 1 partition by tgcol interval(1m) fill(value, 0) order by tgcol desc
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 and tgcol = 1 partition by tgcol interval(1m) fill(value, 0,0,0,0,0) order by tgcol desc
sql select last_row(tbcol), stddev(tbcol) from stb where tbcol > 5 and tbcol < 20 group by tgcol
sql select _wstart, _wend, _wduration, _qstart, _qend, count(*) from stb interval(10s, 2s) sliding(10s)
sql select log(tbcol), abs(tbcol), pow(tbcol, 2), sqrt(tbcol), sin(tbcol), cos(tbcol), tan(tbcol), asin(tbcol), acos(tbcol), atan(tbcol), ceil(tbcol), floor(tbcol), round(tbcol), atan(tbcol) from stb
@@ -108,7 +108,7 @@ sql select * from stb where tbcol not in (1,2,3,null);
sql select * from stb where tbcol + 3 <> null;
sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from stb where tbcol = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol interval(1d)
sql select _wstart, count(*) from tb1 session(ts, 1m)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
sql select tbcol5 - tbcol3 from stb
sql select spread( tbcol2 )/44, spread(tbcol2), 0.204545455 * 44 from stb;
@@ -127,8 +127,8 @@ sql explain analyze select count(*),sum(tbcol) from stb;
sql explain analyze select count(*),sum(tbcol) from stb group by tbcol;
sql explain analyze select * from information_schema.ins_stables;
sql explain analyze verbose true select * from information_schema.ins_stables where db_name='db2';
-sql explain analyze verbose true select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
-sql explain select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql explain analyze verbose true select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
+sql explain select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
print =============== step6: in cast
sql select 1+1n;
diff --git a/tests/system-test/0-others/user_control.py b/tests/system-test/0-others/user_control.py
index 3be59f0adf691f9479cce2c927c9161741bc8130..a20b7b17bccf9a04d52e46d56af2a2d0b0e489fb 100644
--- a/tests/system-test/0-others/user_control.py
+++ b/tests/system-test/0-others/user_control.py
@@ -282,12 +282,12 @@ class TDTestCase:
use.error(f"insert into {DBNAME}.{CTBNAME} (ts) values (now())")
elif check_priv == PRIVILEGES_WRITE:
use.query(f"use {DBNAME}")
- use.query(f"show {DBNAME}.tables")
+ use.error(f"show {DBNAME}.tables")
use.error(f"select * from {DBNAME}.{CTBNAME}")
use.query(f"insert into {DBNAME}.{CTBNAME} (ts) values (now())")
elif check_priv is None:
use.error(f"use {DBNAME}")
- # use.error(f"show {DBNAME}.tables")
+ use.error(f"show {DBNAME}.tables")
use.error(f"show tables")
use.error(f"select * from {DBNAME}.{CTBNAME}")
use.error(f"insert into {DBNAME}.{CTBNAME} (ts) values (now())")
diff --git a/tests/system-test/1-insert/influxdb_line_taosc_insert.py b/tests/system-test/1-insert/influxdb_line_taosc_insert.py
index 25e2378f4611aea030011ed29ecce6b9b96cad84..cae4294bc90c16ad3fed032eff610f5b943d789e 100644
--- a/tests/system-test/1-insert/influxdb_line_taosc_insert.py
+++ b/tests/system-test/1-insert/influxdb_line_taosc_insert.py
@@ -31,7 +31,7 @@ if platform.system().lower() == 'windows':
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
+ tdSql.init(conn.cursor(), False)
self._conn = conn
def createDb(self, name="test", db_update_tag=0):
@@ -357,7 +357,7 @@ class TDTestCase:
"""
normal tags and cols, one for every elm
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
@@ -365,7 +365,7 @@ class TDTestCase:
"""
check all normal type
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
for t_type in full_type_list:
input_sql, stb_name = self.genFullTypeSql(c0=t_type, t0=t_type)
@@ -379,7 +379,7 @@ class TDTestCase:
please test :
binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
'''
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
nchar_symbols = f'L{binary_symbols}'
input_sql, stb_name = self.genFullTypeSql(c7=binary_symbols, c8=nchar_symbols, t7=binary_symbols, t8=nchar_symbols)
@@ -390,7 +390,7 @@ class TDTestCase:
test ts list --> ["1626006833639000000", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022"]
# ! us级时间戳都为0时,数据库中查询显示,但python接口拿到的结果不显示 .000000的情况请确认,目前修改时间处理代码可以通过
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
ts_list = ["1626006833639000000", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022", 0]
for ts in ts_list:
input_sql, stb_name = self.genFullTypeSql(ts=ts)
@@ -401,7 +401,7 @@ class TDTestCase:
check id.index in tags
eg: t0=**,id=**,t1=**
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True)
self.resCmp(input_sql, stb_name)
@@ -410,7 +410,7 @@ class TDTestCase:
check id param
eg: id and ID
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True)
self.resCmp(input_sql, stb_name)
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True)
@@ -420,7 +420,7 @@ class TDTestCase:
"""
id not exist
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True)
self.resCmp(input_sql, stb_name)
query_sql = f"select tbname from {stb_name}"
@@ -436,10 +436,10 @@ class TDTestCase:
max col count is ??
"""
for input_sql in [self.genLongSql(127, 1)[0], self.genLongSql(1, 4093)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
for input_sql in [self.genLongSql(129, 1)[0], self.genLongSql(1, 4095)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
except SchemalessError as err:
@@ -450,7 +450,7 @@ class TDTestCase:
test illegal id name
mix "~!@#$¥%^&*()-+|[]、「」【】;:《》<>?"
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
rstr = list("~!@#$¥%^&*()-+|[]、「」【】;:《》<>?")
for i in rstr:
stb_name=f"aaa{i}bbb"
@@ -462,7 +462,7 @@ class TDTestCase:
"""
id is start with num
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(tb_name=f"\"1aaabbb\"")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -473,7 +473,7 @@ class TDTestCase:
"""
check now unsupported
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="now")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -484,7 +484,7 @@ class TDTestCase:
"""
check date format ts unsupported
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -495,7 +495,7 @@ class TDTestCase:
"""
check ts format like 16260068336390us19
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="16260068336390us19")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -506,7 +506,7 @@ class TDTestCase:
"""
check full type tag value limit
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# i8
for t1 in ["-128i8", "127i8"]:
input_sql, stb_name = self.genFullTypeSql(t1=t1)
@@ -602,7 +602,7 @@ class TDTestCase:
"""
check full type col value limit
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# i8
for c1 in ["-128i8", "127i8"]:
input_sql, stb_name = self.genFullTypeSql(c1=c1)
@@ -699,7 +699,7 @@ class TDTestCase:
"""
test illegal tag col value
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# bool
for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
input_sql1 = self.genFullTypeSql(t0=i)[0]
@@ -758,7 +758,7 @@ class TDTestCase:
"""
check duplicate Id Tag Col
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql_id = self.genFullTypeSql(id_double_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -792,7 +792,7 @@ class TDTestCase:
"""
case no id when stb exist
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", c0="f")
self.resCmp(input_sql, stb_name)
input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", c0="f")
@@ -805,7 +805,7 @@ class TDTestCase:
"""
check duplicate insert when stb exist
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -816,7 +816,7 @@ class TDTestCase:
"""
check length increase
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
tb_name = tdCom.getLongName(5, "letters")
@@ -833,7 +833,7 @@ class TDTestCase:
* col is added without value when update==0
* col is added with value when update==1
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
for db_update_tag in [0, 1]:
if db_update_tag == 1 :
@@ -850,7 +850,7 @@ class TDTestCase:
"""
check column and tag count add
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", c0="f")
self.resCmp(input_sql, stb_name)
@@ -866,7 +866,7 @@ class TDTestCase:
condition: stb not change
insert two table, keep tag unchange, change col
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(t0="f", c0="f", id_noexist_tag=True)
self.resCmp(input_sql, stb_name)
tb_name1 = self.getNoIdTbName(stb_name)
@@ -888,7 +888,7 @@ class TDTestCase:
"""
every binary and nchar must be length+2
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(7, "letters")
tb_name = f'{stb_name}_1'
input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000'
@@ -928,7 +928,7 @@ class TDTestCase:
"""
check nchar length limit
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(7, "letters")
tb_name = f'{stb_name}_1'
input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000'
@@ -963,7 +963,7 @@ class TDTestCase:
"""
test batch insert
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
# tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
@@ -982,7 +982,7 @@ class TDTestCase:
"""
test multi insert
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
sql_list = []
stb_name = tdCom.getLongName(8, "letters")
# tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
@@ -996,7 +996,7 @@ class TDTestCase:
"""
test batch error insert
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
f"{stb_name},t2=5f64,t3=L\"ste\" c1=tRue,c2=4i64,c3=\"iam\" 1626056811823316532ns"]
@@ -1068,7 +1068,7 @@ class TDTestCase:
"""
thread input different stb
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genSqlList()[0]
self.multiThreadRun(self.genMultiThreadSeq(input_sql))
tdSql.query(f"show tables;")
@@ -1078,7 +1078,7 @@ class TDTestCase:
"""
thread input same stb tb, different data, result keep first data
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1095,7 +1095,7 @@ class TDTestCase:
"""
thread input same stb tb, different data, add columes and tags, result keep first data
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1112,7 +1112,7 @@ class TDTestCase:
"""
thread input same stb tb, different data, minus columes and tags, result keep first data
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1129,7 +1129,7 @@ class TDTestCase:
"""
thread input same stb, different tb, different data
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4]
@@ -1144,7 +1144,7 @@ class TDTestCase:
"""
thread input same stb, different tb, different data, add col, mul tag
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[5]
@@ -1159,7 +1159,7 @@ class TDTestCase:
"""
thread input same stb, different tb, different data, add tag, mul col
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_a_tag_m_col_list = self.genSqlList(stb_name=stb_name)[6]
@@ -1171,7 +1171,7 @@ class TDTestCase:
"""
thread input same stb tb, different ts
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1186,7 +1186,7 @@ class TDTestCase:
"""
thread input same stb tb, different ts, add col, mul tag
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1205,7 +1205,7 @@ class TDTestCase:
"""
thread input same stb tb, different ts, add tag, mul col
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1226,7 +1226,7 @@ class TDTestCase:
"""
thread input same stb, different tb, data, ts
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10]
@@ -1241,7 +1241,7 @@ class TDTestCase:
"""
thread input same stb, different tb, data, ts, add col, mul tag
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_d_ts_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[11]
diff --git a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
index 003abe9d10346f9b7cce1dbdb6f6f0ed73e3ea55..3b01784000b74c1f6bb072f24e8be36e99d37f4f 100644
--- a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
+++ b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
@@ -459,7 +459,7 @@ class TDTestCase:
normal tags and cols, one for every elm
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(value_type=value_type)
self.resCmp(input_json, stb_name)
@@ -468,7 +468,7 @@ class TDTestCase:
check all normal type
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
for t_type in full_type_list:
input_json_list = [self.genFullTypeJson(tag_value=self.genTagValue(t0_value=t_type))[0],
@@ -489,7 +489,7 @@ class TDTestCase:
binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
'''
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
nchar_symbols = binary_symbols
input_sql1, stb_name1 = self.genFullTypeJson(col_value=self.genTsColValue(value=binary_symbols, t_type="binary", value_type=value_type),
@@ -505,7 +505,7 @@ class TDTestCase:
# ! us级时间戳都为0时,数据库中查询显示,但python接口拿到的结果不显示 .000000的情况请确认,目前修改时间处理代码可以通过
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
ts_list = ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006834", 0]
for ts in ts_list:
if "s" in str(ts):
@@ -571,7 +571,7 @@ class TDTestCase:
eg: t0=**,id=**,t1=**
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(id_change_tag=True, value_type=value_type)
self.resCmp(input_json, stb_name)
@@ -581,7 +581,7 @@ class TDTestCase:
eg: id and ID
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(id_upper_tag=True, value_type=value_type)
self.resCmp(input_json, stb_name)
input_json, stb_name = self.genFullTypeJson(id_mixul_tag=True, value_type=value_type)
@@ -594,7 +594,7 @@ class TDTestCase:
id not exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(id_noexist_tag=True, value_type=value_type)
self.resCmp(input_json, stb_name)
query_sql = f"select tbname from {stb_name}"
@@ -610,10 +610,10 @@ class TDTestCase:
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
for input_json in [self.genLongJson(128, value_type)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
for input_json in [self.genLongJson(129, value_type)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
except SchemalessError as err:
@@ -625,7 +625,7 @@ class TDTestCase:
mix "`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?"
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
rstr = list("`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?")
for i in rstr:
input_json = self.genFullTypeJson(tb_name=f'aa{i}bb', value_type=value_type)[0]
@@ -639,7 +639,7 @@ class TDTestCase:
id is start with num
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(tb_name="1aaabbb", value_type=value_type)[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -651,7 +651,7 @@ class TDTestCase:
check now unsupported
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="now", t_type="ns", value_type=value_type))[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -663,7 +663,7 @@ class TDTestCase:
check date format ts unsupported
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="2021-07-21\ 19:01:46.920", t_type="ns", value_type=value_type))[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -675,7 +675,7 @@ class TDTestCase:
check ts format like 16260068336390us19
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="16260068336390us19", t_type="us", value_type=value_type))[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -690,7 +690,7 @@ class TDTestCase:
length of stb_name tb_name <= 192
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tdSql.execute('reset query cache')
stb_name_192 = tdCom.getLongName(len=192, mode="letters")
tb_name_192 = tdCom.getLongName(len=192, mode="letters")
@@ -715,7 +715,7 @@ class TDTestCase:
check tag name limit <= 62
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tag_name = tdCom.getLongName(61, "letters")
tag_name = f't{tag_name}'
stb_name = tdCom.getLongName(7, "letters")
@@ -733,7 +733,7 @@ class TDTestCase:
check full type tag value limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# i8
for t1 in [-127, 127]:
input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t1_value=t1, value_type=value_type))
@@ -854,12 +854,12 @@ class TDTestCase:
check full type col value limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# i8
for value in [-128, 127]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint", value_type=value_type))
self.resCmp(input_json, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-129, 128]:
input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint"))[0]
try:
@@ -868,11 +868,11 @@ class TDTestCase:
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
# i16
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-32768]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint", value_type=value_type))
self.resCmp(input_json, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-32769, 32768]:
input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint"))[0]
try:
@@ -882,11 +882,11 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# i32
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-2147483648]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int", value_type=value_type))
self.resCmp(input_json, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-2147483649, 2147483648]:
input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int"))[0]
try:
@@ -896,12 +896,12 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# i64
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-9223372036854775808]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint", value_type=value_type))
self.resCmp(input_json, stb_name)
# ! bug
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# for value in [-9223372036854775809, 9223372036854775808]:
# print(value)
# input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint"))[0]
@@ -913,12 +913,12 @@ class TDTestCase:
# tdSql.checkNotEqual(err.errno, 0)
# f32
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-3.4028234663852885981170418348451692544*(10**38), 3.4028234663852885981170418348451692544*(10**38)]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float", value_type=value_type))
self.resCmp(input_json, stb_name)
# * limit set to 4028234664*(10**38)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-3.4028234664*(10**38), 3.4028234664*(10**38)]:
input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float"))[0]
try:
@@ -928,12 +928,12 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# f64
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308), -1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type))
self.resCmp(input_json, stb_name)
# * limit set to 1.797693134862316*(10**308)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-1.797693134862316*(10**308), -1.797693134862316*(10**308)]:
input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type))[0]
try:
@@ -944,12 +944,12 @@ class TDTestCase:
# if value_type == "obj":
# # binary
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# stb_name = tdCom.getLongName(7, "letters")
# input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16374, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16375, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
# try:
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -959,12 +959,12 @@ class TDTestCase:
# # nchar
# # * legal nchar could not be larger than 16374/4
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# stb_name = tdCom.getLongName(7, "letters")
# input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(4093, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(4094, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
# try:
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -973,14 +973,14 @@ class TDTestCase:
# tdSql.checkNotEqual(err.errno, 0)
# elif value_type == "default":
# # binary
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# stb_name = tdCom.getLongName(7, "letters")
# if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
# input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16374, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
# elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
# input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(4093, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
# input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16375, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
# elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
@@ -997,7 +997,7 @@ class TDTestCase:
test illegal tag col value
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# bool
for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
try:
@@ -1046,7 +1046,7 @@ class TDTestCase:
check duplicate Id Tag Col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(id_double_tag=True, value_type=value_type)[0]
print(input_json)
try:
@@ -1068,7 +1068,7 @@ class TDTestCase:
case no id when stb exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(tb_name="sub_table_0123456", col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
self.resCmp(input_json, stb_name)
input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, id_noexist_tag=True, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
@@ -1081,7 +1081,7 @@ class TDTestCase:
check duplicate insert when stb exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(value_type=value_type)
self.resCmp(input_json, stb_name)
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1091,7 +1091,7 @@ class TDTestCase:
"""
check length increase
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(value_type=value_type)
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
self.resCmp(input_json, stb_name)
@@ -1105,7 +1105,7 @@ class TDTestCase:
check length increase
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = "test_crash"
input_json = self.genFullTypeJson(stb_name=stb_name)[0]
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1128,7 +1128,7 @@ class TDTestCase:
* col is added with value when update==1
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
for db_update_tag in [0, 1]:
if db_update_tag == 1 :
@@ -1154,7 +1154,7 @@ class TDTestCase:
check tag count add
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
self.resCmp(input_json, stb_name)
@@ -1171,7 +1171,7 @@ class TDTestCase:
insert two table, keep tag unchange, change col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), id_noexist_tag=True)
self.resCmp(input_json, stb_name)
tb_name1 = self.getNoIdTbName(stb_name)
@@ -1194,7 +1194,7 @@ class TDTestCase:
every binary and nchar must be length+2
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(7, "letters")
tb_name = f'{stb_name}_1'
tag_value = {"t0": {"value": True, "type": "bool"}}
@@ -1240,7 +1240,7 @@ class TDTestCase:
check nchar length limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(7, "letters")
tb_name = f'{stb_name}_1'
tag_value = {"t0": True}
@@ -1284,7 +1284,7 @@ class TDTestCase:
test batch insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = "stb_name"
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": 1, "type": "bigint"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}},
@@ -1319,7 +1319,7 @@ class TDTestCase:
test multi insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
sql_list = list()
stb_name = tdCom.getLongName(8, "letters")
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
@@ -1335,7 +1335,7 @@ class TDTestCase:
test batch error insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": "tt", "type": "bool"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}},
{"metric": "st123456", "timestamp": {"value": 1626006933641000000, "type": "ns"}, "value": {"value": 9, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}]
try:
@@ -1349,7 +1349,7 @@ class TDTestCase:
test multi cols insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(c_multi_tag=True, value_type=value_type)[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1362,7 +1362,7 @@ class TDTestCase:
test blank col insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(c_blank_tag=True, value_type=value_type)[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1375,7 +1375,7 @@ class TDTestCase:
test blank tag insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(t_blank_tag=True, value_type=value_type)[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1388,7 +1388,7 @@ class TDTestCase:
check nchar ---> chinese
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(chinese_tag=True)
self.resCmp(input_json, stb_name)
@@ -1397,7 +1397,7 @@ class TDTestCase:
multi_field
'''
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(multi_field_tag=True, value_type=value_type)[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1407,7 +1407,7 @@ class TDTestCase:
def spellCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
input_json_list = [{"metric": f'{stb_name}_1', "timestamp": {"value": 1626006833639000000, "type": "Ns"}, "value": {"value": 1, "type": "Bigint"}, "tags": {"t1": {"value": 127, "type": "tinYint"}}},
{"metric": f'{stb_name}_2', "timestamp": {"value": 1626006833639000001, "type": "nS"}, "value": {"value": 32767, "type": "smallInt"}, "tags": {"t1": {"value": 32767, "type": "smallInt"}}},
@@ -1426,7 +1426,7 @@ class TDTestCase:
def tbnameTagsColsNameCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = {'metric': 'rFa$sta', 'timestamp': {'value': 1626006834, 'type': 's'}, 'value': {'value': True, 'type': 'bool'}, 'tags': {'Tt!0': {'value': False, 'type': 'bool'}, 'tT@1': {'value': 127, 'type': 'tinyint'}, 't@2': {'value': 32767, 'type': 'smallint'}, 't$3': {'value': 2147483647, 'type': 'int'}, 't%4': {'value': 9223372036854775807, 'type': 'bigint'}, 't^5': {'value': 11.12345027923584, 'type': 'float'}, 't&6': {'value': 22.123456789, 'type': 'double'}, 't*7': {'value': 'binaryTagValue', 'type': 'binary'}, 't!@#$%^&*()_+[];:<>?,9': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': 'rFas$ta_1'}}
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
query_sql = 'select * from `rFa$sta`'
@@ -1441,7 +1441,7 @@ class TDTestCase:
metric value "." trans to "_"
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(point_trans_tag=True, value_type=value_type)[0]
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
tdSql.execute("drop table `.point.trans.test`")
@@ -1509,7 +1509,7 @@ class TDTestCase:
thread input different stb
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genSqlList(value_type=value_type)[0]
self.multiThreadRun(self.genMultiThreadSeq(input_json))
tdSql.query(f"show tables;")
@@ -1520,7 +1520,7 @@ class TDTestCase:
thread input same stb tb, different data, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
@@ -1538,7 +1538,7 @@ class TDTestCase:
thread input same stb tb, different data, add columes and tags, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
@@ -1556,7 +1556,7 @@ class TDTestCase:
thread input same stb tb, different data, minus columes and tags, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
@@ -1574,7 +1574,7 @@ class TDTestCase:
thread input same stb, different tb, different data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
s_stb_d_tb_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[4]
@@ -1587,7 +1587,7 @@ class TDTestCase:
thread input same stb, different tb, different data, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
s_stb_d_tb_m_tag_list = [({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "omfdhyom", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'),
@@ -1605,7 +1605,7 @@ class TDTestCase:
thread input same stb, different tb, different data, add tag, mul col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[6]
@@ -1618,7 +1618,7 @@ class TDTestCase:
thread input same stb tb, different ts
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
@@ -1638,7 +1638,7 @@ class TDTestCase:
thread input same stb tb, different ts, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
@@ -1660,7 +1660,7 @@ class TDTestCase:
thread input same stb tb, different ts, add tag, mul col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
@@ -1683,7 +1683,7 @@ class TDTestCase:
thread input same stb, different tb, data, ts
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[10]
@@ -1696,7 +1696,7 @@ class TDTestCase:
thread input same stb, different tb, data, ts, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
s_stb_d_tb_d_ts_m_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
diff --git a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
index 3c47a65746b89c96b77408b6c910c88a8703e147..209cfb724e460207493dc2ca1ab0dd3522eb333b 100644
--- a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
+++ b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
@@ -30,7 +30,7 @@ if platform.system().lower() == 'windows':
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
+ tdSql.init(conn.cursor(), False)
self._conn = conn
self.smlChildTableName_value = "id"
@@ -351,7 +351,7 @@ class TDTestCase:
normal tags and cols, one for every elm
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
@@ -360,7 +360,7 @@ class TDTestCase:
check all normal type
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
for t_type in full_type_list:
input_sql, stb_name = self.genFullTypeSql(t0=t_type, protocol=protocol)
@@ -375,7 +375,7 @@ class TDTestCase:
binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
'''
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
nchar_symbols = f'L{binary_symbols}'
input_sql1, stb_name1 = self.genFullTypeSql(value=binary_symbols, t7=binary_symbols, t8=nchar_symbols, protocol=protocol)
@@ -388,7 +388,7 @@ class TDTestCase:
test ts list --> ["1626006833640ms", "1626006834s", "1626006822639022"]
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(ts=1626006833640)
self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value)
input_sql, stb_name = self.genFullTypeSql(ts=1626006833640)
@@ -407,7 +407,7 @@ class TDTestCase:
def openTstbTelnetTsCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 0 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64'
stb_name = input_sql.split(" ")[0]
self.resCmp(input_sql, stb_name, ts=0)
@@ -431,7 +431,7 @@ class TDTestCase:
eg: t0=**,id=**,t1=**
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
@@ -441,7 +441,7 @@ class TDTestCase:
eg: id and ID
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True, protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
input_sql, stb_name = self.genFullTypeSql(id_mixul_tag=True, protocol=protocol)
@@ -454,7 +454,7 @@ class TDTestCase:
id not exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True, protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
query_sql = f"select tbname from {stb_name}"
@@ -470,10 +470,10 @@ class TDTestCase:
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
for input_sql in [self.genLongSql(128)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
for input_sql in [self.genLongSql(129)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
@@ -486,7 +486,7 @@ class TDTestCase:
mix "`~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?"
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
rstr = list("~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?")
for i in rstr:
input_sql, stb_name = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"", protocol=protocol)
@@ -498,7 +498,7 @@ class TDTestCase:
id is start with num
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(tb_name="1aaabbb", protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
@@ -507,7 +507,7 @@ class TDTestCase:
check now unsupported
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="now")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -520,7 +520,7 @@ class TDTestCase:
check date format ts unsupported
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -533,7 +533,7 @@ class TDTestCase:
check ts format like 16260068336390us19
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="16260068336390us19")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -551,7 +551,7 @@ class TDTestCase:
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
stb_name_192 = tdCom.getLongName(len=192, mode="letters")
tb_name_192 = tdCom.getLongName(len=192, mode="letters")
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name_192, tb_name=tb_name_192)
self.resCmp(input_sql, stb_name)
tdSql.query(f'select * from {stb_name}')
@@ -581,7 +581,7 @@ class TDTestCase:
check tag name limit <= 62
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tag_name = tdCom.getLongName(61, "letters")
tag_name = f'T{tag_name}'
stb_name = tdCom.getLongName(7, "letters")
@@ -599,7 +599,7 @@ class TDTestCase:
check full type tag value limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# nchar
# * legal nchar could not be larger than 16374/4
stb_name = tdCom.getLongName(7, "letters")
@@ -618,12 +618,12 @@ class TDTestCase:
check full type col value limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# i8
for value in ["-128i8", "127i8"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-129i8", "128i8"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
@@ -632,11 +632,11 @@ class TDTestCase:
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
# i16
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-32768i16"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-32769i16", "32768i16"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
@@ -646,11 +646,11 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# i32
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-2147483648i32"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-2147483649i32", "2147483648i32"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
@@ -660,11 +660,11 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# i64
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-9223372036854775808i64"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-9223372036854775809i64", "9223372036854775808i64"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
@@ -674,12 +674,12 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# f32
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
# * limit set to 4028234664*(10**38)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
@@ -689,12 +689,12 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# f64
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
# # * limit set to 1.797693134862316*(10**308)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# for value in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']:
# input_sql = self.genFullTypeSql(value=value)[0]
# try:
@@ -704,12 +704,12 @@ class TDTestCase:
# tdSql.checkNotEqual(err.errno, 0)
# # # binary
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# stb_name = tdCom.getLongName(7, "letters")
# input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16374, "letters")}" t0=t'
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16375, "letters")}" t0=t'
# try:
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -719,12 +719,12 @@ class TDTestCase:
# # nchar
# # * legal nchar could not be larger than 16374/4
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# stb_name = tdCom.getLongName(7, "letters")
# input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4093, "letters")}" t0=t'
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4094, "letters")}" t0=t'
# try:
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -738,7 +738,7 @@ class TDTestCase:
test illegal tag col value
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# bool
for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
input_sql1, stb_name = self.genFullTypeSql(t0=i)
@@ -774,7 +774,7 @@ class TDTestCase:
check blank case
'''
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc aaa" t0=t',
# f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"',
# f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"',
@@ -792,7 +792,7 @@ class TDTestCase:
check duplicate Id Tag Col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql_id = self.genFullTypeSql(id_double_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.TELNET.value, None)
@@ -815,7 +815,7 @@ class TDTestCase:
case no id when stb exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", value="f")
self.resCmp(input_sql, stb_name)
input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", value="f")
@@ -828,7 +828,7 @@ class TDTestCase:
check duplicate insert when stb exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -840,7 +840,7 @@ class TDTestCase:
check length increase
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
tb_name = tdCom.getLongName(5, "letters")
@@ -858,7 +858,7 @@ class TDTestCase:
* col is added with value when update==1
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
for db_update_tag in [0, 1]:
if db_update_tag == 1 :
@@ -885,7 +885,7 @@ class TDTestCase:
check tag count add
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f")
self.resCmp(input_sql, stb_name)
@@ -902,7 +902,7 @@ class TDTestCase:
insert two table, keep tag unchange, change col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(t0="f", value="f", id_noexist_tag=True)
self.resCmp(input_sql, stb_name)
tb_name1 = self.getNoIdTbName(stb_name)
@@ -925,7 +925,7 @@ class TDTestCase:
check nchar length limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(7, "letters")
input_sql = f'{stb_name} 1626006833640 f t2={tdCom.getLongName(1, "letters")}'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -949,7 +949,7 @@ class TDTestCase:
test batch insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
@@ -976,7 +976,7 @@ class TDTestCase:
test multi insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
sql_list = []
stb_name = tdCom.getLongName(8, "letters")
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 nchar(10))')
@@ -992,7 +992,7 @@ class TDTestCase:
test batch error insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
lines = ["st123456 1626006833640 3i 64 t1=3i64 t2=4f64 t3=\"t3\"",
f"{stb_name} 1626056811823316532ns tRue t2=5f64 t3=L\"ste\""]
@@ -1007,7 +1007,7 @@ class TDTestCase:
test multi cols insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(c_multi_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -1020,7 +1020,7 @@ class TDTestCase:
test blank col insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(c_blank_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -1033,7 +1033,7 @@ class TDTestCase:
test blank tag insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(t_blank_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -1046,7 +1046,7 @@ class TDTestCase:
check nchar ---> chinese
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(chinese_tag=True)
self.resCmp(input_sql, stb_name)
@@ -1055,7 +1055,7 @@ class TDTestCase:
multi_field
'''
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(multi_field_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -1065,7 +1065,7 @@ class TDTestCase:
def spellCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
input_sql_list = [f'{stb_name}_1 1626006833640 127I8 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
f'{stb_name}_2 1626006833640 32767I16 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
@@ -1086,7 +1086,7 @@ class TDTestCase:
metric value "." trans to "_"
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(point_trans_tag=True, protocol=protocol)[0]
if protocol == 'telnet-tcp':
stb_name = f'`{input_sql.split(" ")[1]}`'
@@ -1097,7 +1097,7 @@ class TDTestCase:
def defaultTypeCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
input_sql_list = [f'{stb_name}_1 1626006833640 9223372036854775807 t0=f t1=127 t2=32767i16 t3=2147483647i32 t4=9223372036854775807 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \
f'{stb_name}_2 1626006833641 22.123456789 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789 t7="vozamcts" t8=L"ncharTagValue"', \
@@ -1110,7 +1110,7 @@ class TDTestCase:
def tbnameTagsColsNameCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
if self.smlChildTableName_value == "ID":
input_sql = 'rFa$sta 1626006834 9223372036854775807 id=rFas$ta_1 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -1135,7 +1135,7 @@ class TDTestCase:
stb = "put"
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(tcp_keyword_tag=True, protocol=protocol)[0]
stb_name = f'`{input_sql.split(" ")[1]}`'
self.resCmp(input_sql, stb_name, protocol=protocol)
@@ -1204,7 +1204,7 @@ class TDTestCase:
thread input different stb
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genSqlList()[0]
print(input_sql)
self.multiThreadRun(self.genMultiThreadSeq(input_sql))
@@ -1216,7 +1216,7 @@ class TDTestCase:
thread input same stb tb, different data, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1235,7 +1235,7 @@ class TDTestCase:
thread input same stb tb, different data, add columes and tags, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1254,7 +1254,7 @@ class TDTestCase:
thread input same stb tb, different data, minus columes and tags, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1273,7 +1273,7 @@ class TDTestCase:
thread input same stb, different tb, different data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4]
@@ -1286,7 +1286,7 @@ class TDTestCase:
thread input same stb, different tb, different data, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
s_stb_d_tb_m_tag_list = [(f'{stb_name} 1626006833640 "omfdhyom" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
@@ -1303,7 +1303,7 @@ class TDTestCase:
thread input same stb, different tb, different data, add tag, mul col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name)[6]
@@ -1316,7 +1316,7 @@ class TDTestCase:
thread input same stb tb, different ts
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1336,7 +1336,7 @@ class TDTestCase:
thread input same stb tb, different ts, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1354,7 +1354,7 @@ class TDTestCase:
thread input same stb tb, different ts, add tag, mul col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1377,7 +1377,7 @@ class TDTestCase:
thread input same stb, different tb, data, ts
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10]
@@ -1390,7 +1390,7 @@ class TDTestCase:
thread input same stb, different tb, data, ts, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
s_stb_d_tb_d_ts_m_tag_list = [(f'{stb_name} 0 "mnpmtzul" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
diff --git a/tests/system-test/2-query/function_diff.py b/tests/system-test/2-query/function_diff.py
index fd5d6ea1cf1d42623443cbe13eb60aac6b9e80ac..946453bb23137d6ebbb67f8d588a67e054f5c2f1 100644
--- a/tests/system-test/2-query/function_diff.py
+++ b/tests/system-test/2-query/function_diff.py
@@ -193,43 +193,38 @@ class TDTestCase:
# case17: only support normal table join
case17 = {
- "col": "t1.c1",
- "table_expr": "t1, t2",
- "condition": "where t1.ts=t2.ts"
+ "col": "table1.c1 ",
+ "table_expr": "db.t1 as table1, db.t2 as table2",
+ "condition": "where table1.ts=table2.ts"
}
self.checkdiff(**case17)
- # case18~19: with group by
- # case18 = {
- # "table_expr": "db.t1",
- # "condition": "group by c6"
- # }
- # self.checkdiff(**case18)
+ # case18~19: with group by , function diff not support group by
+
case19 = {
- "table_expr": "db.stb1",
+ "table_expr": "db.stb1 where tbname =='t0' ",
"condition": "partition by tbname order by tbname" # partition by tbname
}
self.checkdiff(**case19)
- # # case20~21: with order by
- # case20 = {"condition": "order by ts"}
- # self.checkdiff(**case20)
+ # case20~21: with order by , Not a single-group group function
- # # case22: with union
+ # case22: with union
# case22 = {
- # "condition": "union all select diff(c1) from t2"
+ # "condition": "union all select diff(c1) from db.t2 "
# }
# self.checkdiff(**case22)
+ tdSql.query("select count(c1) from db.t1 union all select count(c1) from db.t2")
# case23: with limit/slimit
case23 = {
"condition": "limit 1"
}
self.checkdiff(**case23)
- # case24 = {
- # "table_expr": "db.stb1",
- # "condition": "group by tbname slimit 1 soffset 1"
- # }
- # self.checkdiff(**case24)
+ case24 = {
+ "table_expr": "db.stb1",
+ "condition": "partition by tbname order by tbname slimit 1 soffset 1"
+ }
+ self.checkdiff(**case24)
pass
@@ -284,9 +279,9 @@ class TDTestCase:
tdSql.query(self.diff_query_form(alias=", c2")) # mix with other 1
# tdSql.error(self.diff_query_form(table_expr="db.stb1")) # select stb directly
stb_join = {
- "col": "stb1.c1",
- "table_expr": "stb1, stb2",
- "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
+ "col": "stable1.c1",
+ "table_expr": "db.stb1 as stable1, db.stb2 as stable2",
+ "condition": "where stable1.ts=stable2.ts and stable1.st1=stable2.st2 order by stable1.ts"
}
tdSql.query(self.diff_query_form(**stb_join)) # stb join
interval_sql = {
@@ -315,20 +310,20 @@ class TDTestCase:
for i in range(tbnum):
for j in range(data_row):
tdSql.execute(
- f"insert into t{i} values ("
+ f"insert into db.t{i} values ("
f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
)
tdSql.execute(
- f"insert into t{i} values ("
+ f"insert into db.t{i} values ("
f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
)
tdSql.execute(
- f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+ f"insert into db.tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
)
pass
@@ -349,8 +344,8 @@ class TDTestCase:
"create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
)
for i in range(tbnum):
- tdSql.execute(f"create table t{i} using db.stb1 tags({i})")
- tdSql.execute(f"create table tt{i} using db.stb2 tags({i})")
+ tdSql.execute(f"create table db.t{i} using db.stb1 tags({i})")
+ tdSql.execute(f"create table db.tt{i} using db.stb2 tags({i})")
pass
def diff_support_stable(self):
@@ -398,8 +393,8 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert only NULL test:")
for i in range(tbnum):
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})")
+ tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime - 5})")
+ tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime + 5})")
self.diff_current_query()
self.diff_error_query()
@@ -430,9 +425,9 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert data mix with NULL test:")
for i in range(tbnum):
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
+ tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime})")
+ tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
+ tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
self.diff_current_query()
self.diff_error_query()
diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py
index 934ba9e161c8787dc36cfdafc15044eb9e0ec425..5550519e05249de13d1267dd2a8f5bc1b10fae6d 100644
--- a/tests/system-test/2-query/interp.py
+++ b/tests/system-test/2-query/interp.py
@@ -551,7 +551,57 @@ class TDTestCase:
tdSql.checkData(0, 0, 15)
tdSql.checkData(1, 0, 15)
- tdLog.printNoPrefix("==========step9:test error cases")
+ tdLog.printNoPrefix("==========step9:test multi-interp cases")
+ tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(null)")
+ tdSql.checkRows(5)
+ tdSql.checkCols(4)
+
+ for i in range (tdSql.queryCols):
+ tdSql.checkData(0, i, None)
+ tdSql.checkData(1, i, None)
+ tdSql.checkData(2, i, 15)
+ tdSql.checkData(3, i, None)
+ tdSql.checkData(4, i, None)
+
+ tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(value, 1)")
+ tdSql.checkRows(5)
+ tdSql.checkCols(4)
+
+ for i in range (tdSql.queryCols):
+ tdSql.checkData(0, i, 1)
+ tdSql.checkData(1, i, 1)
+ tdSql.checkData(2, i, 15)
+ tdSql.checkData(3, i, 1)
+ tdSql.checkData(4, i, 1)
+
+ tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(prev)")
+ tdSql.checkRows(5)
+ tdSql.checkCols(4)
+
+ for i in range (tdSql.queryCols):
+ tdSql.checkData(0, i, 5)
+ tdSql.checkData(1, i, 5)
+ tdSql.checkData(2, i, 15)
+ tdSql.checkData(3, i, 15)
+ tdSql.checkData(4, i, 15)
+
+ tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(next)")
+ tdSql.checkRows(3)
+ tdSql.checkCols(4)
+
+ for i in range (tdSql.queryCols):
+ tdSql.checkData(0, i, 15)
+ tdSql.checkData(1, i, 15)
+ tdSql.checkData(2, i, 15)
+
+ tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkCols(4)
+
+ for i in range (tdSql.queryCols):
+ tdSql.checkData(0, i, 15)
+
+ tdLog.printNoPrefix("==========step10:test error cases")
tdSql.error(f"select interp(c0) from {dbname}.{tbname}")
tdSql.error(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05')")
diff --git a/tests/system-test/2-query/join2.py b/tests/system-test/2-query/join2.py
index 5533cb840e29d2e0b109687f2aa3189d2c26a381..5c8fe0f0f96e6c28aa1ef70240b3ef4d5b0598fa 100644
--- a/tests/system-test/2-query/join2.py
+++ b/tests/system-test/2-query/join2.py
@@ -52,12 +52,12 @@ class TDTestCase:
return query_condition
- def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
+ def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False, alias_tb1="tb1", alias_tb2="tb2"):
table_reference = tb_list[0]
join_condition = table_reference
join = "inner join" if INNER else "join"
for i in range(len(tb_list[1:])):
- join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
+ join_condition += f" as {alias_tb1} {join} {tb_list[i+1]} as {alias_tb2} on {alias_tb1}.{filter}={alias_tb2}.{filter}"
return join_condition
@@ -123,28 +123,28 @@ class TDTestCase:
sqls = []
__join_tblist = self.__join_tblist
for join_tblist in __join_tblist:
- for join_tb in join_tblist:
- select_claus_list = self.__query_condition(join_tb)
- for select_claus in select_claus_list:
- group_claus = self.__group_condition( col=select_claus)
- where_claus = self.__where_condition( query_conditon=select_claus )
- having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" )
- sqls.extend(
- (
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus),
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist), having_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist)),
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, ),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), having_claus ),
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True) ),
- )
+ alias_tb = "tb1"
+ select_claus_list = self.__query_condition(alias_tb)
+ for select_claus in select_claus_list:
+ group_claus = self.__group_condition( col=select_claus)
+ where_claus = self.__where_condition( query_conditon=select_claus )
+ having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" )
+ sqls.extend(
+ (
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus, having_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), having_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb)),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True), where_claus, having_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True), where_claus, ),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True), having_claus ),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True) ),
)
+ )
return list(filter(None, sqls))
def __join_check(self,):
@@ -341,10 +341,8 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute(f"flush database db")
- tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
diff --git a/tests/system-test/2-query/json_tag.py b/tests/system-test/2-query/json_tag.py
index 856d7647477f8693e0f20f6950e1ab810c47b4d4..d9715579aed4878c1cf17642824718d412a77511 100644
--- a/tests/system-test/2-query/json_tag.py
+++ b/tests/system-test/2-query/json_tag.py
@@ -338,7 +338,7 @@ class TDTestCase:
tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' between 1 and 30")
tdSql.checkRows(3)
tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' between 'femail' and 'beijing'")
- tdSql.checkRows(2)
+ tdSql.checkRows(0)
# test with tbname/normal column
tdSql.query(f"select * from {dbname}.jsons1 where tbname = 'jsons1_1'")
diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py
index f65744a0b7666a2fc9d0e60332fc20fb53ca6886..5d435b068fb12959fd2bdc6f02968b2a7ffe7c9d 100644
--- a/tests/system-test/2-query/last_row.py
+++ b/tests/system-test/2-query/last_row.py
@@ -638,13 +638,13 @@ class TDTestCase:
tdSql.query(f"select ts , last_row(c1) ,c1 from (select ts , c1 ,t1 from {dbname}.stb1)")
tdSql.checkData(0,1,None)
- tdSql.query(f"select ts , last_row(c1) ,c1 from (select ts , max(c1) c1 ,t1 from {dbname}.stb1 where ts >now -1h and ts now -1h and ts now -1h and ts now -1h and ts now -1h and ts now -1h and ts 0:
- elem = math.log(elem)
- elif elem <=0:
- elem = None
- row_check.append(elem)
- auto_result.append(row_check)
-
- check_status = True
- for row_index , row in enumerate(log_result):
- for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query )
- sys.exit(1)
- else:
- tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query )
-
- def check_result_auto_log2(self ,origin_query , log_query):
-
- log_result = tdSql.getResult(log_query)
- origin_result = tdSql.getResult(origin_query)
-
- auto_result =[]
-
- for row in origin_result:
- row_check = []
- for elem in row:
- if elem == None:
- elem = None
- elif elem >0:
- elem = math.log(elem,2)
- elif elem <=0:
- elem = None
- row_check.append(elem)
- auto_result.append(row_check)
-
- check_status = True
- for row_index , row in enumerate(log_result):
- for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query )
- sys.exit(1)
- else:
- tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query )
-
- def check_result_auto_log1(self ,origin_query , log_query):
- log_result = tdSql.getResult(log_query)
- origin_result = tdSql.getResult(origin_query)
-
- auto_result =[]
-
- for row in origin_result:
- row_check = []
- for elem in row:
- if elem == None:
- elem = None
- elif elem >0:
- elem = None
- elif elem <=0:
- elem = None
- row_check.append(elem)
- auto_result.append(row_check)
-
- check_status = True
- for row_index , row in enumerate(log_result):
- for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query )
- sys.exit(1)
- else:
- tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query )
- def check_result_auto_log__10(self ,origin_query , log_query):
log_result = tdSql.getResult(log_query)
origin_result = tdSql.getResult(origin_query)
@@ -163,26 +75,30 @@ class TDTestCase:
for row in origin_result:
row_check = []
for elem in row:
- if elem == None:
- elem = None
- elif elem >0:
- elem = None
- elif elem <=0:
+ if base ==1:
elem = None
+ else:
+ if elem == None:
+ elem = None
+ elif elem ==1:
+ elem = 0.0
+ elif elem >0 and elem !=1 :
+ if base==None :
+ elem = math.log(elem )
+ else:
+ print(base , elem)
+ elem = math.log(elem , base)
+ elif elem <=0:
+ elem = None
+
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
+ tdSql.query(log_query)
for row_index , row in enumerate(log_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query )
- sys.exit(1)
- else:
- tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query )
-
+ tdSql.checkData(row_index , col_index ,auto_result[row_index][col_index])
+
def test_errors(self, dbname="db"):
error_sql_lists = [
f"select log from {dbname}.t1",
@@ -328,10 +244,10 @@ class TDTestCase:
tdSql.checkData(3 , 0, 1.098612289)
tdSql.checkData(4 , 0, 1.386294361)
- self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.t1")
- self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,2), log(c2 ,2) ,log(c3, 2), log(c4 ,2), log(c5 ,2) from {dbname}.t1")
- self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,1), log(c2 ,1) ,log(c3, 1), log(c4 ,1), log(c5 ,1) from {dbname}.t1")
- self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,-10), log(c2 ,-10) ,log(c3, -10), log(c4 ,-10), log(c5 ,-10) from {dbname}.t1")
+ self.check_result_auto_log( None , f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.t1")
+ self.check_result_auto_log( 2 , f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,2), log(c2 ,2) ,log(c3, 2), log(c4 ,2), log(c5 ,2) from {dbname}.t1")
+ self.check_result_auto_log( 1, f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,1), log(c2 ,1) ,log(c3, 1), log(c4 ,1), log(c5 ,1) from {dbname}.t1")
+ self.check_result_auto_log( 10 ,f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,10), log(c2 ,10) ,log(c3, 10), log(c4 ,10), log(c5 ,10) from {dbname}.t1")
# used for sub table
tdSql.query(f"select c1 ,log(c1 ,3) from {dbname}.ct1")
@@ -349,9 +265,9 @@ class TDTestCase:
tdSql.checkData(3 , 2, 0.147315235)
tdSql.checkData(4 , 2, None)
- self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.ct1")
- self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) from {dbname}.ct1")
- self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) from {dbname}.ct1")
+ self.check_result_auto_log( None ,f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.ct1")
+ self.check_result_auto_log( 2, f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) from {dbname}.ct1")
+ self.check_result_auto_log( 10 , f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,10), log(c2,10) ,log(c3,10), log(c4,10), log(c5,10) from {dbname}.ct1")
# nest query for log functions
tdSql.query(f"select c1 , log(c1,3) ,log(log(c1,3),3) , log(log(log(c1,3),3),3) from {dbname}.ct1;")
@@ -585,15 +501,15 @@ class TDTestCase:
tdSql.error(
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from {dbname}.sub1_bound")
- self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from {dbname}.sub1_bound")
- self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) ,log(c6,-10) from {dbname}.sub1_bound")
+ self.check_result_auto_log(None , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from {dbname}.sub1_bound")
+ self.check_result_auto_log( 2 , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from {dbname}.sub1_bound")
+ self.check_result_auto_log( 10 , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,10), log(c2,10) ,log(c3,10), log(c4,10), log(c5,10) ,log(c6,10) from {dbname}.sub1_bound")
- self.check_result_auto_log2( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c3,2), log(c2,2) ,log(c1,2) from {dbname}.sub1_bound")
- self.check_result_auto_log( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c3), log(c2) ,log(c1) from {dbname}.sub1_bound")
+ self.check_result_auto_log( 2 , f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c3,2), log(c2,2) ,log(c1,2) from {dbname}.sub1_bound")
+ self.check_result_auto_log( None , f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c3), log(c2) ,log(c1) from {dbname}.sub1_bound")
- self.check_result_auto_log2(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select log(abs(c1) ,2) from {dbname}.sub1_bound" )
+ self.check_result_auto_log(2 , f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select log(abs(c1) ,2) from {dbname}.sub1_bound" )
# check basic elem for table per row
tdSql.query(f"select log(abs(c1),2) ,log(abs(c2),2) , log(abs(c3),2) , log(abs(c4),2), log(abs(c5),2), log(abs(c6),2) from {dbname}.sub1_bound ")
@@ -647,15 +563,15 @@ class TDTestCase:
def support_super_table_test(self, dbname="db"):
- self.check_result_auto_log2( f"select c5 from {dbname}.stb1 order by ts " , f"select log(c5,2) from {dbname}.stb1 order by ts" )
- self.check_result_auto_log2( f"select c5 from {dbname}.stb1 order by tbname " , f"select log(c5,2) from {dbname}.stb1 order by tbname" )
- self.check_result_auto_log2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_log2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log( 2 , f"select c5 from {dbname}.stb1 order by ts " , f"select log(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_log( 2 ,f"select c5 from {dbname}.stb1 order by tbname " , f"select log(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_log( 2 ,f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log( 2 , f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select log(t1,2), log(c5,2) from {dbname}.stb1 order by ts" )
- self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 order by tbname" )
- self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) , log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log( 2 , f"select t1,c5 from {dbname}.stb1 order by ts " , f"select log(t1,2), log(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_log( 2 , f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_log( 2 , f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log( 2 ,f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) , log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
diff --git a/tests/system-test/2-query/lower.py b/tests/system-test/2-query/lower.py
index 0917fb63fc638263849625aec5b907c05260f49f..0e33e3834ec9ecc50470f0793b29a3a4b84d4834 100644
--- a/tests/system-test/2-query/lower.py
+++ b/tests/system-test/2-query/lower.py
@@ -96,16 +96,16 @@ class TDTestCase:
return sqls
- def __test_current(self):
+ def __test_current(self, dbname="db"):
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__lower_current_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__lower_err_check(tb):
@@ -113,22 +113,20 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self, dbname="db"):
+ self.__test_current(dbname)
+ self.__test_error(dbname)
-
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
- ) tags (t1 int)
+ ) tags (tag1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -138,78 +136,78 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
- ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } )
- ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } )
+ f'''insert into {dbname}.ct1 values
+ ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
+ ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
- { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000}
+ { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
- { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000}
+ { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
- { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
- { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
- "binary_{i}", "nchar_{i}", { now_time - 1000 * i } )
+ "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
- "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
- "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
@@ -227,10 +225,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
-
- tdSql.execute("use db")
+ tdSql.execute("flush database db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
diff --git a/tests/system-test/2-query/ltrim.py b/tests/system-test/2-query/ltrim.py
index 15f40a09c3db67e4324e75768532221f55f2e35f..330f688990d614c1a824fd25741f19966e227581 100644
--- a/tests/system-test/2-query/ltrim.py
+++ b/tests/system-test/2-query/ltrim.py
@@ -23,6 +23,7 @@ CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
+DBNAME = "db"
class TDTestCase:
@@ -120,16 +121,16 @@ class TDTestCase:
return sqls
- def __test_current(self): # sourcery skip: use-itertools-product
+ def __test_current(self, dbname=DBNAME): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__ltrim_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname=DBNAME):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__ltrim_err_check(tb):
@@ -142,17 +143,16 @@ class TDTestCase:
self.__test_error()
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname=DBNAME):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -162,29 +162,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname=DBNAME):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -200,7 +200,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -216,13 +216,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -251,8 +251,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
diff --git a/tests/system-test/2-query/mavg.py b/tests/system-test/2-query/mavg.py
index 0995dfc6ffedb28232c56d6f4826b8b2454249ff..b52217af9ac61e5a3c08d55b11e4219ec826b203 100644
--- a/tests/system-test/2-query/mavg.py
+++ b/tests/system-test/2-query/mavg.py
@@ -307,7 +307,7 @@ class TDTestCase:
pass
- def mavg_current_query(self) :
+ def mavg_current_query(self, dbname="db") :
# table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
# c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
@@ -325,17 +325,17 @@ class TDTestCase:
case6 = {"col": "c9"}
self.checkmavg(**case6)
- # # case7~8: nested query
- # case7 = {"table_expr": f"(select c1 from {dbname}.stb1)"}
- # self.checkmavg(**case7)
- # case8 = {"table_expr": f"(select mavg(c1, 1) c1 from {dbname}.stb1 group by tbname)"}
+ # case7~8: nested query
+ case7 = {"table_expr": f"(select c1 from {dbname}.stb1)"}
+ self.checkmavg(**case7)
+ # case8 = {"table_expr": f"(select _c0, mavg(c1, 1) c1 from {dbname}.stb1 group by tbname)"}
# self.checkmavg(**case8)
# case9~10: mix with tbname/ts/tag/col
- # case9 = {"alias": ", tbname"}
- # self.checkmavg(**case9)
- # case10 = {"alias": ", _c0"}
- # self.checkmavg(**case10)
+ case9 = {"alias": ", tbname"}
+ self.checkmavg(**case9)
+ case10 = {"alias": ", _c0"}
+ self.checkmavg(**case10)
# case11 = {"alias": ", st1"}
# self.checkmavg(**case11)
# case12 = {"alias": ", c1"}
@@ -356,7 +356,7 @@ class TDTestCase:
# case17: only support normal table join
case17 = {
"col": "t1.c1",
- "table_expr": "t1, t2",
+ "table_expr": f"{dbname}.t1 t1, {dbname}.t2 t2",
"condition": "where t1.ts=t2.ts"
}
self.checkmavg(**case17)
@@ -367,14 +367,14 @@ class TDTestCase:
# }
# self.checkmavg(**case19)
- # case20~21: with order by
+ # # case20~21: with order by
# case20 = {"condition": "order by ts"}
# self.checkmavg(**case20)
- #case21 = {
- # "table_expr": f"{dbname}.stb1",
- # "condition": "group by tbname order by tbname"
- #}
- #self.checkmavg(**case21)
+ case21 = {
+ "table_expr": f"{dbname}.stb1",
+ "condition": "group by tbname order by tbname"
+ }
+ self.checkmavg(**case21)
# # case22: with union
# case22 = {
@@ -398,7 +398,7 @@ class TDTestCase:
pass
- def mavg_error_query(self) -> None :
+ def mavg_error_query(self, dbname="db") -> None :
# unusual test
# form test
@@ -419,9 +419,9 @@ class TDTestCase:
err8 = {"table_expr": ""}
self.checkmavg(**err8) # no table_expr
- # err9 = {"col": "st1"}
+ err9 = {"col": "st1"}
# self.checkmavg(**err9) # col: tag
- # err10 = {"col": 1}
+ err10 = {"col": 1}
# self.checkmavg(**err10) # col: value
err11 = {"col": "NULL"}
self.checkmavg(**err11) # col: NULL
@@ -496,7 +496,7 @@ class TDTestCase:
# "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
# }
# self.checkmavg(**err44) # stb join
- tdSql.query("select mavg( stb1.c1 , 1 ) from stb1, stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts;")
+ tdSql.query(f"select mavg( stb1.c1 , 1 ) from {dbname}.stb1 stb1, {dbname}.stb2 stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts;")
err45 = {
"condition": "where ts>0 and ts < now interval(1h) fill(next)"
}
diff --git a/tests/system-test/2-query/max.py b/tests/system-test/2-query/max.py
index 34442a3725d69092535c02a509ba8cece4c10ed4..169b1c2c387c7158635483f8ce8868891e42e3c2 100644
--- a/tests/system-test/2-query/max.py
+++ b/tests/system-test/2-query/max.py
@@ -5,10 +5,7 @@ import numpy as np
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
- "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
+ updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
@@ -17,60 +14,80 @@ class TDTestCase:
self.ts = 1537146000000
self.binary_str = 'taosdata'
self.nchar_str = '涛思数据'
- def max_check_stb_and_tb_base(self):
+ def max_check_stb_and_tb_base(self, dbname="db"):
tdSql.prepare()
intData = []
floatData = []
- tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
+ tdSql.execute(f'''create table {dbname}.stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
- tdSql.execute("create table stb_1 using stb tags('beijing')")
+ tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags('beijing')")
for i in range(self.rowNum):
- tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
+ tdSql.execute(f"insert into {dbname}.stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
- for i in ['ts','col11','col12','col13']:
- for j in ['db.stb','stb','db.stb_1','stb_1']:
- tdSql.error(f'select max({i} from {j} )')
+ for i in ['col11','col12','col13']:
+ for j in ['stb','stb_1']:
+ tdSql.error(f'select max({i} from {dbname}.{j} )')
for i in range(1,11):
- for j in ['db.stb','stb','db.stb_1','stb_1']:
- tdSql.query(f"select max(col{i}) from {j}")
+ for j in ['stb', 'stb_1']:
+ tdSql.query(f"select max(col{i}) from {dbname}.{j}")
if i<9:
tdSql.checkData(0, 0, np.max(intData))
elif i>=9:
tdSql.checkData(0, 0, np.max(floatData))
- tdSql.query("select max(col1) from stb_1 where col2<=5")
+
+ tdSql.query(f"select max(now()) from {dbname}.stb_1")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select last(ts) from {dbname}.stb_1")
+ lastTs = tdSql.getData(0, 0)
+ tdSql.query(f"select max(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, lastTs)
+
+ tdSql.query(f"select last(ts) from {dbname}.stb")
+ lastTs = tdSql.getData(0, 0)
+ tdSql.query(f"select max(ts) from {dbname}.stb")
+ tdSql.checkData(0, 0, lastTs)
+
+ tdSql.query(f"select max(col1) from {dbname}.stb_1 where col2<=5")
tdSql.checkData(0,0,5)
- tdSql.query("select max(col1) from stb where col2<=5")
+ tdSql.query(f"select max(col1) from {dbname}.stb where col2<=5")
tdSql.checkData(0,0,5)
- tdSql.execute('drop database db')
- def max_check_ntb_base(self):
+ def max_check_ntb_base(self, dbname="db"):
tdSql.prepare()
intData = []
floatData = []
- tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
+ tdSql.execute(f'''create table {dbname}.ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20))''')
for i in range(self.rowNum):
- tdSql.execute(f"insert into ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
+ tdSql.execute(f"insert into {dbname}.ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
- for i in ['ts','col11','col12','col13']:
- for j in ['db.ntb','ntb']:
- tdSql.error(f'select max({i} from {j} )')
+ for i in ['col11','col12','col13']:
+ for j in ['ntb']:
+ tdSql.error(f'select max({i} from {dbname}.{j} )')
for i in range(1,11):
- for j in ['db.ntb','ntb']:
- tdSql.query(f"select max(col{i}) from {j}")
+ for j in ['ntb']:
+ tdSql.query(f"select max(col{i}) from {dbname}.{j}")
if i<9:
tdSql.checkData(0, 0, np.max(intData))
elif i>=9:
tdSql.checkData(0, 0, np.max(floatData))
- tdSql.query("select max(col1) from ntb where col2<=5")
- tdSql.checkData(0,0,5)
- tdSql.execute('drop database db')
+ tdSql.query(f"select max(now()) from {dbname}.ntb")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select last(ts) from {dbname}.ntb")
+ lastTs = tdSql.getData(0, 0)
+ tdSql.query(f"select max(ts) from {dbname}.ntb")
+ tdSql.checkData(0, 0, lastTs)
+
+ tdSql.query(f"select max(col1) from {dbname}.ntb where col2<=5")
+ tdSql.checkData(0,0,5)
def check_max_functions(self, tbname , col_name):
@@ -90,55 +107,55 @@ class TDTestCase:
tdLog.info(" max function work as expected, sql : %s "% max_sql)
- def support_distributed_aggregate(self):
+ def support_distributed_aggregate(self, dbname="testdb"):
# prepate datas for 20 tables distributed at different vgroups
- tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
- tdSql.execute(" use testdb ")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
+ tdSql.execute(f"use {dbname} ")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(20):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
- tbname = "ct"+f'{i}'
+ tbname = f"{dbname}.ct{i}"
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -157,7 +174,7 @@ class TDTestCase:
tdLog.info(" prepare data for distributed_aggregate done! ")
# get vgroup_ids of all
- tdSql.query("show vgroups ")
+ tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
@@ -167,7 +184,7 @@ class TDTestCase:
# check sub_table of per vnode ,make sure sub_table has been distributed
- tdSql.query("select * from information_schema.ins_tables where db_name = 'testdb' and table_name like 'ct%'")
+ tdSql.query(f"select * from information_schema.ins_tables where db_name = '{dbname}' and table_name like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
@@ -182,13 +199,13 @@ class TDTestCase:
# check max function work status
- tdSql.query("show tables like 'ct%'")
+ tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
tablenames.append(table_name[0])
- tdSql.query("desc stb1")
+ tdSql.query(f"desc {dbname}.stb1")
col_names = tdSql.queryResult
colnames = []
@@ -198,11 +215,7 @@ class TDTestCase:
for tablename in tablenames:
for colname in colnames:
- self.check_max_functions(tablename,colname)
-
- # max function with basic filter
- print(vnode_tables)
-
+ self.check_max_functions(f"{dbname}.{tablename}", colname)
def run(self):
diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py
index 4b9996d9c3b1d45f52e184f1da4ec8e59714feaa..01c267724210591e639753c3566c4826a5218813 100644
--- a/tests/system-test/2-query/max_partition.py
+++ b/tests/system-test/2-query/max_partition.py
@@ -12,16 +12,15 @@ class TDTestCase:
self.tb_nums = 10
self.ts = 1537146000000
- def prepare_datas(self, stb_name , tb_nums , row_nums ):
- tdSql.execute(" use db ")
- tdSql.execute(f" create stable {stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\
+ def prepare_datas(self, stb_name , tb_nums , row_nums, dbname="db" ):
+ tdSql.execute(f" create stable {dbname}.{stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\
uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\
, t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ")
for i in range(tb_nums):
- tbname = f"sub_{stb_name}_{i}"
+ tbname = f"{dbname}.sub_{stb_name}_{i}"
ts = self.ts + i*10000
- tdSql.execute(f"create table {tbname} using {stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )")
+ tdSql.execute(f"create table {tbname} using {dbname}.{stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )")
for row in range(row_nums):
ts = self.ts + row*1000
@@ -31,191 +30,192 @@ class TDTestCase:
ts = self.ts + row_nums*1000 + null*1000
tdSql.execute(f"insert into {tbname} values({ts} , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL )")
- def basic_query(self):
- tdSql.query("select count(*) from stb")
+ def basic_query(self, dbname="db"):
+ tdSql.query(f"select count(*) from {dbname}.stb")
tdSql.checkData(0,0,(self.row_nums + 5 )*self.tb_nums)
- tdSql.query("select max(c1) from stb")
+ tdSql.query(f"select max(c1) from {dbname}.stb")
tdSql.checkData(0,0,(self.row_nums -1))
- tdSql.query(" select tbname , max(c1) from stb partition by tbname ")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname ")
tdSql.checkRows(self.tb_nums)
- tdSql.query(" select max(c1) from stb group by t1 order by t1 ")
+ tdSql.query(f"select max(c1) from {dbname}.stb group by t1 order by t1 ")
tdSql.checkRows(self.tb_nums)
- tdSql.query(" select max(c1) from stb group by c1 order by t1 ")
- tdSql.query(" select max(t2) from stb group by c1 order by t1 ")
- tdSql.query(" select max(c1) from stb group by tbname order by tbname ")
+ tdSql.query(f"select max(c1) from {dbname}.stb group by c1 order by t1 ")
+ tdSql.query(f"select max(t2) from {dbname}.stb group by c1 order by t1 ")
+ tdSql.query(f"select max(c1) from {dbname}.stb group by tbname order by tbname ")
tdSql.checkRows(self.tb_nums)
# bug need fix
- tdSql.query(" select max(t2) from stb group by t2 order by t2 ")
+ tdSql.query(f"select max(t2) from {dbname}.stb group by t2 order by t2 ")
tdSql.checkRows(self.tb_nums)
- tdSql.query(" select max(c1) from stb group by c1 order by c1 ")
+ tdSql.query(f"select max(c1) from {dbname}.stb group by c1 order by c1 ")
tdSql.checkRows(self.row_nums+1)
- tdSql.query(" select c1 , max(c1) from stb group by c1 order by c1 ")
+ tdSql.query(f"select c1 , max(c1) from {dbname}.stb group by c1 order by c1 ")
tdSql.checkRows(self.row_nums+1)
# support selective functions
- tdSql.query(" select c1 ,c2 ,c3 , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ")
+ tdSql.query(f"select c1 ,c2 ,c3 , max(c1) ,c4 ,c5 ,t11 from {dbname}.stb group by c1 order by c1 desc ")
tdSql.checkRows(self.row_nums+1)
- tdSql.query(" select c1, tbname , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ")
+ tdSql.query(f"select c1, tbname , max(c1) ,c4 ,c5 ,t11 from {dbname}.stb group by c1 order by c1 desc ")
tdSql.checkRows(self.row_nums+1)
# bug need fix
- # tdSql.query(" select tbname , max(c1) from sub_stb_1 where c1 is null group by c1 order by c1 desc ")
- # tdSql.checkRows(1)
- # tdSql.checkData(0,0,"sub_stb_1")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.sub_stb_1 where c1 is null group by c1 order by c1 desc ")
+ tdSql.checkRows(1)
+ tdSql.checkData(0,0,"sub_stb_1")
- tdSql.query("select max(c1) ,c2 ,t2,tbname from stb group by abs(c1) order by abs(c1)")
+ tdSql.query(f"select max(c1) ,c2 ,t2,tbname from {dbname}.stb group by abs(c1) order by abs(c1)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select abs(c1+c3), count(c1+c3) ,max(c1+t2) from stb group by abs(c1+c3) order by abs(c1+c3)")
+ tdSql.query(f"select abs(c1+c3), count(c1+c3) ,max(c1+t2) from {dbname}.stb group by abs(c1+c3) order by abs(c1+c3)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select max(c1+c3)+min(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)")
+ tdSql.query(f"select max(c1+c3)+min(c2) ,abs(c1) from {dbname}.stb group by abs(c1) order by abs(c1)")
tdSql.checkRows(self.row_nums+1)
- tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from stb group by abs(c1) order by abs(t1)+c2")
- tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)+c2")
- tdSql.query("select abs(c1+c3)+abs(c2) , count(c1+c3)+max(c2) from stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)")
+ tdSql.error(f"select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from {dbname}.stb group by abs(c1) order by abs(t1)+c2")
+ tdSql.error(f"select count(c1+c3)+max(c2) ,abs(c1) from {dbname}.stb group by abs(c1) order by abs(c1)+c2")
+ tdSql.query(f"select abs(c1+c3)+abs(c2) , count(c1+c3)+max(c2) from {dbname}.stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query(" select max(c1) , max(t2) from stb where abs(c1+t2)=1 partition by tbname ")
+ tdSql.query(f"select max(c1) , max(t2) from {dbname}.stb where abs(c1+t2)=1 partition by tbname ")
tdSql.checkRows(2)
- tdSql.query(" select max(c1) from stb where abs(c1+t2)=1 partition by tbname ")
+ tdSql.query(f"select max(c1) from {dbname}.stb where abs(c1+t2)=1 partition by tbname ")
tdSql.checkRows(2)
- tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname ")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname ")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,1,self.row_nums-1)
- tdSql.query("select tbname , max(c2) from stb partition by t1 order by t1")
- tdSql.query("select tbname , max(t2) from stb partition by t1 order by t1")
- tdSql.query("select tbname , max(t2) from stb partition by t2 order by t2")
+ tdSql.query(f"select tbname , max(c2) from {dbname}.stb partition by t1 order by t1")
+ tdSql.query(f"select tbname , max(t2) from {dbname}.stb partition by t1 order by t1")
+ tdSql.query(f"select tbname , max(t2) from {dbname}.stb partition by t2 order by t2")
# # bug need fix
- tdSql.query("select t2 , max(t2) from stb partition by t2 order by t2")
+ tdSql.query(f"select t2 , max(t2) from {dbname}.stb partition by t2 order by t2")
tdSql.checkRows(self.tb_nums)
- tdSql.query("select tbname , max(c1) from stb partition by tbname order by tbname")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,1,self.row_nums-1)
- tdSql.query("select tbname , max(c1) from stb partition by t2 order by t2")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by t2 order by t2")
- tdSql.query("select c2, max(c1) from stb partition by c2 order by c2 desc")
+ tdSql.query(f"select c2, max(c1) from {dbname}.stb partition by c2 order by c2 desc")
tdSql.checkRows(self.tb_nums+1)
tdSql.checkData(0,1,self.row_nums-1)
- tdSql.query("select tbname , max(c1) from stb partition by c1 order by c2")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by c1 order by c2")
- tdSql.query("select tbname , abs(t2) from stb partition by c2 order by t2")
+ tdSql.query(f"select tbname , abs(t2) from {dbname}.stb partition by c2 order by t2")
tdSql.checkRows(self.tb_nums*(self.row_nums+5))
- tdSql.query("select max(c1) , count(t2) from stb partition by c2 ")
+ tdSql.query(f"select max(c1) , count(t2) from {dbname}.stb partition by c2 ")
tdSql.checkRows(self.row_nums+1)
tdSql.checkData(0,1,self.row_nums)
- tdSql.query("select count(c1) , max(t2) ,c2 from stb partition by c2 order by c2")
+ tdSql.query(f"select count(c1) , max(t2) ,c2 from {dbname}.stb partition by c2 order by c2")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select count(c1) , count(t1) ,max(c2) ,tbname from stb partition by tbname order by tbname")
+ tdSql.query(f"select count(c1) , count(t1) ,max(c2) ,tbname from {dbname}.stb partition by tbname order by tbname")
tdSql.checkRows(self.tb_nums)
tdSql.checkCols(4)
- tdSql.query("select count(c1) , max(t2) ,t1 from stb partition by t1 order by t1")
+ tdSql.query(f"select count(c1) , max(t2) ,t1 from {dbname}.stb partition by t1 order by t1")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,self.row_nums)
# bug need fix
- tdSql.query("select count(c1) , max(t2) ,abs(c1) from stb partition by abs(c1) order by abs(c1)")
+ tdSql.query(f"select count(c1) , max(t2) ,abs(c1) from {dbname}.stb partition by abs(c1) order by abs(c1)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from stb partition by abs(c2) order by abs(c2)")
+ tdSql.query(f"select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from {dbname}.stb partition by abs(c2) order by abs(c2)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select max(ceil(c1-2)) , max(floor(t2+1)) ,max(c2-c1) from stb partition by abs(floor(c1)) order by abs(floor(c1))")
+ tdSql.query(f"select max(ceil(c1-2)) , max(floor(t2+1)) ,max(c2-c1) from {dbname}.stb partition by abs(floor(c1)) order by abs(floor(c1))")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select tbname , max(c1) ,c1 from stb partition by tbname order by tbname")
+ tdSql.query(f"select tbname , max(c1) ,c1 from {dbname}.stb partition by tbname order by tbname")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,'sub_stb_0')
tdSql.checkData(0,1,9)
tdSql.checkData(0,2,9)
- tdSql.query("select tbname ,top(c1,1) ,c1 from stb partition by tbname order by tbname")
+ tdSql.query(f"select tbname ,top(c1,1) ,c1 from {dbname}.stb partition by tbname order by tbname")
tdSql.checkRows(self.tb_nums)
- tdSql.query(" select c1 , sample(c1,2) from stb partition by tbname order by tbname ")
+ tdSql.query(f"select c1 , sample(c1,2) from {dbname}.stb partition by tbname order by tbname ")
tdSql.checkRows(self.tb_nums*2)
# interval
- tdSql.query("select max(c1) from stb interval(2s) sliding(1s)")
+ tdSql.query(f"select max(c1) from {dbname}.stb interval(2s) sliding(1s)")
# bug need fix
- tdSql.query('select max(c1) from stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)')
+ tdSql.query(f'select max(c1) from {dbname}.stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)')
- tdSql.query(" select tbname , count(c1) from stb partition by tbname interval(10s) slimit 5 soffset 1 ")
+ tdSql.query(f"select tbname , count(c1) from {dbname}.stb partition by tbname interval(10s) slimit 5 soffset 1 ")
- tdSql.query("select tbname , max(c1) from stb partition by tbname interval(10s)")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname interval(10s)")
tdSql.checkRows(self.row_nums*2)
- tdSql.query("select unique(c1) from stb partition by tbname order by tbname")
+ tdSql.query(f"select unique(c1) from {dbname}.stb partition by tbname order by tbname")
- tdSql.query("select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s)")
+ tdSql.query(f"select tbname , count(c1) from {dbname}.sub_stb_1 partition by tbname interval(10s)")
tdSql.checkData(0,0,'sub_stb_1')
tdSql.checkData(0,1,self.row_nums)
- tdSql.query("select c1 , mavg(c1 ,2 ) from stb partition by c1")
+ tdSql.query(f"select c1 , mavg(c1 ,2 ) from {dbname}.stb partition by c1")
tdSql.checkRows(90)
- tdSql.query("select c1 , diff(c1 , 0) from stb partition by c1")
+ tdSql.query(f"select c1 , diff(c1 , 0) from {dbname}.stb partition by c1")
tdSql.checkRows(90)
- tdSql.query("select c1 , csum(c1) from stb partition by c1")
+ tdSql.query(f"select c1 , csum(c1) from {dbname}.stb partition by c1")
tdSql.checkRows(100)
- tdSql.query("select c1 , sample(c1,2) from stb partition by c1 order by c1")
+ tdSql.query(f"select c1 , sample(c1,2) from {dbname}.stb partition by c1 order by c1")
tdSql.checkRows(21)
# bug need fix
- # tdSql.checkData(0,1,None)
+ tdSql.checkData(0,1,None)
- tdSql.query("select c1 , twa(c1) from stb partition by c1 order by c1")
+ tdSql.query(f"select c1 , twa(c1) from {dbname}.stb partition by c1 order by c1")
tdSql.checkRows(11)
tdSql.checkData(0,1,None)
- tdSql.query("select c1 , irate(c1) from stb partition by c1 order by c1")
+ tdSql.query(f"select c1 , irate(c1) from {dbname}.stb partition by c1 order by c1")
tdSql.checkRows(11)
tdSql.checkData(0,1,None)
- tdSql.query("select c1 , DERIVATIVE(c1,2,1) from stb partition by c1 order by c1")
+ tdSql.query(f"select c1 , DERIVATIVE(c1,2,1) from {dbname}.stb partition by c1 order by c1")
tdSql.checkRows(90)
# bug need fix
tdSql.checkData(0,1,None)
- tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname slimit 5 soffset 0 ")
tdSql.checkRows(10)
- tdSql.query(" select tbname , max(c1) from sub_stb_1 partition by tbname interval(10s) sliding(5s) ")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.sub_stb_1 partition by tbname interval(10s) sliding(5s) ")
- tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
- tdSql.query(f'select tbname , max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
+ tdSql.query(f'select max(c1) from {dbname}.stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
+ tdSql.query(f'select tbname , max(c1) from {dbname}.stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
def run(self):
+ dbname = "db"
tdSql.prepare()
self.prepare_datas("stb",self.tb_nums,self.row_nums)
self.basic_query()
# # coverage case for taosd crash about bug fix
- tdSql.query(" select sum(c1) from stb where t2+10 >1 ")
- tdSql.query(" select count(c1),count(t1) from stb where -t2<1 ")
- tdSql.query(" select tbname ,max(ceil(c1)) from stb group by tbname ")
- tdSql.query(" select avg(abs(c1)) , tbname from stb group by tbname ")
- tdSql.query(" select t1,c1 from stb where abs(t2+c1)=1 ")
+ tdSql.query(f"select sum(c1) from {dbname}.stb where t2+10 >1 ")
+ tdSql.query(f"select count(c1),count(t1) from {dbname}.stb where -t2<1 ")
+ tdSql.query(f"select tbname ,max(ceil(c1)) from {dbname}.stb group by tbname ")
+ tdSql.query(f"select avg(abs(c1)) , tbname from {dbname}.stb group by tbname ")
+ tdSql.query(f"select t1,c1 from {dbname}.stb where abs(t2+c1)=1 ")
def stop(self):
diff --git a/tests/system-test/2-query/min.py b/tests/system-test/2-query/min.py
index c27e9926ff52e178afe230872d70c6ab269d6983..3d46b7b2224f834360c17cdc311dbf1e0d5a4535 100644
--- a/tests/system-test/2-query/min.py
+++ b/tests/system-test/2-query/min.py
@@ -14,198 +14,159 @@ class TDTestCase:
self.ts = 1537146000000
def run(self):
+ dbname = "db"
tdSql.prepare()
intData = []
floatData = []
- tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ tdSql.execute(f'''create table {dbname}.stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
- tdSql.execute("create table stb_1 using stb tags('beijing')")
- tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags('beijing')")
+ tdSql.execute(f'''create table {dbname}.ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
for i in range(self.rowNum):
- tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ tdSql.execute(f"insert into {dbname}.ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
for i in range(self.rowNum):
- tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ tdSql.execute(f"insert into {dbname}.stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
# max verifacation
- tdSql.error("select min(ts) from stb_1")
- tdSql.error("select min(ts) from db.stb_1")
- tdSql.error("select min(col7) from stb_1")
- tdSql.error("select min(col7) from db.stb_1")
- tdSql.error("select min(col8) from stb_1")
- tdSql.error("select min(col8) from db.stb_1")
- tdSql.error("select min(col9) from stb_1")
- tdSql.error("select min(col9) from db.stb_1")
- # tdSql.error("select min(a) from stb_1")
- # tdSql.error("select min(1) from stb_1")
- tdSql.error("select min(now()) from stb_1")
- tdSql.error("select min(count(c1),count(c2)) from stb_1")
+ tdSql.error(f"select min(col7) from {dbname}.stb_1")
+ tdSql.error(f"select min(col8) from {dbname}.stb_1")
+ tdSql.error(f"select min(col9) from {dbname}.stb_1")
+ tdSql.error(f"select min(a) from {dbname}.stb_1")
+ tdSql.query(f"select min(1) from {dbname}.stb_1")
+ tdSql.error(f"select min(count(c1),count(c2)) from {dbname}.stb_1")
- tdSql.query("select min(col1) from stb_1")
+ tdSql.query(f"select min(col1) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col1) from db.stb_1")
+ tdSql.query(f"select min(col2) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from stb_1")
+ tdSql.query(f"select min(col3) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from db.stb_1")
+ tdSql.query(f"select min(col4) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from stb_1")
+ tdSql.query(f"select min(col11) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from db.stb_1")
+ tdSql.query(f"select min(col12) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from stb_1")
+ tdSql.query(f"select min(col13) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from db.stb_1")
+ tdSql.query(f"select min(col14) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from db.stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from db.stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from db.stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from db.stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col5) from stb_1")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col5) from db.stb_1")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from stb_1")
+ tdSql.query(f"select min(col5) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from db.stb_1")
+ tdSql.query(f"select min(col6) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col1) from stb_1 where col2>=5")
+ tdSql.query(f"select min(col1) from {dbname}.stb_1 where col2>=5")
tdSql.checkData(0,0,5)
+ tdSql.query(f"select min(now()) from {dbname}.stb_1")
+ tdSql.checkRows(1)
- tdSql.error("select min(ts) from stb_1")
- tdSql.error("select min(ts) from db.stb_1")
- tdSql.error("select min(col7) from stb_1")
- tdSql.error("select min(col7) from db.stb_1")
- tdSql.error("select min(col8) from stb_1")
- tdSql.error("select min(col8) from db.stb_1")
- tdSql.error("select min(col9) from stb_1")
- tdSql.error("select min(col9) from db.stb_1")
- # tdSql.error("select min(a) from stb_1")
- # tdSql.error("select min(1) from stb_1")
- tdSql.error("select min(now()) from stb_1")
- tdSql.error("select min(count(c1),count(c2)) from stb_1")
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
- tdSql.query("select min(col1) from stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col1) from db.stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from db.stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from db.stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from db.stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from stb")
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
+
+
+ tdSql.error(f"select min(col7) from {dbname}.stb_1")
+ tdSql.error(f"select min(col8) from {dbname}.stb_1")
+ tdSql.error(f"select min(col9) from {dbname}.stb_1")
+ tdSql.error(f"select min(a) from {dbname}.stb_1")
+ tdSql.query(f"select min(1) from {dbname}.stb_1")
+ tdSql.error(f"select min(count(c1),count(c2)) from {dbname}.stb_1")
+
+ tdSql.query(f"select min(col1) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from db.stb")
+ tdSql.query(f"select min(col2) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from stb")
+ tdSql.query(f"select min(col3) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from db.stb")
+ tdSql.query(f"select min(col4) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from stb")
+ tdSql.query(f"select min(col11) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from db.stb")
+ tdSql.query(f"select min(col12) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from stb")
+ tdSql.query(f"select min(col13) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from db.stb")
+ tdSql.query(f"select min(col14) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col5) from stb")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col5) from db.stb")
+ tdSql.query(f"select min(col5) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from stb")
+ tdSql.query(f"select min(col6) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from db.stb")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col1) from stb where col2>=5")
+ tdSql.query(f"select min(col1) from {dbname}.stb where col2>=5")
tdSql.checkData(0,0,5)
+ tdSql.query(f"select min(now()) from {dbname}.stb_1")
+ tdSql.checkRows(1)
- tdSql.error("select min(ts) from ntb")
- tdSql.error("select min(ts) from db.ntb")
- tdSql.error("select min(col7) from ntb")
- tdSql.error("select min(col7) from db.ntb")
- tdSql.error("select min(col8) from ntb")
- tdSql.error("select min(col8) from db.ntb")
- tdSql.error("select min(col9) from ntb")
- tdSql.error("select min(col9) from db.ntb")
- # tdSql.error("select min(a) from stb_1")
- # tdSql.error("select min(1) from stb_1")
- tdSql.error("select min(now()) from ntb")
- tdSql.error("select min(count(c1),count(c2)) from ntb")
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
- tdSql.query("select min(col1) from ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col1) from db.ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from db.ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from db.ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from db.ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from ntb")
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
+
+ tdSql.error(f"select min(col7) from {dbname}.ntb")
+ tdSql.error(f"select min(col8) from {dbname}.ntb")
+ tdSql.error(f"select min(col9) from {dbname}.ntb")
+ tdSql.error(f"select min(a) from {dbname}.ntb")
+ tdSql.query(f"select min(1) from {dbname}.ntb")
+ tdSql.error(f"select min(count(c1),count(c2)) from {dbname}.ntb")
+
+ tdSql.query(f"select min(col1) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from db.ntb")
+ tdSql.query(f"select min(col2) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from ntb")
+ tdSql.query(f"select min(col3) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from db.ntb")
+ tdSql.query(f"select min(col4) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from ntb")
+ tdSql.query(f"select min(col11) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from db.ntb")
+ tdSql.query(f"select min(col12) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from ntb")
+ tdSql.query(f"select min(col13) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from db.ntb")
+ tdSql.query(f"select min(col14) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col5) from ntb")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col5) from db.ntb")
+ tdSql.query(f"select min(col5) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from ntb")
+ tdSql.query(f"select min(col6) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from db.ntb")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col1) from ntb where col2>=5")
+ tdSql.query(f"select min(col1) from {dbname}.ntb where col2>=5")
tdSql.checkData(0,0,5)
+ tdSql.query(f"select min(now()) from {dbname}.stb_1")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
+
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
+
def stop(self):
tdSql.close()
diff --git a/tests/system-test/2-query/nestedQuery_str.py b/tests/system-test/2-query/nestedQuery_str.py
index 0d40ef8147eabe133973a15607c340243b69db92..931ff873dcce279d8ddff018549beb648c5cfbc4 100755
--- a/tests/system-test/2-query/nestedQuery_str.py
+++ b/tests/system-test/2-query/nestedQuery_str.py
@@ -24,9 +24,6 @@ from util.dnodes import tdDnodes
from util.dnodes import *
class TDTestCase:
- updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
diff --git a/tests/system-test/2-query/pow.py b/tests/system-test/2-query/pow.py
index 1af8bd3839beafe37f690abf14d85f3c0e224cb2..0702d05c0b7bf0989046ab1cfdfaa0d812c78407 100644
--- a/tests/system-test/2-query/pow.py
+++ b/tests/system-test/2-query/pow.py
@@ -9,48 +9,46 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
-
- def prepare_datas(self):
+
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
-
+
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -65,257 +63,182 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
-
- def check_result_auto_pow2(self ,origin_query , pow_query):
+ def check_result_auto_pow(self ,base , origin_query , pow_query):
pow_result = tdSql.getResult(pow_query)
origin_result = tdSql.getResult(origin_query)
auto_result =[]
-
- for row in origin_result:
- row_check = []
- for elem in row:
- if elem == None:
- elem = None
- else:
- elem = math.pow(elem,2)
- row_check.append(elem)
- auto_result.append(row_check)
-
- check_status = True
- for row_index , row in enumerate(pow_result):
- for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("pow function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def check_result_auto_pow1(self ,origin_query , pow_query):
- pow_result = tdSql.getResult(pow_query)
- origin_result = tdSql.getResult(origin_query)
-
- auto_result =[]
-
for row in origin_result:
row_check = []
for elem in row:
if elem == None:
elem = None
else :
- elem = pow(elem ,1)
+ elem = float(pow(elem ,base))
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("pow function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def check_result_auto_pow__10(self ,origin_query , pow_query):
- pow_result = tdSql.getResult(pow_query)
- origin_result = tdSql.getResult(origin_query)
+ tdSql.checkData(row_index,col_index ,auto_result[row_index][col_index])
+
- auto_result =[]
-
- for row in origin_result:
- row_check = []
- for elem in row:
- if elem == None:
- elem = None
- elif elem == 0:
- elem = None
- else:
- elem = pow(elem ,-10)
- row_check.append(elem)
- auto_result.append(row_check)
-
- check_status = True
- for row_index , row in enumerate(pow_result):
- for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("pow function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select pow from t1",
- # "select pow(-+--+c1 ,2) from t1",
- # "select +-pow(c1,2) from t1",
- # "select ++-pow(c1,2) from t1",
- # "select ++--pow(c1,2) from t1",
- # "select - -pow(c1,2)*0 from t1",
- # "select pow(tbname+1,2) from t1 ",
- "select pow(123--123,2)==1 from t1",
- "select pow(c1,2) as 'd1' from t1",
- "select pow(c1 ,c2 ,2) from t1",
- "select pow(c1 ,NULL ,2) from t1",
- "select pow(, 2) from t1;",
- "select pow(pow(c1, 2) ab from t1)",
- "select pow(c1 ,2 ) as int from t1",
- "select pow from stb1",
- # "select pow(-+--+c1) from stb1",
- # "select +-pow(c1) from stb1",
- # "select ++-pow(c1) from stb1",
- # "select ++--pow(c1) from stb1",
- # "select - -pow(c1)*0 from stb1",
- # "select pow(tbname+1) from stb1 ",
- "select pow(123--123 ,2)==1 from stb1",
- "select pow(c1 ,2) as 'd1' from stb1",
- "select pow(c1 ,c2 ,2 ) from stb1",
- "select pow(c1 ,NULL,2) from stb1",
- "select pow(,) from stb1;",
- "select pow(pow(c1 , 2) ab from stb1)",
- "select pow(c1 , 2) as int from stb1"
+ f"select pow from {dbname}.t1",
+ # f"select pow(-+--+c1 ,2) from {dbname}.t1",
+ # f"select +-pow(c1,2) from {dbname}.t1",
+ # f"select ++-pow(c1,2) from {dbname}.t1",
+ # f"select ++--pow(c1,2) from {dbname}.t1",
+ # f"select - -pow(c1,2)*0 from {dbname}.t1",
+ # f"select pow(tbname+1,2) from {dbname}.t1 ",
+ f"select pow(123--123,2)==1 from {dbname}.t1",
+ f"select pow(c1,2) as 'd1' from {dbname}.t1",
+ f"select pow(c1 ,c2 ,2) from {dbname}.t1",
+ f"select pow(c1 ,NULL ,2) from {dbname}.t1",
+ f"select pow(, 2) from {dbname}.t1;",
+ f"select pow(pow(c1, 2) ab from {dbname}.t1)",
+ f"select pow(c1 ,2 ) as int from {dbname}.t1",
+ f"select pow from {dbname}.stb1",
+ # f"select pow(-+--+c1) from {dbname}.stb1",
+ # f"select +-pow(c1) from {dbname}.stb1",
+ # f"select ++-pow(c1) from {dbname}.stb1",
+ # f"select ++--pow(c1) from {dbname}.stb1",
+ # f"select - -pow(c1)*0 from {dbname}.stb1",
+ # f"select pow(tbname+1) from {dbname}.stb1 ",
+ f"select pow(123--123 ,2)==1 from {dbname}.stb1",
+ f"select pow(c1 ,2) as 'd1' from {dbname}.stb1",
+ f"select pow(c1 ,c2 ,2 ) from {dbname}.stb1",
+ f"select pow(c1 ,NULL,2) from {dbname}.stb1",
+ f"select pow(,) from {dbname}.stb1;",
+ f"select pow(pow(c1 , 2) ab from {dbname}.stb1)",
+ f"select pow(c1 , 2) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
-
- def support_types(self):
+
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select pow(ts ,2 ) from t1" ,
- "select pow(c7,c1 ) from t1",
- "select pow(c8,c2) from t1",
- "select pow(c9,c3 ) from t1",
- "select pow(ts,c4 ) from ct1" ,
- "select pow(c7,c5 ) from ct1",
- "select pow(c8,c6 ) from ct1",
- "select pow(c9,c8 ) from ct1",
- "select pow(ts,2 ) from ct3" ,
- "select pow(c7,2 ) from ct3",
- "select pow(c8,2 ) from ct3",
- "select pow(c9,2 ) from ct3",
- "select pow(ts,2 ) from ct4" ,
- "select pow(c7,2 ) from ct4",
- "select pow(c8,2 ) from ct4",
- "select pow(c9,2 ) from ct4",
- "select pow(ts,2 ) from stb1" ,
- "select pow(c7,2 ) from stb1",
- "select pow(c8,2 ) from stb1",
- "select pow(c9,2 ) from stb1" ,
-
- "select pow(ts,2 ) from stbbb1" ,
- "select pow(c7,2 ) from stbbb1",
-
- "select pow(ts,2 ) from tbname",
- "select pow(c9,2 ) from tbname"
+ f"select pow(ts ,2 ) from {dbname}.t1" ,
+ f"select pow(c7,c1 ) from {dbname}.t1",
+ f"select pow(c8,c2) from {dbname}.t1",
+ f"select pow(c9,c3 ) from {dbname}.t1",
+ f"select pow(ts,c4 ) from {dbname}.ct1" ,
+ f"select pow(c7,c5 ) from {dbname}.ct1",
+ f"select pow(c8,c6 ) from {dbname}.ct1",
+ f"select pow(c9,c8 ) from {dbname}.ct1",
+ f"select pow(ts,2 ) from {dbname}.ct3" ,
+ f"select pow(c7,2 ) from {dbname}.ct3",
+ f"select pow(c8,2 ) from {dbname}.ct3",
+ f"select pow(c9,2 ) from {dbname}.ct3",
+ f"select pow(ts,2 ) from {dbname}.ct4" ,
+ f"select pow(c7,2 ) from {dbname}.ct4",
+ f"select pow(c8,2 ) from {dbname}.ct4",
+ f"select pow(c9,2 ) from {dbname}.ct4",
+ f"select pow(ts,2 ) from {dbname}.stb1" ,
+ f"select pow(c7,2 ) from {dbname}.stb1",
+ f"select pow(c8,2 ) from {dbname}.stb1",
+ f"select pow(c9,2 ) from {dbname}.stb1" ,
+
+ f"select pow(ts,2 ) from {dbname}.stbbb1" ,
+ f"select pow(c7,2 ) from {dbname}.stbbb1",
+
+ f"select pow(ts,2 ) from {dbname}.tbname",
+ f"select pow(c9,2 ) from {dbname}.tbname"
]
-
+
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
-
-
+
+
type_sql_lists = [
- "select pow(c1,2 ) from t1",
- "select pow(c2,2 ) from t1",
- "select pow(c3,2 ) from t1",
- "select pow(c4,2 ) from t1",
- "select pow(c5,2 ) from t1",
- "select pow(c6,2 ) from t1",
-
- "select pow(c1,2 ) from ct1",
- "select pow(c2,2 ) from ct1",
- "select pow(c3,2 ) from ct1",
- "select pow(c4,2 ) from ct1",
- "select pow(c5,2 ) from ct1",
- "select pow(c6,2 ) from ct1",
-
- "select pow(c1,2 ) from ct3",
- "select pow(c2,2 ) from ct3",
- "select pow(c3,2 ) from ct3",
- "select pow(c4,2 ) from ct3",
- "select pow(c5,2 ) from ct3",
- "select pow(c6,2 ) from ct3",
-
- "select pow(c1,2 ) from stb1",
- "select pow(c2,2 ) from stb1",
- "select pow(c3,2 ) from stb1",
- "select pow(c4,2 ) from stb1",
- "select pow(c5,2 ) from stb1",
- "select pow(c6,2 ) from stb1",
-
- "select pow(c6,2) as alisb from stb1",
- "select pow(c6,2) alisb from stb1",
+ f"select pow(c1,2 ) from {dbname}.t1",
+ f"select pow(c2,2 ) from {dbname}.t1",
+ f"select pow(c3,2 ) from {dbname}.t1",
+ f"select pow(c4,2 ) from {dbname}.t1",
+ f"select pow(c5,2 ) from {dbname}.t1",
+ f"select pow(c6,2 ) from {dbname}.t1",
+
+ f"select pow(c1,2 ) from {dbname}.ct1",
+ f"select pow(c2,2 ) from {dbname}.ct1",
+ f"select pow(c3,2 ) from {dbname}.ct1",
+ f"select pow(c4,2 ) from {dbname}.ct1",
+ f"select pow(c5,2 ) from {dbname}.ct1",
+ f"select pow(c6,2 ) from {dbname}.ct1",
+
+ f"select pow(c1,2 ) from {dbname}.ct3",
+ f"select pow(c2,2 ) from {dbname}.ct3",
+ f"select pow(c3,2 ) from {dbname}.ct3",
+ f"select pow(c4,2 ) from {dbname}.ct3",
+ f"select pow(c5,2 ) from {dbname}.ct3",
+ f"select pow(c6,2 ) from {dbname}.ct3",
+
+ f"select pow(c1,2 ) from {dbname}.stb1",
+ f"select pow(c2,2 ) from {dbname}.stb1",
+ f"select pow(c3,2 ) from {dbname}.stb1",
+ f"select pow(c4,2 ) from {dbname}.stb1",
+ f"select pow(c5,2 ) from {dbname}.stb1",
+ f"select pow(c6,2 ) from {dbname}.stb1",
+
+ f"select pow(c6,2) as alisb from {dbname}.stb1",
+ f"select pow(c6,2) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
-
- def basic_pow_function(self):
- # basic query
- tdSql.query("select c1 from ct3")
+ def basic_pow_function(self, dbname="db"):
+
+ # basic query
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select pow(c1 ,2) from ct3")
+ tdSql.query(f"select pow(c1 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select pow(c2 ,2) from ct3")
+ tdSql.query(f"select pow(c2 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select pow(c3 ,2) from ct3")
+ tdSql.query(f"select pow(c3 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select pow(c4 ,2) from ct3")
+ tdSql.query(f"select pow(c4 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select pow(c5 ,2) from ct3")
+ tdSql.query(f"select pow(c5 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select pow(c6 ,2) from ct3")
+ tdSql.query(f"select pow(c6 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select pow(c1 ,2) from t1")
+ tdSql.query(f"select pow(c1 ,2) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 1.000000000)
tdSql.checkData(3 , 0, 9.000000000)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_pow2( "select c1, c2, c3 , c4, c5 from t1", "select pow(c1 ,2), pow(c2 ,2) ,pow(c3, 2), pow(c4 ,2), pow(c5 ,2) from t1")
- self.check_result_auto_pow1( "select c1, c2, c3 , c4, c5 from t1", "select pow(c1 ,1), pow(c2 ,1) ,pow(c3, 1), pow(c4 ,1), pow(c5 ,1) from t1")
- self.check_result_auto_pow__10( "select c1, c2, c3 , c4, c5 from t1", "select pow(c1 ,-10), pow(c2 ,-10) ,pow(c3, -10), pow(c4 ,-10), pow(c5 ,-10) from t1")
-
+ self.check_result_auto_pow( 2, f"select c1, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,2) , pow(c3, 2), pow(c4 ,2), pow(c5 ,2) from {dbname}.t1")
+ self.check_result_auto_pow( 1,f"select c1, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,1) , pow(c3, 1), pow(c4 ,1), pow(c5 ,1) from {dbname}.t1")
+ self.check_result_auto_pow( 10,f"select c1, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,10) ,pow(c3, 10), pow(c4 ,10), pow(c5 ,10) from {dbname}.t1")
+
# used for sub table
- tdSql.query("select c1 ,pow(c1 ,2) from ct1")
+ tdSql.query(f"select c1 ,pow(c1 ,2) from {dbname}.ct1")
tdSql.checkData(0, 1, 64.000000000)
tdSql.checkData(1 , 1, 49.000000000)
tdSql.checkData(3 , 1, 25.000000000)
@@ -323,7 +246,7 @@ class TDTestCase:
# # test bug fix for pow(c1,c2)
- tdSql.query("select c1, c5 ,pow(c1,c5) from ct4")
+ tdSql.query(f"select c1, c5 ,pow(c1,c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, 104577724.506799981)
tdSql.checkData(2 , 2, 3684781.623933245)
@@ -331,11 +254,11 @@ class TDTestCase:
tdSql.checkData(4 , 2, 7573.273783071)
- self.check_result_auto_pow2( "select c1, c2, c3 , c4, c5 from ct1", "select pow(c1,2), pow(c2,2) ,pow(c3,2), pow(c4,2), pow(c5,2) from ct1")
- self.check_result_auto_pow__10( "select c1, c2, c3 , c4, c5 from ct1", "select pow(c1,-10), pow(c2,-10) ,pow(c3,-10), pow(c4,-10), pow(c5,-10) from ct1")
+ self.check_result_auto_pow( 2, f"select c1, c3 , c4, c5 from {dbname}.ct1", f"select pow(c1,2), pow(c3,2), pow(c4,2), pow(c5,2) from {dbname}.ct1")
+ self.check_result_auto_pow( 10, f"select c1, c3 , c4, c5 from {dbname}.ct1", f"select pow(c1,10), pow(c3,10), pow(c4,10), pow(c5,10) from {dbname}.ct1")
# nest query for pow functions
- tdSql.query("select c1 , pow(c1,2) ,pow(pow(c1,2),2) , pow(pow(pow(c1,2),2),2) from ct1;")
+ tdSql.query(f"select c1 , pow(c1,2) ,pow(pow(c1,2),2) , pow(pow(pow(c1,2),2),2) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 8)
tdSql.checkData(0 , 1 , 64.000000000)
tdSql.checkData(0 , 2 , 4096.000000000)
@@ -351,24 +274,24 @@ class TDTestCase:
tdSql.checkData(4 , 2 , 0.000000000)
tdSql.checkData(4 , 3 , 0.000000000)
- # # used for stable table
-
- tdSql.query("select pow(c1, 2) from stb1")
+ # # used for stable table
+
+ tdSql.query(f"select pow(c1, 2) from {dbname}.stb1")
tdSql.checkRows(25)
-
+
# used for not exists table
- tdSql.error("select pow(c1, 2) from stbbb1")
- tdSql.error("select pow(c1, 2) from tbname")
- tdSql.error("select pow(c1, 2) from ct5")
+ tdSql.error(f"select pow(c1, 2) from {dbname}.stbbb1")
+ tdSql.error(f"select pow(c1, 2) from {dbname}.tbname")
+ tdSql.error(f"select pow(c1, 2) from {dbname}.ct5")
- # mix with common col
- tdSql.query("select c1, pow(c1 ,2) from ct1")
+ # mix with common col
+ tdSql.query(f"select c1, pow(c1 ,2) from {dbname}.ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,64.000000000)
tdSql.checkData(4 , 0 ,0)
tdSql.checkData(4 , 1 ,0.000000000)
- tdSql.query("select c1, pow(c1,2) from ct4")
+ tdSql.query(f"select c1, pow(c1,2) from {dbname}.ct4")
tdSql.checkData(0 , 0 , None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,5)
@@ -377,45 +300,45 @@ class TDTestCase:
tdSql.checkData(5 , 1 ,None)
# mix with common functions
- tdSql.query("select c1, pow(c1 ,2),pow(c1,2), log(pow(c1,2) ,2) from ct4 ")
+ tdSql.query(f"select c1, pow(c1 ,2),pow(c1,2), log(pow(c1,2) ,2) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
tdSql.checkData(0 , 3 ,None)
-
+
tdSql.checkData(3 , 0 , 6)
tdSql.checkData(3 , 1 ,36.000000000)
tdSql.checkData(3 , 2 ,36.000000000)
tdSql.checkData(3 , 3 ,5.169925001)
- tdSql.query("select c1, pow(c1,1),c5, floor(c5 ) from stb1 ")
+ tdSql.query(f"select c1, pow(c1,1),c5, floor(c5 ) from {dbname}.stb1 ")
# # mix with agg functions , not support
- tdSql.error("select c1, pow(c1 ,2),c5, count(c5) from stb1 ")
- tdSql.error("select c1, pow(c1 ,2),c5, count(c5) from ct1 ")
- tdSql.error("select pow(c1 ,2), count(c5) from stb1 ")
- tdSql.error("select pow(c1 ,2), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, pow(c1 ,2),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, pow(c1 ,2),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select pow(c1 ,2), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select pow(c1 ,2), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
+
-
# bug fix for count
- tdSql.query("select count(c1) from ct4 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 ")
tdSql.checkData(0,0,9)
- tdSql.query("select count(*) from ct4 ")
+ tdSql.query(f"select count(*) from {dbname}.ct4 ")
tdSql.checkData(0,0,12)
- tdSql.query("select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,22)
- tdSql.query("select count(*) from stb1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 ")
tdSql.checkData(0,0,25)
- # # bug fix for compute
- tdSql.query("select c1, pow(c1 ,2) -0 ,pow(c1-4 ,2)-0 from ct4 ")
+ # # bug fix for compute
+ tdSql.query(f"select c1, pow(c1 ,2) -0 ,pow(c1-4 ,2)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -423,7 +346,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 64.000000000)
tdSql.checkData(1, 2, 16.000000000)
- tdSql.query(" select c1, pow(c1 ,2) -0 ,pow(c1-0.1 ,2)-0.1 from ct4")
+ tdSql.query(f"select c1, pow(c1 ,2) -0 ,pow(c1-0.1 ,2)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -431,87 +354,86 @@ class TDTestCase:
tdSql.checkData(1, 1, 64.000000000)
tdSql.checkData(1, 2, 62.310000000)
- tdSql.query("select c1, pow(c1, -10), c2, pow(c2, -10), c3, pow(c3, -10) from ct1")
+ tdSql.query(f"select c1, pow(c1, -10), c2, pow(c2, -10), c3, pow(c3, -10) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, pow(c1, 100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(0, 1, None)
tdSql.checkData(1, 1, None)
tdSql.checkData(4, 1, 0.000000000)
- tdSql.query("select c1, pow(c1, 10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(0, 1, None)
tdSql.checkData(1, 1, None)
tdSql.checkData(4, 1, 0.000000000)
- tdSql.query("select c1, pow(c1, 10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, pow(c1, 10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(0, 1, None)
tdSql.checkData(1, 1, None)
tdSql.checkData(4, 1, 0.000000000)
- tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(0, 1, None)
tdSql.checkData(1, 1, None)
tdSql.checkData(4, 1, 0.000000000)
- tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(0, 1, None)
tdSql.checkData(1, 1, None)
tdSql.checkData(4, 1, 0.000000000)
- tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def pow_base_test(self):
+ def pow_base_test(self, dbname="db"):
# base is an regular number ,int or double
- tdSql.query("select c1, pow(c1, 2) from ct1")
+ tdSql.query(f"select c1, pow(c1, 2) from {dbname}.ct1")
tdSql.checkData(0, 1,64.000000000)
- tdSql.query("select c1, pow(c1, 2.0) from ct1")
+ tdSql.query(f"select c1, pow(c1, 2.0) from {dbname}.ct1")
tdSql.checkData(0, 1, 64.000000000)
- tdSql.query("select c1, pow(1, 2.0) from ct1")
+ tdSql.query(f"select c1, pow(1, 2.0) from {dbname}.ct1")
tdSql.checkData(0, 1, 1.000000000)
tdSql.checkRows(13)
# # bug for compute in functions
- # tdSql.query("select c1, abs(1/0) from ct1")
+ # tdSql.query(f"select c1, abs(1/0) from {dbname}.ct1")
# tdSql.checkData(0, 0, 8)
# tdSql.checkData(0, 1, 1)
- tdSql.query("select c1, pow(1, 2.0) from ct1")
+ tdSql.query(f"select c1, pow(1, 2.0) from {dbname}.ct1")
tdSql.checkData(0, 1, 1.000000000)
tdSql.checkRows(13)
# two cols start pow(x,y)
- tdSql.query("select c1,c2, pow(c1,c2) from ct1")
+ tdSql.query(f"select c1,c2, pow(c1,c2) from {dbname}.ct1")
tdSql.checkData(0, 2, None)
tdSql.checkData(1, 2, None)
tdSql.checkData(4, 2, 1.000000000)
- tdSql.query("select c1,c2, pow(c2,c1) from ct1")
+ tdSql.query(f"select c1,c2, pow(c2,c1) from {dbname}.ct1")
tdSql.checkData(0, 2, 3897131646727578700481513520437089271808.000000000)
tdSql.checkData(1, 2, 17217033054561120738612297152331776.000000000)
tdSql.checkData(4, 2, 1.000000000)
- tdSql.query("select c1, pow(2.0 , c1) from ct1")
+ tdSql.query(f"select c1, pow(2.0 , c1) from {dbname}.ct1")
tdSql.checkData(0, 1, 256.000000000)
tdSql.checkData(1, 1, 128.000000000)
tdSql.checkData(4, 1, 1.000000000)
- tdSql.query("select c1, pow(2.0 , c1) from ct1")
+ tdSql.query(f"select c1, pow(2.0 , c1) from {dbname}.ct1")
tdSql.checkData(0, 1, 256.000000000)
tdSql.checkData(1, 1, 128.000000000)
tdSql.checkData(4, 1, 1.000000000)
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -519,7 +441,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,64.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -527,7 +449,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,25.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -535,7 +457,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,25.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1 0 order by tbname " , "select pow(c5,2) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_pow2( " select c5 from stb1 where c1 > 0 order by tbname " , "select pow(c5,2) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_pow2( " select t1,c5 from stb1 order by ts " , "select pow(t1,2), pow(c5,2) from stb1 order by ts" )
- self.check_result_auto_pow2( " select t1,c5 from stb1 order by tbname " , "select pow(t1,2) ,pow(c5,2) from stb1 order by tbname" )
- self.check_result_auto_pow2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select pow(t1,2) ,pow(c5,2) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_pow2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select pow(t1,2) , pow(c5,2) from stb1 where c1 > 0 order by tbname" )
- pass
-
-
+ def support_super_table_test(self, dbname="db"):
+ self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 order by ts " , f"select pow(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 order by ts " , f"select pow(t1,2), pow(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select pow(t1,2) ,pow(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(t1,2) ,pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(t1,2) , pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
-
+
self.prepare_datas()
- tdLog.printNoPrefix("==========step2:test errors ==============")
+ tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
-
- tdLog.printNoPrefix("==========step3:support types ============")
+
+ tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
- tdLog.printNoPrefix("==========step4: pow basic query ============")
+ tdLog.printNoPrefix("==========step4: pow basic query ============")
self.basic_pow_function()
- tdLog.printNoPrefix("==========step5: big number pow query ============")
+ tdLog.printNoPrefix("==========step5: big number pow query ============")
self.test_big_number()
- tdLog.printNoPrefix("==========step6: base number for pow query ============")
+ tdLog.printNoPrefix("==========step6: base number for pow query ============")
self.pow_base_test()
- tdLog.printNoPrefix("==========step7: pow boundary query ============")
+ tdLog.printNoPrefix("==========step7: pow boundary query ============")
self.check_boundary_values()
- tdLog.printNoPrefix("==========step8: pow filter query ============")
+ tdLog.printNoPrefix("==========step8: pow filter query ============")
self.abs_func_filter()
tdLog.printNoPrefix("==========step9: check pow result of stable query ============")
- self.support_super_table_test()
+ self.support_super_table_test()
def stop(self):
tdSql.close()
diff --git a/tests/system-test/2-query/qnodeCluster.py b/tests/system-test/2-query/qnodeCluster.py
index f68eb58a7a0820333b50258cf7cd29d860153cac..9e49bff9389deeb83839477c98e194c014a2a87f 100644
--- a/tests/system-test/2-query/qnodeCluster.py
+++ b/tests/system-test/2-query/qnodeCluster.py
@@ -13,9 +13,9 @@ from util.common import *
sys.path.append("./6-cluster/")
from clusterCommonCreate import *
-from clusterCommonCheck import clusterComCheck
+from clusterCommonCheck import clusterComCheck
-import threading
+import threading
class TDTestCase:
@@ -28,7 +28,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor(), False)
def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1):
tsql.execute("use %s" %dbName)
@@ -47,7 +47,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -55,7 +55,7 @@ class TDTestCase:
dbname="db_tsbs"
stabname1="readings"
stabname2="diagnostics"
- ctbnamePre1="rct"
+ ctbnamePre1="rct"
ctbnamePre2="dct"
ctbNums=40
self.ctbNums=ctbNums
@@ -73,7 +73,7 @@ class TDTestCase:
self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname2,ctbPrefix=ctbnamePre2,ctbNum=ctbNums)
- for j in range(ctbNums):
+ for j in range(ctbNums):
for i in range(rowNUms):
tdSql.execute(
f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
@@ -109,19 +109,19 @@ class TDTestCase:
def tsbsIotQuery(self,tdSql):
tdSql.execute("use db_tsbs")
-
+
# test interval and partition
tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ")
# print(tdSql.queryResult)
parRows=tdSql.queryRows
tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
tdSql.checkRows(parRows)
-
-
- # # test insert into
+
+
+ # # test insert into
# tdSql.execute("create table testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;")
# tdSql.query("insert into testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
-
+
# tdSql.query("insert into testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
@@ -141,7 +141,7 @@ class TDTestCase:
tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;")
- # 2 stationary-trucks
+ # 2 stationary-trucks
tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)")
tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name")
@@ -156,7 +156,7 @@ class TDTestCase:
tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;")
- # # 6. avg-daily-driving-session
+ # # 6. avg-daily-driving-session
# #taosc core dumped
# tdSql.execute("create table random_measure2_1 (ts timestamp,ela float, name binary(40))")
# tdSql.query("SELECT ts,diff(mv) AS difka FROM (SELECT ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name,ts interval(10m) fill(value,0)) GROUP BY name,ts;")
@@ -166,7 +166,7 @@ class TDTestCase:
# 7. avg-load
tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;")
- # 8. daily-activity
+ # 8. daily-activity
tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
@@ -184,7 +184,7 @@ class TDTestCase:
tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")
-
+
#it's already supported:
# last-loc
tdSql.query("SELECT last_row(ts),latitude,longitude,name,driver FROM readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;")
@@ -192,7 +192,7 @@ class TDTestCase:
#2. low-fuel
tdSql.query("SELECT last_row(ts),name,driver,fuel_state,driver FROM diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;")
-
+
# 3. avg-vs-projected-fuel-consumption
tdSql.query("select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from readings where velocity > 1 group by fleet")
@@ -213,16 +213,16 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
-
+
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
-
+
tdSql.query("select * from information_schema.ins_dnodes;")
tdLog.debug(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)
- tdLog.info("create database and stable")
+ tdLog.info("create database and stable")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]
@@ -234,7 +234,7 @@ class TDTestCase:
for tr in threads:
tr.start()
- tdLog.info("Take turns stopping %s "%stopRole)
+ tdLog.info("Take turns stopping %s "%stopRole)
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
@@ -242,7 +242,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@@ -254,7 +254,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
@@ -265,12 +265,12 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
-
+
for tr in threads:
tr.join()
- def run(self):
+ def run(self):
tdLog.printNoPrefix("==========step1:create database and table,insert data ==============")
self.createCluster()
self.prepareData()
diff --git a/tests/system-test/2-query/query_cols_tags_and_or.py b/tests/system-test/2-query/query_cols_tags_and_or.py
index e0fb986d79d8491bf2bd23e82ccde85914c76541..af3fbb83c070202368f317b119377035ac133e16 100644
--- a/tests/system-test/2-query/query_cols_tags_and_or.py
+++ b/tests/system-test/2-query/query_cols_tags_and_or.py
@@ -19,7 +19,7 @@ class TDTestCase:
def init(self, conn, logSql):
## add for TD-6672
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
+ tdSql.init(conn.cursor(), False)
def insertData(self, tb_name):
insert_sql_list = [f'insert into {tb_name} values ("2021-01-01 12:00:00", 1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1, 2, 3, 4)',
@@ -37,17 +37,17 @@ class TDTestCase:
for sql in insert_sql_list:
tdSql.execute(sql)
- def initTb(self):
- tdCom.cleanTb()
- tb_name = tdCom.getLongName(8, "letters")
+ def initTb(self, dbname="db"):
+ tdCom.cleanTb(dbname)
+ tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}'
tdSql.execute(
f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned)")
self.insertData(tb_name)
return tb_name
- def initStb(self, count=5):
- tdCom.cleanTb()
- tb_name = tdCom.getLongName(8, "letters")
+ def initStb(self, count=5, dbname="db"):
+ tdCom.cleanTb(dbname)
+ tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}'
tdSql.execute(
f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 tinyint unsigned, t11 smallint unsigned, t12 int unsigned, t13 bigint unsigned)")
for i in range(1, count+1):
@@ -56,9 +56,10 @@ class TDTestCase:
self.insertData(f'{tb_name}_sub_{i}')
return tb_name
- def initTwoStb(self):
- tdCom.cleanTb()
- tb_name = tdCom.getLongName(8, "letters")
+ def initTwoStb(self, dbname="db"):
+ tdCom.cleanTb(dbname)
+ tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}'
+ # tb_name = tdCom.getLongName(8, "letters")
tb_name1 = f'{tb_name}1'
tb_name2 = f'{tb_name}2'
tdSql.execute(
diff --git a/tests/system-test/2-query/round.py b/tests/system-test/2-query/round.py
index 551e225a4d02025780b4238e2079b70249dcdd5a..1d69d3c9afa1d7fffc3b8eac80c2b648a54bc74e 100644
--- a/tests/system-test/2-query/round.py
+++ b/tests/system-test/2-query/round.py
@@ -8,49 +8,46 @@ from util.sql import *
from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -94,68 +91,68 @@ class TDTestCase:
else:
tdLog.info("round value check pass , it work as expected ,sql is \"%s\" "%round_query )
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select round from t1",
- # "select round(-+--+c1) from t1",
- # "select +-round(c1) from t1",
- # "select ++-round(c1) from t1",
- # "select ++--round(c1) from t1",
- # "select - -round(c1)*0 from t1",
- # "select round(tbname+1) from t1 ",
- "select round(123--123)==1 from t1",
- "select round(c1) as 'd1' from t1",
- "select round(c1 ,c2 ) from t1",
- "select round(c1 ,NULL) from t1",
- "select round(,) from t1;",
- "select round(round(c1) ab from t1)",
- "select round(c1) as int from t1",
- "select round from stb1",
- # "select round(-+--+c1) from stb1",
- # "select +-round(c1) from stb1",
- # "select ++-round(c1) from stb1",
- # "select ++--round(c1) from stb1",
- # "select - -round(c1)*0 from stb1",
- # "select round(tbname+1) from stb1 ",
- "select round(123--123)==1 from stb1",
- "select round(c1) as 'd1' from stb1",
- "select round(c1 ,c2 ) from stb1",
- "select round(c1 ,NULL) from stb1",
- "select round(,) from stb1;",
- "select round(round(c1) ab from stb1)",
- "select round(c1) as int from stb1"
+ f"select round from {dbname}.t1",
+ # f"select round(-+--+c1) from {dbname}.t1",
+ # f"select +-round(c1) from {dbname}.t1",
+ # f"select ++-round(c1) from {dbname}.t1",
+ # f"select ++--round(c1) from {dbname}.t1",
+ # f"select - -round(c1)*0 from {dbname}.t1",
+ # f"select round(tbname+1) from {dbname}.t1 ",
+ f"select round(123--123)==1 from {dbname}.t1",
+ f"select round(c1) as 'd1' from {dbname}.t1",
+ f"select round(c1 ,c2 ) from {dbname}.t1",
+ f"select round(c1 ,NULL) from {dbname}.t1",
+ f"select round(,) from {dbname}.t1;",
+ f"select round(round(c1) ab from {dbname}.t1)",
+ f"select round(c1) as int from {dbname}.t1",
+ f"select round from {dbname}.stb1",
+ # f"select round(-+--+c1) from {dbname}.stb1",
+ # f"select +-round(c1) from {dbname}.stb1",
+ # f"select ++-round(c1) from {dbname}.stb1",
+ # f"select ++--round(c1) from {dbname}.stb1",
+ # f"select - -round(c1)*0 from {dbname}.stb1",
+ # f"select round(tbname+1) from {dbname}.stb1 ",
+ f"select round(123--123)==1 from {dbname}.stb1",
+ f"select round(c1) as 'd1' from {dbname}.stb1",
+ f"select round(c1 ,c2 ) from {dbname}.stb1",
+ f"select round(c1 ,NULL) from {dbname}.stb1",
+ f"select round(,) from {dbname}.stb1;",
+ f"select round(round(c1) ab from {dbname}.stb1)",
+ f"select round(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select round(ts) from t1" ,
- "select round(c7) from t1",
- "select round(c8) from t1",
- "select round(c9) from t1",
- "select round(ts) from ct1" ,
- "select round(c7) from ct1",
- "select round(c8) from ct1",
- "select round(c9) from ct1",
- "select round(ts) from ct3" ,
- "select round(c7) from ct3",
- "select round(c8) from ct3",
- "select round(c9) from ct3",
- "select round(ts) from ct4" ,
- "select round(c7) from ct4",
- "select round(c8) from ct4",
- "select round(c9) from ct4",
- "select round(ts) from stb1" ,
- "select round(c7) from stb1",
- "select round(c8) from stb1",
- "select round(c9) from stb1" ,
-
- "select round(ts) from stbbb1" ,
- "select round(c7) from stbbb1",
-
- "select round(ts) from tbname",
- "select round(c9) from tbname"
+ f"select round(ts) from {dbname}.t1" ,
+ f"select round(c7) from {dbname}.t1",
+ f"select round(c8) from {dbname}.t1",
+ f"select round(c9) from {dbname}.t1",
+ f"select round(ts) from {dbname}.ct1" ,
+ f"select round(c7) from {dbname}.ct1",
+ f"select round(c8) from {dbname}.ct1",
+ f"select round(c9) from {dbname}.ct1",
+ f"select round(ts) from {dbname}.ct3" ,
+ f"select round(c7) from {dbname}.ct3",
+ f"select round(c8) from {dbname}.ct3",
+ f"select round(c9) from {dbname}.ct3",
+ f"select round(ts) from {dbname}.ct4" ,
+ f"select round(c7) from {dbname}.ct4",
+ f"select round(c8) from {dbname}.ct4",
+ f"select round(c9) from {dbname}.ct4",
+ f"select round(ts) from {dbname}.stb1" ,
+ f"select round(c7) from {dbname}.stb1",
+ f"select round(c8) from {dbname}.stb1",
+ f"select round(c9) from {dbname}.stb1" ,
+
+ f"select round(ts) from {dbname}.stbbb1" ,
+ f"select round(c7) from {dbname}.stbbb1",
+
+ f"select round(ts) from {dbname}.tbname",
+ f"select round(c9) from {dbname}.tbname"
]
@@ -164,127 +161,127 @@ class TDTestCase:
type_sql_lists = [
- "select round(c1) from t1",
- "select round(c2) from t1",
- "select round(c3) from t1",
- "select round(c4) from t1",
- "select round(c5) from t1",
- "select round(c6) from t1",
-
- "select round(c1) from ct1",
- "select round(c2) from ct1",
- "select round(c3) from ct1",
- "select round(c4) from ct1",
- "select round(c5) from ct1",
- "select round(c6) from ct1",
-
- "select round(c1) from ct3",
- "select round(c2) from ct3",
- "select round(c3) from ct3",
- "select round(c4) from ct3",
- "select round(c5) from ct3",
- "select round(c6) from ct3",
-
- "select round(c1) from stb1",
- "select round(c2) from stb1",
- "select round(c3) from stb1",
- "select round(c4) from stb1",
- "select round(c5) from stb1",
- "select round(c6) from stb1",
-
- "select round(c6) as alisb from stb1",
- "select round(c6) alisb from stb1",
+ f"select round(c1) from {dbname}.t1",
+ f"select round(c2) from {dbname}.t1",
+ f"select round(c3) from {dbname}.t1",
+ f"select round(c4) from {dbname}.t1",
+ f"select round(c5) from {dbname}.t1",
+ f"select round(c6) from {dbname}.t1",
+
+ f"select round(c1) from {dbname}.ct1",
+ f"select round(c2) from {dbname}.ct1",
+ f"select round(c3) from {dbname}.ct1",
+ f"select round(c4) from {dbname}.ct1",
+ f"select round(c5) from {dbname}.ct1",
+ f"select round(c6) from {dbname}.ct1",
+
+ f"select round(c1) from {dbname}.ct3",
+ f"select round(c2) from {dbname}.ct3",
+ f"select round(c3) from {dbname}.ct3",
+ f"select round(c4) from {dbname}.ct3",
+ f"select round(c5) from {dbname}.ct3",
+ f"select round(c6) from {dbname}.ct3",
+
+ f"select round(c1) from {dbname}.stb1",
+ f"select round(c2) from {dbname}.stb1",
+ f"select round(c3) from {dbname}.stb1",
+ f"select round(c4) from {dbname}.stb1",
+ f"select round(c5) from {dbname}.stb1",
+ f"select round(c6) from {dbname}.stb1",
+
+ f"select round(c6) as alisb from {dbname}.stb1",
+ f"select round(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def basic_round_function(self):
+ def basic_round_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select round(c1) from ct3")
+ tdSql.query(f"select round(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select round(c2) from ct3")
+ tdSql.query(f"select round(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select round(c3) from ct3")
+ tdSql.query(f"select round(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select round(c4) from ct3")
+ tdSql.query(f"select round(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select round(c5) from ct3")
+ tdSql.query(f"select round(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select round(c6) from ct3")
+ tdSql.query(f"select round(c6) from {dbname}.ct3")
# used for regular table
- tdSql.query("select round(c1) from t1")
+ tdSql.query(f"select round(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 1)
tdSql.checkData(3 , 0, 3)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto( "select c1, c2, c3 , c4, c5 from t1", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from t1")
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select (c1), round(c2) ,round(c3), round(c4), round(c5) from {dbname}.t1")
# used for sub table
- tdSql.query("select round(c1) from ct1")
+ tdSql.query(f"select round(c1) from {dbname}.ct1")
tdSql.checkData(0, 0, 8)
tdSql.checkData(1 , 0, 7)
tdSql.checkData(3 , 0, 5)
tdSql.checkData(5 , 0, 4)
- tdSql.query("select round(c1) from ct1")
- self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct1", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from ct1")
- self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from ct1;","select c1 from ct1" )
+ tdSql.query(f"select round(c1) from {dbname}.ct1")
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select (c1), round(c2) ,round(c3), round(c4), round(c5) from {dbname}.ct1")
+ self.check_result_auto(f"select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from {dbname}.ct1;",f"select c1 from {dbname}.ct1" )
# used for stable table
- tdSql.query("select round(c1) from stb1")
+ tdSql.query(f"select round(c1) from {dbname}.stb1")
tdSql.checkRows(25)
- self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct4 ", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from ct4")
- self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from ct4;" , "select c1 from ct4" )
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.ct4 ", f"select (c1), round(c2) ,round(c3), round(c4), round(c5) from {dbname}.ct4")
+ self.check_result_auto(f"select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from {dbname}.ct4;" , f"select c1 from {dbname}.ct4" )
# used for not exists table
- tdSql.error("select round(c1) from stbbb1")
- tdSql.error("select round(c1) from tbname")
- tdSql.error("select round(c1) from ct5")
+ tdSql.error(f"select round(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select round(c1) from {dbname}.tbname")
+ tdSql.error(f"select round(c1) from {dbname}.ct5")
# mix with common col
- tdSql.query("select c1, round(c1) from ct1")
+ tdSql.query(f"select c1, round(c1) from {dbname}.ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,8)
tdSql.checkData(4 , 0 ,0)
tdSql.checkData(4 , 1 ,0)
- tdSql.query("select c1, round(c1) from ct4")
+ tdSql.query(f"select c1, round(c1) from {dbname}.ct4")
tdSql.checkData(0 , 0 , None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,5)
tdSql.checkData(4 , 1 ,5)
tdSql.checkData(5 , 0 ,None)
tdSql.checkData(5 , 1 ,None)
- tdSql.query("select c1, round(c1) from ct4 ")
+ tdSql.query(f"select c1, round(c1) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,5)
tdSql.checkData(4 , 1 ,5)
# mix with common functions
- tdSql.query("select c1, round(c1),c5, round(c5) from ct4 ")
+ tdSql.query(f"select c1, round(c1),c5, round(c5) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
@@ -300,34 +297,34 @@ class TDTestCase:
tdSql.checkData(6 , 2 ,4.44000)
tdSql.checkData(6 , 3 ,4.00000)
- tdSql.query("select c1, round(c1),c5, round(c5) from stb1 ")
+ tdSql.query(f"select c1, round(c1),c5, round(c5) from {dbname}.stb1 ")
# mix with agg functions , not support
- tdSql.error("select c1, round(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, round(c1),c5, count(c5) from ct1 ")
- tdSql.error("select round(c1), count(c5) from stb1 ")
- tdSql.error("select round(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, round(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, round(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select round(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select round(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
# bug fix for count
- tdSql.query("select count(c1) from ct4 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 ")
tdSql.checkData(0,0,9)
- tdSql.query("select count(*) from ct4 ")
+ tdSql.query(f"select count(*) from {dbname}.ct4 ")
tdSql.checkData(0,0,12)
- tdSql.query("select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,22)
- tdSql.query("select count(*) from stb1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 ")
tdSql.checkData(0,0,25)
# bug fix for compute
- tdSql.query("select c1, abs(c1) -0 ,round(c1)-0 from ct4 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,round(c1)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -335,7 +332,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 8.000000000)
tdSql.checkData(1, 2, 8.000000000)
- tdSql.query(" select c1, abs(c1) -0 ,round(c1-0.1)-0.1 from ct4")
+ tdSql.query(f"select c1, abs(c1) -0 ,round(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -343,9 +340,8 @@ class TDTestCase:
tdSql.checkData(1, 1, 8.000000000)
tdSql.checkData(1, 2, 7.900000000)
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -353,7 +349,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,3.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -361,7 +357,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -369,7 +365,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) , round(abs(c1))-0.5 from ct4 where c1>log(c1,2) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) , round(abs(c1))-0.5 from {dbname}.ct4 where c1>log(c1,2) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,88888)
@@ -382,44 +378,42 @@ class TDTestCase:
def round_Arithmetic(self):
pass
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select round(c1), round(c2) ,round(c3), round(c4), round(c5) ,round(c6) from sub1_bound")
- self.check_result_auto( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select round(c1), round(c2) ,round(c3), round(c3), round(c2) ,round(c1) from sub1_bound")
- self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from sub1_bound;" , "select round(c1) from sub1_bound" )
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select round(c1), round(c2) ,round(c3), round(c4), round(c5) ,round(c6) from {dbname}.sub1_bound")
+ self.check_result_auto( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select round(c1), round(c2) ,round(c3), round(c3), round(c2) ,round(c1) from {dbname}.sub1_bound")
+ self.check_result_auto(f"select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from {dbname}.sub1_bound;" , f"select round(c1) from {dbname}.sub1_bound" )
# check basic elem for table per row
- tdSql.query("select round(c1+0.2) ,round(c2) , round(c3+0.3) , round(c4-0.3), round(c5/2), round(c6/2) from sub1_bound ")
+ tdSql.query(f"select round(c1+0.2) ,round(c2) , round(c3+0.3) , round(c4-0.3), round(c5/2), round(c6/2) from {dbname}.sub1_bound ")
tdSql.checkData(0, 0, 2147483647.000000000)
tdSql.checkData(0, 2, 32767.000000000)
tdSql.checkData(0, 3, 127.000000000)
@@ -430,19 +424,18 @@ class TDTestCase:
tdSql.checkData(4, 3, -123.000000000)
tdSql.checkData(4, 4, -169499995645668991474575059260979281920.000000000)
- self.check_result_auto("select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from sub1_bound" ,"select round(c1+1) ,round(c2) , round(c3*1) , round(c4/2), round(c5)/2, round(c6) from sub1_bound ")
+ self.check_result_auto(f"select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from {dbname}.sub1_bound" ,f"select round(c1+1) ,round(c2) , round(c3*1) , round(c4/2), round(c5)/2, round(c6) from {dbname}.sub1_bound ")
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto( " select c5 from stb1 order by ts " , "select round(c5) from stb1 order by ts" )
- self.check_result_auto( " select c5 from stb1 order by tbname " , "select round(c5) from stb1 order by tbname" )
- self.check_result_auto( " select c5 from stb1 where c1 > 0 order by tbname " , "select round(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select c5 from stb1 where c1 > 0 order by tbname " , "select round(c5) from stb1 where c1 > 0 order by tbname" )
+ def support_super_table_test(self, dbname="db"):
+ self.check_result_auto( f"select c5 from {dbname}.stb1 order by ts " , f"select round(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto( f"select c5 from {dbname}.stb1 order by tbname " , f"select round(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select t1,c5 from stb1 order by ts " , "select round(t1), round(c5) from stb1 order by ts" )
- self.check_result_auto( " select t1,c5 from stb1 order by tbname " , "select round(t1) ,round(c5) from stb1 order by tbname" )
- self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select round(t1) ,round(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select round(t1) , round(c5) from stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select round(t1), round(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select round(t1) ,round(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(t1) ,round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(t1) , round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
pass
diff --git a/tests/system-test/2-query/rtrim.py b/tests/system-test/2-query/rtrim.py
index 30624792cc33866a19c0ec1a31594cdfa438ffcf..80307e8534787889b080baa0c25a32b638c49461 100644
--- a/tests/system-test/2-query/rtrim.py
+++ b/tests/system-test/2-query/rtrim.py
@@ -120,16 +120,16 @@ class TDTestCase:
return sqls
- def __test_current(self): # sourcery skip: use-itertools-product
+ def __test_current(self, dbname="db"): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__rtrim_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__rtrim_err_check(tb):
@@ -142,17 +142,15 @@ class TDTestCase:
self.__test_error()
- def __create_tb(self):
- tdSql.prepare()
-
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -162,29 +160,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -200,7 +198,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -216,13 +214,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -251,8 +249,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
diff --git a/tests/system-test/2-query/sample.py b/tests/system-test/2-query/sample.py
index 45be0ef8abc8e32c1dbe904fb2dcd225f3db5f94..7f1d7ab8c0d62fb4db7386caf3b4eeca4b3f8cba 100644
--- a/tests/system-test/2-query/sample.py
+++ b/tests/system-test/2-query/sample.py
@@ -11,21 +11,17 @@
# -*- coding: utf-8 -*-
-from pstats import Stats
import sys
-import subprocess
import random
-import math
-import numpy as np
-import inspect
import re
-import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
+DBNAME = "db"
+
class TDTestCase:
def init(self, conn, logSql):
@@ -33,11 +29,11 @@ class TDTestCase:
tdSql.init(conn.cursor())
self.ts = 1537146000000
- def sample_query_form(self, sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""):
+ def sample_query_form(self, sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""):
'''
sample function:
- :param sel: string, must be "select", required parameters;
+ :param sel: string, must be "select", required parameters;
:param func: string, in this case must be "sample(", otherwise return other function, required parameters;
:param col: string, column name, required parameters;
:param m_comm: string, comma between col and k , required parameters;
@@ -47,12 +43,12 @@ class TDTestCase:
:param fr: string, must be "from", required parameters;
:param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters;
:param condition: expression;
- :return: sample query statement,default: select sample(c1, 1) from t1
+ :return: sample query statement, default: select sample(c1, 1) from {DBNAME}.t1
'''
return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}"
- def checksample(self,sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""):
+ def checksample(self,sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr=f"{DBNAME}.t1", condition=""):
# print(self.sample_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
# table_expr=table_expr, condition=condition))
line = sys._getframe().f_back.f_lineno
@@ -65,7 +61,7 @@ class TDTestCase:
))
- sql = "select * from t1"
+ sql = f"select * from {table_expr}"
collist = tdSql.getColNameList(sql)
if not isinstance(col, str):
@@ -125,7 +121,7 @@ class TDTestCase:
# table_expr=table_expr, condition=condition
# ))
- if any( [func != "sample(" , r_comm != ")" , fr != "from", sel != "select"]):
+ if any( [func != "sample(" , r_comm != ")" , fr != "from", sel != "select"]):
print(f"case in {line}: ", end='')
return tdSql.error(self.sample_query_form(
sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
@@ -286,14 +282,14 @@ class TDTestCase:
return
else:
- if "where" in condition:
- condition = re.sub('where', f"where {col} is not null and ", condition)
- else:
- condition = f"where {col} is not null" + condition
- print(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
- tdSql.query(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
+ # if "where" in condition:
+ # condition = re.sub('where', f"where {col} is not null and ", condition)
+ # else:
+ # condition = f"where {col} is not null" + condition
+ # print(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
+ # tdSql.query(f"select _c0, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
# offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0
- pre_sample = tdSql.queryResult
+ # pre_sample = tdSql.queryResult
# pre_len = tdSql.queryRows
# for i in range(sample_len):
# if sample_result[pre_row:pre_row + step][i] not in pre_sample:
@@ -301,7 +297,7 @@ class TDTestCase:
# else:
# tdLog.info(f"case in {line} is success: sample data is in {group_name}")
- pass
+ pass
def sample_current_query(self) :
@@ -322,24 +318,24 @@ class TDTestCase:
self.checksample(**case6)
# # case7~8: nested query
- # case7 = {"table_expr": "(select c1 from stb1)"}
- # self.checksample(**case7)
- # case8 = {"table_expr": "(select sample(c1, 1) c1 from stb1 group by tbname)"}
- # self.checksample(**case8)
+ case7 = {"table_expr": f"(select c1 from {DBNAME}.stb1)"}
+ self.checksample(**case7)
+ case8 = {"table_expr": f"(select sample(c1, 1) c1 from {DBNAME}.stb1 group by tbname)"}
+ self.checksample(**case8)
# case9~10: mix with tbname/ts/tag/col
- # case9 = {"alias": ", tbname"}
- # self.checksample(**case9)
- # case10 = {"alias": ", _c0"}
- # self.checksample(**case10)
- # case11 = {"alias": ", st1"}
+ case9 = {"alias": ", tbname"}
+ self.checksample(**case9)
+ case10 = {"alias": ", _c0"}
+ self.checksample(**case10)
+ case11 = {"alias": ", st1"}
# self.checksample(**case11)
- tdSql.query("select sample( c1 , 1 ) , st1 from t1")
+ tdSql.query(f"select sample( c1 , 1 ) , st1 from {DBNAME}.t1")
- # case12 = {"alias": ", c1"}
+ case12 = {"alias": ", c1"}
# self.checksample(**case12)
- tdSql.query("select sample( c1 , 1 ) , c1 from t1")
+ tdSql.query(f"select sample( c1 , 1 ) , c1 from {DBNAME}.t1")
# case13~15: with single condition
case13 = {"condition": "where c1 <= 10"}
@@ -353,32 +349,31 @@ class TDTestCase:
case16 = {"condition": "where c6=1 or c6 =0"}
self.checksample(**case16)
- # # case17: only support normal table join
- # case17 = {
- # "col": "t1.c1",
- # "table_expr": "t1, t2",
- # "condition": "where t1.ts=t2.ts"
- # }
- # self.checksample(**case17)
- # # case18~19: with group by
- # case19 = {
- # "table_expr": "stb1",
- # "condition": "partition by tbname"
- # }
+ # case17: only support normal table join
+ case17 = {
+ "col": "t1.c1",
+ "table_expr": f"{DBNAME}.t1 t1 join {DBNAME}.t2 t2 on t1.ts = t2.ts",
+ }
+ self.checksample(**case17)
+ # case18~19: with group by
+ case19 = {
+ "table_expr": f"{DBNAME}.stb1",
+ "condition": "partition by tbname"
+ }
# self.checksample(**case19)
- # # case20~21: with order by
- # case20 = {"condition": "order by ts"}
+ # case20~21: with order by
+ case20 = {"condition": "order by ts"}
# self.checksample(**case20)
- # case21 = {
- # "table_expr": "stb1",
- # "condition": "partition by tbname order by tbname"
- # }
+ case21 = {
+ "table_expr": f"{DBNAME}.stb1",
+ "condition": "partition by tbname order by tbname"
+ }
# self.checksample(**case21)
# case22: with union
case22 = {
- "condition": "union all select sample( c1 , 1 ) from t2"
+ "condition": f"union all select sample( c1 , 1 ) from {DBNAME}.t2"
}
self.checksample(**case22)
@@ -396,12 +391,12 @@ class TDTestCase:
case26 = {"k": 1000}
self.checksample(**case26)
case27 = {
- "table_expr": "stb1",
+ "table_expr": f"{DBNAME}.stb1",
"condition": "group by tbname slimit 1 "
}
self.checksample(**case27) # with slimit
case28 = {
- "table_expr": "stb1",
+ "table_expr": f"{DBNAME}.stb1",
"condition": "group by tbname slimit 1 soffset 1"
}
self.checksample(**case28) # with soffset
@@ -431,7 +426,7 @@ class TDTestCase:
# err9 = {"col": "st1"}
# self.checksample(**err9) # col: tag
- tdSql.query(" select sample(st1 ,1) from t1 ")
+ tdSql.query(f"select sample(st1 ,1) from {DBNAME}.t1 ")
# err10 = {"col": 1}
# self.checksample(**err10) # col: value
# err11 = {"col": "NULL"}
@@ -494,13 +489,13 @@ class TDTestCase:
self.checksample(**err39) # mix with calculation function 2
# err40 = {"alias": "+ 2"}
# self.checksample(**err40) # mix with arithmetic 1
- # tdSql.query(" select sample(c1 , 1) + 2 from t1 ")
+        # tdSql.query(f"select sample(c1 , 1) + 2 from {DBNAME}.t1 ")
err41 = {"alias": "+ avg(c1)"}
# self.checksample(**err41) # mix with arithmetic 2
# err42 = {"alias": ", c1"}
# self.checksample(**err42)
- tdSql.query("select sample( c1 , 1 ) , c1 from t1")
+ tdSql.query(f"select sample( c1 , 1 ) , c1 from {DBNAME}.t1")
# mix with other col
# err43 = {"table_expr": "stb1"}
# self.checksample(**err43) # select stb directly
@@ -510,14 +505,14 @@ class TDTestCase:
# "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
# }
# self.checksample(**err44) # stb join
- tdSql.query("select sample( stb1.c1 , 1 ) from stb1, stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts")
+ tdSql.query(f"select sample( stb1.c1 , 1 ) from {DBNAME}.stb1 stb1, {DBNAME}.stb2 stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts")
# err45 = {
# "condition": "where ts>0 and ts < now interval(1h) fill(next)"
# }
# self.checksample(**err45) # interval
- tdSql.error("select sample( c1 , 1 ) from t1 where ts>0 and ts < now interval(1h) fill(next)")
+ tdSql.error(f"select sample( c1 , 1 ) from {DBNAME}.t1 where ts>0 and ts < now interval(1h) fill(next)")
err46 = {
- "table_expr": "t1",
+ "table_expr": f"{DBNAME}.t1",
"condition": "group by c6"
}
# self.checksample(**err46) # group by normal col
@@ -563,49 +558,45 @@ class TDTestCase:
pass
- def sample_test_data(self, tbnum:int, data_row:int, basetime:int) -> None :
+ def sample_test_data(self, tbnum:int, data_row:int, basetime:int, dbname="db") -> None :
for i in range(tbnum):
for j in range(data_row):
tdSql.execute(
- f"insert into t{i} values ("
+ f"insert into {dbname}.t{i} values ("
f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
)
tdSql.execute(
- f"insert into t{i} values ("
+ f"insert into {dbname}.t{i} values ("
f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
)
tdSql.execute(
- f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+ f"insert into {dbname}.tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
)
pass
- def sample_test_table(self,tbnum: int) -> None :
- tdSql.execute("drop database if exists db")
- tdSql.execute("create database if not exists db keep 3650")
- tdSql.execute("use db")
+ def sample_test_table(self,tbnum: int, dbname="db") -> None :
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650")
tdSql.execute(
- "create stable db.stb1 (\
+ f"create stable {dbname}.stb1 (\
ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
) \
tags(st1 int)"
)
tdSql.execute(
- "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+ f"create stable {dbname}.stb2 (ts timestamp, c1 int) tags(st2 int)"
)
for i in range(tbnum):
- tdSql.execute(f"create table t{i} using stb1 tags({i})")
- tdSql.execute(f"create table tt{i} using stb2 tags({i})")
-
- pass
-
+ tdSql.execute(f"create table {dbname}.t{i} using {dbname}.stb1 tags({i})")
+ tdSql.execute(f"create table {dbname}.tt{i} using {dbname}.stb2 tags({i})")
def check_sample(self , sample_query , origin_query ):
@@ -626,45 +617,43 @@ class TDTestCase:
else:
tdLog.exit(" sample data is not in datas groups ,failed sql is : %s" % sample_query )
-
- def basic_sample_query(self):
- tdSql.execute(" drop database if exists db ")
- tdSql.execute(" create database if not exists db duration 300d ")
- tdSql.execute(" use db ")
+ def basic_sample_query(self, dbname="db"):
+ tdSql.execute(f" drop database if exists {dbname} ")
+ tdSql.execute(f" create database if not exists {dbname} duration 300d ")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -683,116 +672,116 @@ class TDTestCase:
# basic query for sample
# params test for all
- tdSql.error(" select sample(c1,c1) from t1 ")
- tdSql.error(" select sample(c1,now) from t1 ")
- tdSql.error(" select sample(c1,tbname) from t1 ")
- tdSql.error(" select sample(c1,ts) from t1 ")
- tdSql.error(" select sample(c1,false) from t1 ")
- tdSql.query(" select sample(123,1) from t1 ")
-
- tdSql.query(" select sample(c1,2) from t1 ")
+ tdSql.error(f"select sample(c1,c1) from {dbname}.t1 ")
+ tdSql.error(f"select sample(c1,now) from {dbname}.t1 ")
+ tdSql.error(f"select sample(c1,tbname) from {dbname}.t1 ")
+ tdSql.error(f"select sample(c1,ts) from {dbname}.t1 ")
+ tdSql.error(f"select sample(c1,false) from {dbname}.t1 ")
+ tdSql.query(f"select sample(123,1) from {dbname}.t1 ")
+
+ tdSql.query(f"select sample(c1,2) from {dbname}.t1 ")
tdSql.checkRows(2)
- tdSql.query(" select sample(c1,10) from t1 ")
+ tdSql.query(f"select sample(c1,10) from {dbname}.t1 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c8,10) from t1 ")
+ tdSql.query(f"select sample(c8,10) from {dbname}.t1 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c1,999) from t1 ")
+ tdSql.query(f"select sample(c1,999) from {dbname}.t1 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c1,1000) from t1 ")
+ tdSql.query(f"select sample(c1,1000) from {dbname}.t1 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c8,1000) from t1 ")
+ tdSql.query(f"select sample(c8,1000) from {dbname}.t1 ")
tdSql.checkRows(9)
- tdSql.error(" select sample(c1,-1) from t1 ")
+ tdSql.error(f"select sample(c1,-1) from {dbname}.t1 ")
# bug need fix
- # tdSql.query("select sample(c1 ,2) , 123 from stb1;")
+ # tdSql.query(f"select sample(c1 ,2) , 123 from {dbname}.stb1;")
# all type support
- tdSql.query(" select sample(c1 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c1 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c2 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c2 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c3 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c3 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c4 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c4 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c5 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c5 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c6 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c6 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c7 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c7 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c8 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c8 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c9 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c9 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c10 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c10 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- # tdSql.query(" select sample(t1 , 20 ) from ct1 ")
+ # tdSql.query(f"select sample(t1 , 20 ) from {dbname}.ct1 ")
# tdSql.checkRows(13)
# filter data
- tdSql.query(" select sample(c1, 20 ) from t1 where c1 is null ")
+ tdSql.query(f"select sample(c1, 20 ) from {dbname}.t1 where c1 is null ")
tdSql.checkRows(1)
- tdSql.query(" select sample(c1, 20 ) from t1 where c1 =6 ")
+ tdSql.query(f"select sample(c1, 20 ) from {dbname}.t1 where c1 =6 ")
tdSql.checkRows(1)
- tdSql.query(" select sample(c1, 20 ) from t1 where c1 > 6 ")
+ tdSql.query(f"select sample(c1, 20 ) from {dbname}.t1 where c1 > 6 ")
tdSql.checkRows(3)
- self.check_sample("select sample(c1, 20 ) from t1 where c1 > 6" , "select c1 from t1 where c1 > 6")
+ self.check_sample(f"select sample(c1, 20 ) from {dbname}.t1 where c1 > 6" , f"select c1 from {dbname}.t1 where c1 > 6")
- tdSql.query(" select sample( c1 , 1 ) from t1 where c1 in (0, 1,2) ")
+ tdSql.query(f"select sample( c1 , 1 ) from {dbname}.t1 where c1 in (0, 1,2) ")
tdSql.checkRows(1)
- tdSql.query("select sample( c1 ,3 ) from t1 where c1 between 1 and 10 ")
+ tdSql.query(f"select sample( c1 ,3 ) from {dbname}.t1 where c1 between 1 and 10 ")
tdSql.checkRows(3)
- self.check_sample("select sample( c1 ,3 ) from t1 where c1 between 1 and 10" ,"select c1 from t1 where c1 between 1 and 10")
+ self.check_sample(f"select sample( c1 ,3 ) from {dbname}.t1 where c1 between 1 and 10" ,f"select c1 from {dbname}.t1 where c1 between 1 and 10")
# join
- tdSql.query("select sample( ct4.c1 , 1 ) from ct1, ct4 where ct4.ts=ct1.ts")
+ tdSql.query(f"select sample( ct4.c1 , 1 ) from {dbname}.ct1 ct1, {dbname}.ct4 ct4 where ct4.ts=ct1.ts")
# partition by tbname
- tdSql.query("select sample(c1,2) from stb1 partition by tbname")
+ tdSql.query(f"select sample(c1,2) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(4)
- self.check_sample("select sample(c1,2) from stb1 partition by tbname" , "select c1 from stb1 partition by tbname")
+ self.check_sample(f"select sample(c1,2) from {dbname}.stb1 partition by tbname" , f"select c1 from {dbname}.stb1 partition by tbname")
# nest query
- # tdSql.query("select sample(c1,2) from (select c1 from t1); ")
+ # tdSql.query(f"select sample(c1,2) from (select c1 from {dbname}.t1); ")
# tdSql.checkRows(2)
# union all
- tdSql.query("select sample(c1,2) from t1 union all select sample(c1,3) from t1")
+ tdSql.query(f"select sample(c1,2) from {dbname}.t1 union all select sample(c1,3) from {dbname}.t1")
tdSql.checkRows(5)
# fill interval
# not support mix with other function
- tdSql.error("select top(c1,2) , sample(c1,2) from ct1")
- tdSql.error("select max(c1) , sample(c1,2) from ct1")
- tdSql.query("select c1 , sample(c1,2) from ct1")
+ tdSql.error(f"select top(c1,2) , sample(c1,2) from {dbname}.ct1")
+ tdSql.error(f"select max(c1) , sample(c1,2) from {dbname}.ct1")
+ tdSql.query(f"select c1 , sample(c1,2) from {dbname}.ct1")
# bug for mix with scalar
- tdSql.query("select 123 , sample(c1,100) from ct1")
- tdSql.query("select sample(c1,100)+2 from ct1")
- tdSql.query("select abs(sample(c1,100)) from ct1")
+ tdSql.query(f"select 123 , sample(c1,100) from {dbname}.ct1")
+ tdSql.query(f"select sample(c1,100)+2 from {dbname}.ct1")
+ tdSql.query(f"select abs(sample(c1,100)) from {dbname}.ct1")
- def sample_test_run(self) :
+ def sample_test_run(self, dbname="db") :
tdLog.printNoPrefix("==========support sample function==========")
tbnum = 10
nowtime = int(round(time.time() * 1000))
@@ -805,28 +794,28 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert only NULL test:")
for i in range(tbnum):
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime - 5})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime + 5})")
self.sample_current_query()
self.sample_error_query()
tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):")
- # self.sample_test_table(tbnum)
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
- # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
- # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
- # self.sample_current_query()
- # self.sample_error_query()
+ self.sample_test_table(tbnum)
+ tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
+ tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
+ self.sample_current_query()
+ self.sample_error_query()
tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):")
- # self.sample_test_table(tbnum)
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
- # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})")
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
- # f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})")
- # self.sample_current_query()
- # self.sample_error_query()
+ self.sample_test_table(tbnum)
+ tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})")
+ tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})")
+ self.sample_current_query()
+ self.sample_error_query()
tdLog.printNoPrefix("######## insert data without NULL data test:")
self.sample_test_table(tbnum)
@@ -837,16 +826,16 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert data mix with NULL test:")
for i in range(tbnum):
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
self.sample_current_query()
self.sample_error_query()
tdLog.printNoPrefix("######## check after WAL test:")
- tdSql.query("select * from information_schema.ins_dnodes")
+        tdSql.query("select * from information_schema.ins_dnodes")
index = tdSql.getData(0, 0)
tdDnodes.stop(index)
tdDnodes.start(index)
@@ -855,19 +844,19 @@ class TDTestCase:
self.basic_sample_query()
- def sample_big_data(self):
- tdSql.execute("create database sample_db")
+ def sample_big_data(self, dbname="sample_db"):
+ tdSql.execute(f"create database {dbname}")
tdSql.execute("use sample_db")
- tdSql.execute("create stable st (ts timestamp ,c1 int ) tags(ind int)" )
- tdSql.execute("create table sub_tb using st tags(1)")
+ tdSql.execute(f"create stable {dbname}.st (ts timestamp ,c1 int ) tags(ind int)" )
+ tdSql.execute(f"create table {dbname}.sub_tb using {dbname}.st tags(1)")
for i in range(2000):
ts = self.ts+i*10
- tdSql.execute(f"insert into sub_tb values({ts} ,{i})")
+ tdSql.execute(f"insert into {dbname}.sub_tb values({ts} ,{i})")
- tdSql.query("select count(*) from st")
+ tdSql.query(f"select count(*) from {dbname}.st")
tdSql.checkData(0,0,2000)
- tdSql.query("select sample(c1 ,1000) from st")
+ tdSql.query(f"select sample(c1 ,1000) from {dbname}.st")
tdSql.checkRows(1000)
# bug need fix
diff --git a/tests/system-test/2-query/sin.py b/tests/system-test/2-query/sin.py
index 7cb559c510f637c25fef6e7573ea44c92a2051bc..a1ba3354879eb9e1e0abe66ae445f7604734ad66 100644
--- a/tests/system-test/2-query/sin.py
+++ b/tests/system-test/2-query/sin.py
@@ -9,48 +9,46 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
-
- def prepare_datas(self):
+
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
-
+
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -65,14 +63,15 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
-
+
def check_result_auto_sin(self ,origin_query , pow_query):
pow_result = tdSql.getResult(pow_query)
+
origin_result = tdSql.getResult(origin_query)
auto_result =[]
-
+
for row in origin_result:
row_check = []
for elem in row:
@@ -82,190 +81,179 @@ class TDTestCase:
elem = math.sin(elem)
row_check.append(elem)
auto_result.append(row_check)
-
- check_status = True
-
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("sin function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("sin value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def test_errors(self):
+ tdSql.checkData(row_index ,col_index ,auto_result[row_index][col_index])
+
+
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select sin from t1",
- # "select sin(-+--+c1 ) from t1",
- # "select +-sin(c1) from t1",
- # "select ++-sin(c1) from t1",
- # "select ++--sin(c1) from t1",
- # "select - -sin(c1)*0 from t1",
- # "select sin(tbname+1) from t1 ",
- "select sin(123--123)==1 from t1",
- "select sin(c1) as 'd1' from t1",
- "select sin(c1 ,c2) from t1",
- "select sin(c1 ,NULL ) from t1",
- "select sin(,) from t1;",
- "select sin(sin(c1) ab from t1)",
- "select sin(c1 ) as int from t1",
- "select sin from stb1",
- # "select sin(-+--+c1) from stb1",
- # "select +-sin(c1) from stb1",
- # "select ++-sin(c1) from stb1",
- # "select ++--sin(c1) from stb1",
- # "select - -sin(c1)*0 from stb1",
- # "select sin(tbname+1) from stb1 ",
- "select sin(123--123)==1 from stb1",
- "select sin(c1) as 'd1' from stb1",
- "select sin(c1 ,c2 ) from stb1",
- "select sin(c1 ,NULL) from stb1",
- "select sin(,) from stb1;",
- "select sin(sin(c1) ab from stb1)",
- "select sin(c1) as int from stb1"
+ f"select sin from {dbname}.t1",
+ # f"select sin(-+--+c1 ) from {dbname}.t1",
+ # f"select +-sin(c1) from {dbname}.t1",
+ # f"select ++-sin(c1) from {dbname}.t1",
+ # f"select ++--sin(c1) from {dbname}.t1",
+ # f"select - -sin(c1)*0 from {dbname}.t1",
+ # f"select sin(tbname+1) from {dbname}.t1 ",
+ f"select sin(123--123)==1 from {dbname}.t1",
+ f"select sin(c1) as 'd1' from {dbname}.t1",
+ f"select sin(c1 ,c2) from {dbname}.t1",
+ f"select sin(c1 ,NULL ) from {dbname}.t1",
+ f"select sin(,) from {dbname}.t1;",
+ f"select sin(sin(c1) ab from {dbname}.t1)",
+ f"select sin(c1 ) as int from {dbname}.t1",
+ f"select sin from {dbname}.stb1",
+ # f"select sin(-+--+c1) from {dbname}.stb1",
+ # f"select +-sin(c1) from {dbname}.stb1",
+ # f"select ++-sin(c1) from {dbname}.stb1",
+ # f"select ++--sin(c1) from {dbname}.stb1",
+ # f"select - -sin(c1)*0 from {dbname}.stb1",
+ # f"select sin(tbname+1) from {dbname}.stb1 ",
+ f"select sin(123--123)==1 from {dbname}.stb1",
+ f"select sin(c1) as 'd1' from {dbname}.stb1",
+ f"select sin(c1 ,c2 ) from {dbname}.stb1",
+ f"select sin(c1 ,NULL) from {dbname}.stb1",
+ f"select sin(,) from {dbname}.stb1;",
+ f"select sin(sin(c1) ab from {dbname}.stb1)",
+ f"select sin(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
-
- def support_types(self):
+
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select sin(ts) from t1" ,
- "select sin(c7) from t1",
- "select sin(c8) from t1",
- "select sin(c9) from t1",
- "select sin(ts) from ct1" ,
- "select sin(c7) from ct1",
- "select sin(c8) from ct1",
- "select sin(c9) from ct1",
- "select sin(ts) from ct3" ,
- "select sin(c7) from ct3",
- "select sin(c8) from ct3",
- "select sin(c9) from ct3",
- "select sin(ts) from ct4" ,
- "select sin(c7) from ct4",
- "select sin(c8) from ct4",
- "select sin(c9) from ct4",
- "select sin(ts) from stb1" ,
- "select sin(c7) from stb1",
- "select sin(c8) from stb1",
- "select sin(c9) from stb1" ,
-
- "select sin(ts) from stbbb1" ,
- "select sin(c7) from stbbb1",
-
- "select sin(ts) from tbname",
- "select sin(c9) from tbname"
+ f"select sin(ts) from {dbname}.t1" ,
+ f"select sin(c7) from {dbname}.t1",
+ f"select sin(c8) from {dbname}.t1",
+ f"select sin(c9) from {dbname}.t1",
+ f"select sin(ts) from {dbname}.ct1" ,
+ f"select sin(c7) from {dbname}.ct1",
+ f"select sin(c8) from {dbname}.ct1",
+ f"select sin(c9) from {dbname}.ct1",
+ f"select sin(ts) from {dbname}.ct3" ,
+ f"select sin(c7) from {dbname}.ct3",
+ f"select sin(c8) from {dbname}.ct3",
+ f"select sin(c9) from {dbname}.ct3",
+ f"select sin(ts) from {dbname}.ct4" ,
+ f"select sin(c7) from {dbname}.ct4",
+ f"select sin(c8) from {dbname}.ct4",
+ f"select sin(c9) from {dbname}.ct4",
+ f"select sin(ts) from {dbname}.stb1" ,
+ f"select sin(c7) from {dbname}.stb1",
+ f"select sin(c8) from {dbname}.stb1",
+ f"select sin(c9) from {dbname}.stb1" ,
+
+ f"select sin(ts) from {dbname}.stbbb1" ,
+ f"select sin(c7) from {dbname}.stbbb1",
+
+ f"select sin(ts) from {dbname}.tbname",
+ f"select sin(c9) from {dbname}.tbname"
]
-
+
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
-
-
+
+
type_sql_lists = [
- "select sin(c1) from t1",
- "select sin(c2) from t1",
- "select sin(c3) from t1",
- "select sin(c4) from t1",
- "select sin(c5) from t1",
- "select sin(c6) from t1",
-
- "select sin(c1) from ct1",
- "select sin(c2) from ct1",
- "select sin(c3) from ct1",
- "select sin(c4) from ct1",
- "select sin(c5) from ct1",
- "select sin(c6) from ct1",
-
- "select sin(c1) from ct3",
- "select sin(c2) from ct3",
- "select sin(c3) from ct3",
- "select sin(c4) from ct3",
- "select sin(c5) from ct3",
- "select sin(c6) from ct3",
-
- "select sin(c1) from stb1",
- "select sin(c2) from stb1",
- "select sin(c3) from stb1",
- "select sin(c4) from stb1",
- "select sin(c5) from stb1",
- "select sin(c6) from stb1",
-
- "select sin(c6) as alisb from stb1",
- "select sin(c6) alisb from stb1",
+ f"select sin(c1) from {dbname}.t1",
+ f"select sin(c2) from {dbname}.t1",
+ f"select sin(c3) from {dbname}.t1",
+ f"select sin(c4) from {dbname}.t1",
+ f"select sin(c5) from {dbname}.t1",
+ f"select sin(c6) from {dbname}.t1",
+
+ f"select sin(c1) from {dbname}.ct1",
+ f"select sin(c2) from {dbname}.ct1",
+ f"select sin(c3) from {dbname}.ct1",
+ f"select sin(c4) from {dbname}.ct1",
+ f"select sin(c5) from {dbname}.ct1",
+ f"select sin(c6) from {dbname}.ct1",
+
+ f"select sin(c1) from {dbname}.ct3",
+ f"select sin(c2) from {dbname}.ct3",
+ f"select sin(c3) from {dbname}.ct3",
+ f"select sin(c4) from {dbname}.ct3",
+ f"select sin(c5) from {dbname}.ct3",
+ f"select sin(c6) from {dbname}.ct3",
+
+ f"select sin(c1) from {dbname}.stb1",
+ f"select sin(c2) from {dbname}.stb1",
+ f"select sin(c3) from {dbname}.stb1",
+ f"select sin(c4) from {dbname}.stb1",
+ f"select sin(c5) from {dbname}.stb1",
+ f"select sin(c6) from {dbname}.stb1",
+
+ f"select sin(c6) as alisb from {dbname}.stb1",
+ f"select sin(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
-
- def basic_sin_function(self):
- # basic query
- tdSql.query("select c1 from ct3")
+ def basic_sin_function(self, dbname="db"):
+
+ # basic query
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select sin(c1) from ct3")
+ tdSql.query(f"select sin(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sin(c2) from ct3")
+ tdSql.query(f"select sin(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sin(c3) from ct3")
+ tdSql.query(f"select sin(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sin(c4) from ct3")
+ tdSql.query(f"select sin(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sin(c5) from ct3")
+ tdSql.query(f"select sin(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sin(c6) from ct3")
+ tdSql.query(f"select sin(c6) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select sin(c1) from t1")
+ tdSql.query(f"select sin(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 0.841470985)
tdSql.checkData(3 , 0, 0.141120008)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_sin( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from t1")
-
+ self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from {dbname}.t1")
+
# used for sub table
- tdSql.query("select c2 ,sin(c2) from ct1")
+ tdSql.query(f"select c2 ,sin(c2) from {dbname}.ct1")
tdSql.checkData(0, 1, -0.220708349)
tdSql.checkData(1 , 1, -0.556921845)
tdSql.checkData(3 , 1, -0.798311364)
tdSql.checkData(4 , 1, 0.000000000)
- tdSql.query("select c1, c5 ,sin(c5) from ct4")
+ tdSql.query(f"select c1, c5 ,sin(c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, 0.518228108)
tdSql.checkData(2 , 2, 0.996475613)
tdSql.checkData(3 , 2, 0.367960369)
tdSql.checkData(5 , 2, None)
- self.check_result_auto_sin( "select c1, c2, c3 , c4, c5 from ct1", "select sin(c1), sin(c2) ,sin(c3), sin(c4), sin(c5) from ct1")
-
+ self.check_result_auto_sin( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select sin(c1), sin(c2) ,sin(c3), sin(c4), sin(c5) from {dbname}.ct1")
+
# nest query for sin functions
- tdSql.query("select c4 , sin(c4) ,sin(sin(c4)) , sin(sin(sin(c4))) from ct1;")
+ tdSql.query(f"select c4 , sin(c4) ,sin(sin(c4)) , sin(sin(sin(c4))) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 88)
tdSql.checkData(0 , 1 , 0.035398303)
tdSql.checkData(0 , 2 , 0.035390911)
@@ -281,52 +269,52 @@ class TDTestCase:
tdSql.checkData(11 , 2 , 0.841042171)
tdSql.checkData(11 , 3 , 0.745338326)
- # used for stable table
-
- tdSql.query("select sin(c1) from stb1")
+ # used for stable table
+
+ tdSql.query(f"select sin(c1) from {dbname}.stb1")
tdSql.checkRows(25)
-
+
# used for not exists table
- tdSql.error("select sin(c1) from stbbb1")
- tdSql.error("select sin(c1) from tbname")
- tdSql.error("select sin(c1) from ct5")
+ tdSql.error(f"select sin(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select sin(c1) from {dbname}.tbname")
+ tdSql.error(f"select sin(c1) from {dbname}.ct5")
+
+ # mix with common col
+ tdSql.query(f"select c1, sin(c1) from {dbname}.ct1")
+ tdSql.query(f"select c2, sin(c2) from {dbname}.ct4")
- # mix with common col
- tdSql.query("select c1, sin(c1) from ct1")
- tdSql.query("select c2, sin(c2) from ct4")
-
# mix with common functions
- tdSql.query("select c1, sin(c1),sin(c1), sin(sin(c1)) from ct4 ")
+ tdSql.query(f"select c1, sin(c1),sin(c1), sin(sin(c1)) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
tdSql.checkData(0 , 3 ,None)
-
+
tdSql.checkData(3 , 0 , 6)
tdSql.checkData(3 , 1 ,-0.279415498)
tdSql.checkData(3 , 2 ,-0.279415498)
tdSql.checkData(3 , 3 ,-0.275793863)
- tdSql.query("select c1, sin(c1),c5, floor(c5) from stb1 ")
+ tdSql.query(f"select c1, sin(c1),c5, floor(c5) from {dbname}.stb1 ")
# # mix with agg functions , not support
- tdSql.error("select c1, sin(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, sin(c1),c5, count(c5) from ct1 ")
- tdSql.error("select sin(c1), count(c5) from stb1 ")
- tdSql.error("select sin(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, sin(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, sin(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select sin(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select sin(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
-
- # # bug fix for compute
- tdSql.query("select c1, sin(c1) -0 ,sin(c1-4)-0 from ct4 ")
+
+ # # bug fix for compute
+ tdSql.query(f"select c1, sin(c1) -0 ,sin(c1-4)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -334,7 +322,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 0.989358247)
tdSql.checkData(1, 2, -0.756802495)
- tdSql.query(" select c1, sin(c1) -0 ,sin(c1-0.1)-0.1 from ct4")
+ tdSql.query(f"select c1, sin(c1) -0 ,sin(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -342,35 +330,34 @@ class TDTestCase:
tdSql.checkData(1, 1, 0.989358247)
tdSql.checkData(1, 2, 0.898941342)
- tdSql.query("select c1, sin(c1), c2, sin(c2), c3, sin(c3) from ct1")
+ tdSql.query(f"select c1, sin(c1), c2, sin(c2), c3, sin(c3) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, sin(100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.sin(100000000))
- tdSql.query("select c1, sin(10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.sin(10000000000000))
- tdSql.query("select c1, sin(10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sin(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sin(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(1, 1, math.sin(10000000000000000000000000.0))
- tdSql.query("select c1, sin(10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sin(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sin(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.sin(10000000000000000000000000000000000.0))
- tdSql.query("select c1, sin(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sin(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.sin(10000000000000000000000000000000000000000.0))
- tdSql.query("select c1, sin(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -378,7 +365,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,1.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -386,7 +373,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,-1.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from ct4 where c1=sin(c1) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1=sin(c1) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,0)
tdSql.checkData(0,1,0)
@@ -394,45 +381,40 @@ class TDTestCase:
tdSql.checkData(0,3,0.000000000)
tdSql.checkData(0,4,-0.100000000)
tdSql.checkData(0,5,0.000000000)
-
- def pow_Arithmetic(self):
- pass
-
- def check_boundary_values(self):
+
+ def check_boundary_values(self, dbname="testdb"):
PI=3.1415926
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_sin( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from sub1_bound")
-
- self.check_result_auto_sin( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select sin(c1), sin(c2) ,sin(c3), sin(c3), sin(c2) ,sin(c1) from sub1_bound")
+ self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.sub1_bound ", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)) from {dbname}.sub1_bound")
+
+ self.check_result_auto_sin( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select sin(c1), sin(c2) ,sin(c3), sin(c3), sin(c2) ,sin(c1) from {dbname}.sub1_bound")
+
+ self.check_result_auto_sin(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select sin(abs(c1)) from {dbname}.sub1_bound" )
- self.check_result_auto_sin("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select sin(abs(c1)) from sub1_bound" )
-
# check basic elem for table per row
- tdSql.query("select sin(abs(c1)) ,sin(abs(c2)) , sin(abs(c3)) , sin(abs(c4)), sin(abs(c5)), sin(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select sin(abs(c1)) ,sin(abs(c2)) , sin(abs(c3)) , sin(abs(c4)), sin(abs(c5)), sin(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.sin(2147483647))
tdSql.checkData(0,1,math.sin(9223372036854775807))
tdSql.checkData(0,2,math.sin(32767))
@@ -450,83 +432,79 @@ class TDTestCase:
tdSql.checkData(3,4,math.sin(339999995214436424907732413799364296704.00000))
# check + - * / in functions
- tdSql.query("select sin(abs(c1+1)) ,sin(abs(c2)) , sin(abs(c3*1)) , sin(abs(c4/2)), sin(abs(c5))/2, sin(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select sin(abs(c1+1)) ,sin(abs(c2)) , sin(abs(c3*1)) , sin(abs(c4/2)), sin(abs(c5))/2, sin(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.sin(2147483648.000000000))
tdSql.checkData(0,1,math.sin(9223372036854775807))
tdSql.checkData(0,2,math.sin(32767.000000000))
tdSql.checkData(0,3,math.sin(63.500000000))
- tdSql.execute("create stable st (ts timestamp, num1 float, num2 double) tags (t1 int);")
- tdSql.execute(f'create table tb1 using st tags (1)')
- tdSql.execute(f'create table tb2 using st tags (2)')
- tdSql.execute(f'create table tb3 using st tags (3)')
- tdSql.execute('insert into tb1 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb1 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb1 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb1 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb1 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
-
- tdSql.execute('insert into tb2 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb2 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb2 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb2 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb2 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
-
- for i in range(100):
- tdSql.execute('insert into tb3 values (now()+{}s, {}, {})'.format(i,PI*(5+i)/2 ,PI*(5+i)/2))
-
- self.check_result_auto_sin("select num1,num2 from tb3;" , "select sin(num1),sin(num2) from tb3")
-
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto_sin( " select c5 from stb1 order by ts " , "select sin(c5) from stb1 order by ts" )
- self.check_result_auto_sin( " select c5 from stb1 order by tbname " , "select sin(c5) from stb1 order by tbname" )
- self.check_result_auto_sin( " select c5 from stb1 where c1 > 0 order by tbname " , "select sin(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_sin( " select c5 from stb1 where c1 > 0 order by tbname " , "select sin(c5) from stb1 where c1 > 0 order by tbname" )
-
- self.check_result_auto_sin( " select t1,c5 from stb1 order by ts " , "select sin(t1), sin(c5) from stb1 order by ts" )
- self.check_result_auto_sin( " select t1,c5 from stb1 order by tbname " , "select sin(t1) ,sin(c5) from stb1 order by tbname" )
- self.check_result_auto_sin( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sin(t1) ,sin(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_sin( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sin(t1) , sin(c5) from stb1 where c1 > 0 order by tbname" )
- pass
-
+ tdSql.execute(f"create stable {dbname}.st (ts timestamp, num1 float, num2 double) tags (t1 int);")
+ tdSql.execute(f'create table {dbname}.tb1 using {dbname}.st tags (1)')
+ tdSql.execute(f'create table {dbname}.tb2 using {dbname}.st tags (2)')
+ tdSql.execute(f'create table {dbname}.tb3 using {dbname}.st tags (3)')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now(), {PI*2.5}, {PI*2.5})')
+
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now(), {PI*2.5}, {PI*2.5})')
+
+ self.check_result_auto_sin(f"select num1,num2 from {dbname}.tb3;" , f"select sin(num1),sin(num2) from {dbname}.tb3")
+
+ def support_super_table_test(self, dbname="db"):
+ self.check_result_auto_sin( f"select c5 from {dbname}.stb1 order by ts " , f"select sin(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_sin( f"select c5 from {dbname}.stb1 order by tbname " , f"select sin(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_sin( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_sin( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+
+ self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select sin(t1), sin(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select sin(t1) ,sin(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(t1) ,sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(t1) , sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
-
+
self.prepare_datas()
- tdLog.printNoPrefix("==========step2:test errors ==============")
+ tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
-
- tdLog.printNoPrefix("==========step3:support types ============")
+
+ tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
- tdLog.printNoPrefix("==========step4: sin basic query ============")
+ tdLog.printNoPrefix("==========step4: sin basic query ============")
self.basic_sin_function()
- tdLog.printNoPrefix("==========step5: big number sin query ============")
-
- self.test_big_number()
+ tdLog.printNoPrefix("==========step5: sin filter query ============")
+ self.abs_func_filter()
- tdLog.printNoPrefix("==========step6: sin boundary query ============")
+ tdLog.printNoPrefix("==========step6: big number sin query ============")
- self.check_boundary_values()
+ self.test_big_number()
+
- tdLog.printNoPrefix("==========step7: sin filter query ============")
+ tdLog.printNoPrefix("==========step7: sin boundary query ============")
- self.abs_func_filter()
+ self.check_boundary_values()
+
tdLog.printNoPrefix("==========step8: check sin result of stable query ============")
self.support_super_table_test()
-
+
def stop(self):
tdSql.close()
diff --git a/tests/system-test/2-query/smaTest.py b/tests/system-test/2-query/smaTest.py
index 67824cc3a3c372821c5014d48f6d2dbbc9ee9066..0217b6c28c2f44deb87c032957ef749fc329742e 100644
--- a/tests/system-test/2-query/smaTest.py
+++ b/tests/system-test/2-query/smaTest.py
@@ -30,14 +30,6 @@ class TDTestCase:
# updatecfgDict = {'debugFlag': 135}
# updatecfgDict = {'fqdn': 135}
- def caseDescription(self):
- '''
- limit and offset keyword function test cases;
- case1: limit offset base function test
- case2: offset return valid
- '''
- return
-
# init
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
@@ -47,11 +39,12 @@ class TDTestCase:
self.ts = 1500000000000
- # run case
+ # run case
def run(self):
# insert data
- self.insert_data1("t1", self.ts, 1000*10000)
- self.insert_data1("t4", self.ts, 1000*10000)
+ dbname = "db"
+ self.insert_data1(f"{dbname}.t1", self.ts, 1000*10000)
+ self.insert_data1(f"{dbname}.t4", self.ts, 1000*10000)
# test base case
# self.test_case1()
tdLog.debug(" LIMIT test_case1 ............ [OK]")
@@ -60,7 +53,7 @@ class TDTestCase:
tdLog.debug(" LIMIT test_case2 ............ [OK]")
- # stop
+ # stop
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
@@ -70,16 +63,16 @@ class TDTestCase:
#
# create table
- def create_tables(self):
+ def create_tables(self, dbname="db"):
# super table
- tdSql.execute("create table st(ts timestamp, i1 int,i2 int) tags(area int)");
+ tdSql.execute(f"create table {dbname}.st(ts timestamp, i1 int,i2 int) tags(area int)")
# child table
- tdSql.execute("create table t1 using st tags(1)");
+ tdSql.execute(f"create table {dbname}.t1 using {dbname}.st tags(1)")
- tdSql.execute("create table st1(ts timestamp, i1 int ,i2 int) tags(area int) sma(i2) ");
- tdSql.execute("create table t4 using st1 tags(1)");
+ tdSql.execute(f"create table {dbname}.st1(ts timestamp, i1 int ,i2 int) tags(area int) sma(i2) ")
+ tdSql.execute(f"create table {dbname}.t4 using {dbname}.st1 tags(1)")
- return
+ return
# insert data1
def insert_data(self, tbname, ts_start, count):
@@ -91,7 +84,7 @@ class TDTestCase:
if i >0 and i%30000 == 0:
tdSql.execute(sql)
sql = pre_insert
- # end sql
+ # end sql
if sql != pre_insert:
tdSql.execute(sql)
@@ -107,16 +100,16 @@ class TDTestCase:
if i >0 and i%30000 == 0:
tdSql.execute(sql)
sql = pre_insert
- # end sql
+ # end sql
if sql != pre_insert:
tdSql.execute(sql)
tdLog.debug("INSERT TABLE DATA ............ [OK]")
return
- # test case1 base
+ # test case1 base
# def test_case1(self):
- # #
+ # #
# # limit base function
# #
# # base no where
diff --git a/tests/system-test/2-query/sml.py b/tests/system-test/2-query/sml.py
index 6cfb9a1dada47949f68a6a79ef05c7c4113a0a2f..4dae2ad6c0ec289e034929e6a949eed8b665c899 100644
--- a/tests/system-test/2-query/sml.py
+++ b/tests/system-test/2-query/sml.py
@@ -20,7 +20,7 @@ class TDTestCase:
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
- def checkFileContent(self):
+ def checkFileContent(self, dbname="sml_db"):
buildPath = tdCom.getBuildPath()
cmdStr = '%s/build/bin/sml_test'%(buildPath)
tdLog.info(cmdStr)
@@ -28,8 +28,8 @@ class TDTestCase:
if ret != 0:
tdLog.exit("sml_test failed")
- tdSql.execute('use sml_db')
- tdSql.query("select * from t_b7d815c9222ca64cdf2614c61de8f211")
+ # tdSql.execute('use sml_db')
+ tdSql.query(f"select * from {dbname}.t_b7d815c9222ca64cdf2614c61de8f211")
tdSql.checkRows(1)
tdSql.checkData(0, 0, '2016-01-01 08:00:07.000')
@@ -44,35 +44,35 @@ class TDTestCase:
tdSql.checkData(0, 9, 0)
tdSql.checkData(0, 10, 25)
- tdSql.query("select * from readings")
+ tdSql.query(f"select * from {dbname}.readings")
tdSql.checkRows(9)
- tdSql.query("select distinct tbname from readings")
+ tdSql.query(f"select distinct tbname from {dbname}.readings")
tdSql.checkRows(4)
- tdSql.query("select * from t_0799064f5487946e5d22164a822acfc8 order by _ts")
+ tdSql.query(f"select * from {dbname}.t_0799064f5487946e5d22164a822acfc8 order by _ts")
tdSql.checkRows(2)
tdSql.checkData(0, 3, "kk")
- tdSql.checkData(1, 3, None)
+ tdSql.checkData(1, 3, "")
- tdSql.query("select distinct tbname from `sys.if.bytes.out`")
+ tdSql.query(f"select distinct tbname from {dbname}.`sys.if.bytes.out`")
tdSql.checkRows(2)
- tdSql.query("select * from t_fc70dec6677d4277c5d9799c4da806da order by _ts")
+ tdSql.query(f"select * from {dbname}.t_fc70dec6677d4277c5d9799c4da806da order by _ts")
tdSql.checkRows(2)
tdSql.checkData(0, 1, 1.300000000)
tdSql.checkData(1, 1,13.000000000)
- tdSql.query("select * from `sys.procs.running`")
+ tdSql.query(f"select * from {dbname}.`sys.procs.running`")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 42.000000000)
tdSql.checkData(0, 2, "web01")
- tdSql.query("select distinct tbname from `sys.cpu.nice`")
+ tdSql.query(f"select distinct tbname from {dbname}.`sys.cpu.nice`")
tdSql.checkRows(2)
- tdSql.query("select * from `sys.cpu.nice` order by _ts")
+ tdSql.query(f"select * from {dbname}.`sys.cpu.nice` order by _ts")
tdSql.checkRows(2)
tdSql.checkData(0, 1, 9.000000000)
tdSql.checkData(0, 2, "lga")
@@ -83,8 +83,11 @@ class TDTestCase:
tdSql.checkData(1, 3, "web01")
tdSql.checkData(1, 4, "t1")
- tdSql.query("select * from macylr")
+ tdSql.query(f"select * from {dbname}.macylr")
tdSql.checkRows(2)
+
+ tdSql.query(f"desc {dbname}.macylr")
+ tdSql.checkRows(25)
return
def run(self):
diff --git a/tests/system-test/2-query/spread.py b/tests/system-test/2-query/spread.py
index 51c569e56567fc7fdf1e2399008eaca5acc4059d..ffe86ff36304224e2d5f776f5088a16b445a5231 100644
--- a/tests/system-test/2-query/spread.py
+++ b/tests/system-test/2-query/spread.py
@@ -26,6 +26,8 @@ TS_TYPE_COL = [ TS_COL, ]
ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ]
+DBNAME = "db"
+
class TDTestCase:
def init(self, conn, logSql):
@@ -88,6 +90,7 @@ class TDTestCase:
return join_condition
def __where_condition(self, col=None, tbname=None, query_conditon=None):
+ # tbname = tbname.split(".")[-1] if tbname else None
if query_conditon and isinstance(query_conditon, str):
if query_conditon.startswith("count"):
query_conditon = query_conditon[6:-1]
@@ -129,32 +132,33 @@ class TDTestCase:
return f"select spread({select_clause}) from {from_clause} {where_condition} {group_condition}"
@property
- def __tb_list(self):
+ def __tb_list(self, dbname=DBNAME):
return [
- "ct1",
- "ct4",
- "t1",
- "ct2",
- "stb1",
+ f"{dbname}.ct1",
+ f"{dbname}.ct4",
+ f"{dbname}.t1",
+ f"{dbname}.ct2",
+ f"{dbname}.stb1",
]
def sql_list(self):
sqls = []
__no_join_tblist = self.__tb_list
for tb in __no_join_tblist:
- select_claus_list = self.__query_condition(tb)
- for select_claus in select_claus_list:
- group_claus = self.__group_condition(col=select_claus)
- where_claus = self.__where_condition(query_conditon=select_claus)
- having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
- sqls.extend(
- (
- self.__single_sql(select_claus, tb, where_claus, having_claus),
- self.__single_sql(select_claus, tb,),
- self.__single_sql(select_claus, tb, where_condition=where_claus),
- self.__single_sql(select_claus, tb, group_condition=group_claus),
- )
+ tbname = tb.split(".")[-1]
+ select_claus_list = self.__query_condition(tbname)
+ for select_claus in select_claus_list:
+ group_claus = self.__group_condition(col=select_claus)
+ where_claus = self.__where_condition(query_conditon=select_claus)
+ having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
+ sqls.extend(
+ (
+ self.__single_sql(select_claus, tb, where_claus, having_claus),
+ self.__single_sql(select_claus, tb,),
+ self.__single_sql(select_claus, tb, where_condition=where_claus),
+ self.__single_sql(select_claus, tb, group_condition=group_claus),
)
+ )
# return filter(None, sqls)
return list(filter(None, sqls))
@@ -166,28 +170,28 @@ class TDTestCase:
tdLog.info(f"sql: {sqls[i]}")
tdSql.query(sqls[i])
- def __test_current(self):
- tdSql.query("select spread(ts) from ct1")
+ def __test_current(self, dbname=DBNAME):
+ tdSql.query(f"select spread(ts) from {dbname}.ct1")
tdSql.checkRows(1)
- tdSql.query("select spread(c1) from ct2")
+ tdSql.query(f"select spread(c1) from {dbname}.ct2")
tdSql.checkRows(1)
- tdSql.query("select spread(c1) from ct4 group by c1")
+ tdSql.query(f"select spread(c1) from {dbname}.ct4 group by c1")
tdSql.checkRows(self.rows + 3)
- tdSql.query("select spread(c1) from ct4 group by c7")
+ tdSql.query(f"select spread(c1) from {dbname}.ct4 group by c7")
tdSql.checkRows(3)
- tdSql.query("select spread(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts")
+ tdSql.query(f"select spread(ct2.c1) from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.checkRows(1)
self.spread_check()
- def __test_error(self):
+ def __test_error(self, dbname=DBNAME):
tdLog.printNoPrefix("===step 0: err case, must return err")
- tdSql.error( "select spread() from ct1" )
- tdSql.error( "select spread(1, 2) from ct2" )
- tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from ct4" )
- tdSql.error( f"select spread({BOOLEAN_COL[0]}) from t1" )
- tdSql.error( f"select spread({CHAR_COL[0]}) from stb1" )
+ tdSql.error( f"select spread() from {dbname}.ct1" )
+ tdSql.error( f"select spread(1, 2) from {dbname}.ct2" )
+ tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from {dbname}.ct4" )
+ tdSql.error( f"select spread({BOOLEAN_COL[0]}) from {dbname}.t1" )
+ tdSql.error( f"select spread({CHAR_COL[0]}) from {dbname}.stb1" )
# tdSql.error( ''' select spread(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'])
# from ct1
@@ -196,20 +200,20 @@ class TDTestCase:
# having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' )
# tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ")
- def all_test(self):
- self.__test_error()
- self.__test_current()
+ def all_test(self, dbname=DBNAME):
+ self.__test_error(dbname)
+ self.__test_current(dbname)
- def __create_tb(self):
+ def __create_tb(self, dbname=DBNAME):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -219,30 +223,30 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
{ i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname=DBNAME):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -258,7 +262,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -274,13 +278,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
diff --git a/tests/system-test/2-query/sqrt.py b/tests/system-test/2-query/sqrt.py
index 425d59f1186615467f4aac8a085949029422b760..9597375885cf5fdedf1d52a547d7558430cb46e4 100644
--- a/tests/system-test/2-query/sqrt.py
+++ b/tests/system-test/2-query/sqrt.py
@@ -9,48 +9,46 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -85,84 +83,74 @@ class TDTestCase:
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
-
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("sqrt function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("sqrt value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def test_errors(self):
+ tdSql.checkData(row_index ,col_index ,auto_result[row_index][col_index])
+
+
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select sqrt from t1",
- # "select sqrt(-+--+c1 ) from t1",
- # "select +-sqrt(c1) from t1",
- # "select ++-sqrt(c1) from t1",
- # "select ++--sqrt(c1) from t1",
- # "select - -sqrt(c1)*0 from t1",
- # "select sqrt(tbname+1) from t1 ",
- "select sqrt(123--123)==1 from t1",
- "select sqrt(c1) as 'd1' from t1",
- "select sqrt(c1 ,c2) from t1",
- "select sqrt(c1 ,NULL ) from t1",
- "select sqrt(,) from t1;",
- "select sqrt(sqrt(c1) ab from t1)",
- "select sqrt(c1 ) as int from t1",
- "select sqrt from stb1",
- # "select sqrt(-+--+c1) from stb1",
- # "select +-sqrt(c1) from stb1",
- # "select ++-sqrt(c1) from stb1",
- # "select ++--sqrt(c1) from stb1",
- # "select - -sqrt(c1)*0 from stb1",
- # "select sqrt(tbname+1) from stb1 ",
- "select sqrt(123--123)==1 from stb1",
- "select sqrt(c1) as 'd1' from stb1",
- "select sqrt(c1 ,c2 ) from stb1",
- "select sqrt(c1 ,NULL) from stb1",
- "select sqrt(,) from stb1;",
- "select sqrt(sqrt(c1) ab from stb1)",
- "select sqrt(c1) as int from stb1"
+ f"select sqrt from {dbname}.t1",
+ # f"select sqrt(-+--+c1 ) from {dbname}.t1",
+ # f"select +-sqrt(c1) from {dbname}.t1",
+ # f"select ++-sqrt(c1) from {dbname}.t1",
+ # f"select ++--sqrt(c1) from {dbname}.t1",
+ # f"select - -sqrt(c1)*0 from {dbname}.t1",
+ # f"select sqrt(tbname+1) from {dbname}.t1 ",
+ f"select sqrt(123--123)==1 from {dbname}.t1",
+ f"select sqrt(c1) as 'd1' from {dbname}.t1",
+ f"select sqrt(c1 ,c2) from {dbname}.t1",
+ f"select sqrt(c1 ,NULL ) from {dbname}.t1",
+ f"select sqrt(,) from {dbname}.t1;",
+ f"select sqrt(sqrt(c1) ab from {dbname}.t1)",
+ f"select sqrt(c1 ) as int from {dbname}.t1",
+ f"select sqrt from {dbname}.stb1",
+ # f"select sqrt(-+--+c1) from {dbname}.stb1",
+ # f"select +-sqrt(c1) from {dbname}.stb1",
+ # f"select ++-sqrt(c1) from {dbname}.stb1",
+ # f"select ++--sqrt(c1) from {dbname}.stb1",
+ # f"select - -sqrt(c1)*0 from {dbname}.stb1",
+ # f"select sqrt(tbname+1) from {dbname}.stb1 ",
+ f"select sqrt(123--123)==1 from {dbname}.stb1",
+ f"select sqrt(c1) as 'd1' from {dbname}.stb1",
+ f"select sqrt(c1 ,c2 ) from {dbname}.stb1",
+ f"select sqrt(c1 ,NULL) from {dbname}.stb1",
+ f"select sqrt(,) from {dbname}.stb1;",
+ f"select sqrt(sqrt(c1) ab from {dbname}.stb1)",
+ f"select sqrt(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select sqrt(ts) from t1" ,
- "select sqrt(c7) from t1",
- "select sqrt(c8) from t1",
- "select sqrt(c9) from t1",
- "select sqrt(ts) from ct1" ,
- "select sqrt(c7) from ct1",
- "select sqrt(c8) from ct1",
- "select sqrt(c9) from ct1",
- "select sqrt(ts) from ct3" ,
- "select sqrt(c7) from ct3",
- "select sqrt(c8) from ct3",
- "select sqrt(c9) from ct3",
- "select sqrt(ts) from ct4" ,
- "select sqrt(c7) from ct4",
- "select sqrt(c8) from ct4",
- "select sqrt(c9) from ct4",
- "select sqrt(ts) from stb1" ,
- "select sqrt(c7) from stb1",
- "select sqrt(c8) from stb1",
- "select sqrt(c9) from stb1" ,
-
- "select sqrt(ts) from stbbb1" ,
- "select sqrt(c7) from stbbb1",
-
- "select sqrt(ts) from tbname",
- "select sqrt(c9) from tbname"
+ f"select sqrt(ts) from {dbname}.t1" ,
+ f"select sqrt(c7) from {dbname}.t1",
+ f"select sqrt(c8) from {dbname}.t1",
+ f"select sqrt(c9) from {dbname}.t1",
+ f"select sqrt(ts) from {dbname}.ct1" ,
+ f"select sqrt(c7) from {dbname}.ct1",
+ f"select sqrt(c8) from {dbname}.ct1",
+ f"select sqrt(c9) from {dbname}.ct1",
+ f"select sqrt(ts) from {dbname}.ct3" ,
+ f"select sqrt(c7) from {dbname}.ct3",
+ f"select sqrt(c8) from {dbname}.ct3",
+ f"select sqrt(c9) from {dbname}.ct3",
+ f"select sqrt(ts) from {dbname}.ct4" ,
+ f"select sqrt(c7) from {dbname}.ct4",
+ f"select sqrt(c8) from {dbname}.ct4",
+ f"select sqrt(c9) from {dbname}.ct4",
+ f"select sqrt(ts) from {dbname}.stb1" ,
+ f"select sqrt(c7) from {dbname}.stb1",
+ f"select sqrt(c8) from {dbname}.stb1",
+ f"select sqrt(c9) from {dbname}.stb1" ,
+
+ f"select sqrt(ts) from {dbname}.stbbb1" ,
+ f"select sqrt(c7) from {dbname}.stbbb1",
+
+ f"select sqrt(ts) from {dbname}.tbname",
+ f"select sqrt(c9) from {dbname}.tbname"
]
@@ -171,103 +159,103 @@ class TDTestCase:
type_sql_lists = [
- "select sqrt(c1) from t1",
- "select sqrt(c2) from t1",
- "select sqrt(c3) from t1",
- "select sqrt(c4) from t1",
- "select sqrt(c5) from t1",
- "select sqrt(c6) from t1",
-
- "select sqrt(c1) from ct1",
- "select sqrt(c2) from ct1",
- "select sqrt(c3) from ct1",
- "select sqrt(c4) from ct1",
- "select sqrt(c5) from ct1",
- "select sqrt(c6) from ct1",
-
- "select sqrt(c1) from ct3",
- "select sqrt(c2) from ct3",
- "select sqrt(c3) from ct3",
- "select sqrt(c4) from ct3",
- "select sqrt(c5) from ct3",
- "select sqrt(c6) from ct3",
-
- "select sqrt(c1) from stb1",
- "select sqrt(c2) from stb1",
- "select sqrt(c3) from stb1",
- "select sqrt(c4) from stb1",
- "select sqrt(c5) from stb1",
- "select sqrt(c6) from stb1",
-
- "select sqrt(c6) as alisb from stb1",
- "select sqrt(c6) alisb from stb1",
+ f"select sqrt(c1) from {dbname}.t1",
+ f"select sqrt(c2) from {dbname}.t1",
+ f"select sqrt(c3) from {dbname}.t1",
+ f"select sqrt(c4) from {dbname}.t1",
+ f"select sqrt(c5) from {dbname}.t1",
+ f"select sqrt(c6) from {dbname}.t1",
+
+ f"select sqrt(c1) from {dbname}.ct1",
+ f"select sqrt(c2) from {dbname}.ct1",
+ f"select sqrt(c3) from {dbname}.ct1",
+ f"select sqrt(c4) from {dbname}.ct1",
+ f"select sqrt(c5) from {dbname}.ct1",
+ f"select sqrt(c6) from {dbname}.ct1",
+
+ f"select sqrt(c1) from {dbname}.ct3",
+ f"select sqrt(c2) from {dbname}.ct3",
+ f"select sqrt(c3) from {dbname}.ct3",
+ f"select sqrt(c4) from {dbname}.ct3",
+ f"select sqrt(c5) from {dbname}.ct3",
+ f"select sqrt(c6) from {dbname}.ct3",
+
+ f"select sqrt(c1) from {dbname}.stb1",
+ f"select sqrt(c2) from {dbname}.stb1",
+ f"select sqrt(c3) from {dbname}.stb1",
+ f"select sqrt(c4) from {dbname}.stb1",
+ f"select sqrt(c5) from {dbname}.stb1",
+ f"select sqrt(c6) from {dbname}.stb1",
+
+ f"select sqrt(c6) as alisb from {dbname}.stb1",
+ f"select sqrt(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def basic_sqrt_function(self):
+ def basic_sqrt_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select sqrt(c1) from ct3")
+ tdSql.query(f"select sqrt(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sqrt(c2) from ct3")
+ tdSql.query(f"select sqrt(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sqrt(c3) from ct3")
+ tdSql.query(f"select sqrt(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sqrt(c4) from ct3")
+ tdSql.query(f"select sqrt(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sqrt(c5) from ct3")
+ tdSql.query(f"select sqrt(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sqrt(c6) from ct3")
+ tdSql.query(f"select sqrt(c6) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select sqrt(c1) from t1")
+ tdSql.query(f"select sqrt(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 1.000000000)
tdSql.checkData(3 , 0, 1.732050808)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from t1")
+ self.check_result_auto_sqrt( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from {dbname}.t1")
# used for sub table
- tdSql.query("select c2 ,sqrt(c2) from ct1")
+ tdSql.query(f"select c2 ,sqrt(c2) from {dbname}.ct1")
tdSql.checkData(0, 1, 298.140906284)
tdSql.checkData(1 , 1, 278.885281074)
tdSql.checkData(3 , 1, 235.701081881)
tdSql.checkData(4 , 1, 0.000000000)
- tdSql.query("select c1, c5 ,sqrt(c5) from ct4")
+ tdSql.query(f"select c1, c5 ,sqrt(c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, 2.979932904)
tdSql.checkData(2 , 2, 2.787471970)
tdSql.checkData(3 , 2, 2.580697551)
tdSql.checkData(5 , 2, None)
- self.check_result_auto_sqrt( "select c1, c2, c3 , c4, c5 from ct1", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c4), sqrt(c5) from ct1")
+ self.check_result_auto_sqrt( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c4), sqrt(c5) from {dbname}.ct1")
# nest query for sqrt functions
- tdSql.query("select c4 , sqrt(c4) ,sqrt(sqrt(c4)) , sqrt(sqrt(sqrt(c4))) from ct1;")
+ tdSql.query(f"select c4 , sqrt(c4) ,sqrt(sqrt(c4)) , sqrt(sqrt(sqrt(c4))) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 88)
tdSql.checkData(0 , 1 , 9.380831520)
tdSql.checkData(0 , 2 , 3.062814314)
@@ -285,22 +273,22 @@ class TDTestCase:
# used for stable table
- tdSql.query("select sqrt(c1) from stb1")
+ tdSql.query(f"select sqrt(c1) from {dbname}.stb1")
tdSql.checkRows(25)
# used for not exists table
- tdSql.error("select sqrt(c1) from stbbb1")
- tdSql.error("select sqrt(c1) from tbname")
- tdSql.error("select sqrt(c1) from ct5")
+ tdSql.error(f"select sqrt(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select sqrt(c1) from {dbname}.tbname")
+ tdSql.error(f"select sqrt(c1) from {dbname}.ct5")
# mix with common col
- tdSql.query("select c1, sqrt(c1) from ct1")
+ tdSql.query(f"select c1, sqrt(c1) from {dbname}.ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,2.828427125)
tdSql.checkData(4 , 0 ,0)
tdSql.checkData(4 , 1 ,0.000000000)
- tdSql.query("select c2, sqrt(c2) from ct4")
+ tdSql.query(f"select c2, sqrt(c2) from {dbname}.ct4")
tdSql.checkData(0 , 0 , None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,55555)
@@ -309,7 +297,7 @@ class TDTestCase:
tdSql.checkData(5 , 1 ,None)
# mix with common functions
- tdSql.query("select c1, sqrt(c1),sqrt(c1), sqrt(sqrt(c1)) from ct4 ")
+ tdSql.query(f"select c1, sqrt(c1),sqrt(c1), sqrt(sqrt(c1)) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
@@ -320,34 +308,34 @@ class TDTestCase:
tdSql.checkData(3 , 2 ,2.449489743)
tdSql.checkData(3 , 3 ,1.565084580)
- tdSql.query("select c1, sqrt(c1),c5, floor(c5) from stb1 ")
+ tdSql.query(f"select c1, sqrt(c1),c5, floor(c5) from {dbname}.stb1 ")
# # mix with agg functions , not support
- tdSql.error("select c1, sqrt(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, sqrt(c1),c5, count(c5) from ct1 ")
- tdSql.error("select sqrt(c1), count(c5) from stb1 ")
- tdSql.error("select sqrt(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, sqrt(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, sqrt(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select sqrt(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select sqrt(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
# bug fix for count
- tdSql.query("select count(c1) from ct4 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 ")
tdSql.checkData(0,0,9)
- tdSql.query("select count(*) from ct4 ")
+ tdSql.query(f"select count(*) from {dbname}.ct4 ")
tdSql.checkData(0,0,12)
- tdSql.query("select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,22)
- tdSql.query("select count(*) from stb1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 ")
tdSql.checkData(0,0,25)
# # bug fix for compute
- tdSql.query("select c1, sqrt(c1) -0 ,sqrt(c1-4)-0 from ct4 ")
+ tdSql.query(f"select c1, sqrt(c1) -0 ,sqrt(c1-4)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -355,7 +343,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 2.828427125)
tdSql.checkData(1, 2, 2.000000000)
- tdSql.query(" select c1, sqrt(c1) -0 ,sqrt(c1-0.1)-0.1 from ct4")
+ tdSql.query(f"select c1, sqrt(c1) -0 ,sqrt(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -363,57 +351,56 @@ class TDTestCase:
tdSql.checkData(1, 1, 2.828427125)
tdSql.checkData(1, 2, 2.710693865)
- tdSql.query("select c1, sqrt(c1), c2, sqrt(c2), c3, sqrt(c3) from ct1")
+ tdSql.query(f"select c1, sqrt(c1), c2, sqrt(c2), c3, sqrt(c3) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, sqrt(100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, 10000.000000000)
- tdSql.query("select c1, sqrt(10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, 3162277.660168380)
- tdSql.query("select c1, sqrt(c1) + sqrt(10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sqrt(c1) + sqrt(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sqrt(c1) + sqrt(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(c1) + sqrt(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(1, 1, 3162277660171.025390625)
- tdSql.query("select c1, sqrt(10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sqrt(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, 100000000000000000.000000000)
- tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, 100000000000000000000.000000000)
- tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def pow_base_test(self):
+ def pow_base_test(self, dbname="db"):
# base is an regular number ,int or double
- tdSql.query("select c1, sqrt(c1) from ct1")
+ tdSql.query(f"select c1, sqrt(c1) from {dbname}.ct1")
tdSql.checkData(0, 1,2.828427125)
tdSql.checkRows(13)
# # bug for compute in functions
- # tdSql.query("select c1, abs(1/0) from ct1")
+ # tdSql.query(f"select c1, abs(1/0) from {dbname}.ct1")
# tdSql.checkData(0, 0, 8)
# tdSql.checkData(0, 1, 1)
- tdSql.query("select c1, sqrt(1) from ct1")
+ tdSql.query(f"select c1, sqrt(1) from {dbname}.ct1")
tdSql.checkData(0, 1, 1.000000000)
tdSql.checkRows(13)
# two cols start sqrt(x,y)
- tdSql.query("select c1,c2, sqrt(c2) from ct1")
+ tdSql.query(f"select c1,c2, sqrt(c2) from {dbname}.ct1")
tdSql.checkData(0, 2, 298.140906284)
tdSql.checkData(1, 2, 278.885281074)
tdSql.checkData(4, 2, 0.000000000)
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -421,7 +408,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,3.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -429,7 +416,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -437,7 +424,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=sqrt(c1) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1=sqrt(c1) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,1)
tdSql.checkData(0,1,11111)
@@ -446,42 +433,37 @@ class TDTestCase:
tdSql.checkData(0,4,0.900000000)
tdSql.checkData(0,5,1.000000000)
- def pow_Arithmetic(self):
- pass
-
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from sub1_bound")
+ self.check_result_auto_sqrt( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from {dbname}.sub1_bound")
- self.check_result_auto_sqrt( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c3), sqrt(c2) ,sqrt(c1) from sub1_bound")
+ self.check_result_auto_sqrt( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c3), sqrt(c2) ,sqrt(c1) from {dbname}.sub1_bound")
- self.check_result_auto_sqrt("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select sqrt(abs(c1)) from sub1_bound" )
+ self.check_result_auto_sqrt(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select sqrt(abs(c1)) from {dbname}.sub1_bound" )
# check basic elem for table per row
- tdSql.query("select sqrt(abs(c1)) ,sqrt(abs(c2)) , sqrt(abs(c3)) , sqrt(abs(c4)), sqrt(abs(c5)), sqrt(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select sqrt(abs(c1)) ,sqrt(abs(c2)) , sqrt(abs(c3)) , sqrt(abs(c4)), sqrt(abs(c5)), sqrt(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.sqrt(2147483647))
tdSql.checkData(0,1,math.sqrt(9223372036854775807))
tdSql.checkData(0,2,math.sqrt(32767))
@@ -499,23 +481,22 @@ class TDTestCase:
tdSql.checkData(3,4,math.sqrt(339999995214436424907732413799364296704.00000))
# check + - * / in functions
- tdSql.query("select sqrt(abs(c1+1)) ,sqrt(abs(c2)) , sqrt(abs(c3*1)) , sqrt(abs(c4/2)), sqrt(abs(c5))/2, sqrt(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select sqrt(abs(c1+1)) ,sqrt(abs(c2)) , sqrt(abs(c3*1)) , sqrt(abs(c4/2)), sqrt(abs(c5))/2, sqrt(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.sqrt(2147483648.000000000))
tdSql.checkData(0,1,math.sqrt(9223372036854775807))
tdSql.checkData(0,2,math.sqrt(32767.000000000))
tdSql.checkData(0,3,math.sqrt(63.500000000))
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto_sqrt( " select c5 from stb1 order by ts " , "select sqrt(c5) from stb1 order by ts" )
- self.check_result_auto_sqrt( " select c5 from stb1 order by tbname " , "select sqrt(c5) from stb1 order by tbname" )
- self.check_result_auto_sqrt( " select c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_sqrt( " select c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(c5) from stb1 where c1 > 0 order by tbname" )
-
- self.check_result_auto_sqrt( " select t1,c5 from stb1 order by ts " , "select sqrt(t1), sqrt(c5) from stb1 order by ts" )
- self.check_result_auto_sqrt( " select t1,c5 from stb1 order by tbname " , "select sqrt(t1) ,sqrt(c5) from stb1 order by tbname" )
- self.check_result_auto_sqrt( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(t1) ,sqrt(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_sqrt( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(t1) , sqrt(c5) from stb1 where c1 > 0 order by tbname" )
+ def support_super_table_test(self, dbname="db"):
+ self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 order by ts " , f"select sqrt(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 order by tbname " , f"select sqrt(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+
+ self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select sqrt(t1), sqrt(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select sqrt(t1) ,sqrt(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(t1) ,sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(t1) , sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
pass
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
diff --git a/tests/system-test/2-query/statecount.py b/tests/system-test/2-query/statecount.py
index a88c4aef9fdad7580d4d10a642093c80750b1c57..c73c955de405ee54e6924c25cd219aa8b8a7f4eb 100644
--- a/tests/system-test/2-query/statecount.py
+++ b/tests/system-test/2-query/statecount.py
@@ -11,50 +11,47 @@ from util.sql import *
from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.ts = 1420041600000 # 2015-01-01 00:00:00 this is begin time for first record
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -70,68 +67,68 @@ class TDTestCase:
'''
)
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- # "select statecount(c1,'GT',5) from t1"
- "select statecount from t1",
- "select statecount(123--123)==1 from t1",
- "select statecount(123,123) from t1",
- "select statecount(c1,ts) from t1",
- "select statecount(c1,c1,ts) from t1",
- "select statecount(c1 ,c2 ) from t1",
- "select statecount(c1 ,NULL) from t1",
- #"select statecount(c1 ,'NULL',1.0) from t1",
- "select statecount(c1 ,'GT','1') from t1",
- "select statecount(c1 ,'GT','tbname') from t1",
- "select statecount(c1 ,'GT','*') from t1",
- "select statecount(c1 ,'GT',ts) from t1",
- "select statecount(c1 ,'GT',max(c1)) from t1",
- # "select statecount(abs(c1) ,'GT',1) from t1",
- # "select statecount(c1+2 ,'GT',1) from t1",
- "select statecount(c1 ,'GT',1,1u) from t1",
- "select statecount(c1 ,'GT',1,now) from t1",
- "select statecount(c1 ,'GT','1') from t1",
- "select statecount(c1 ,'GT','1',True) from t1",
- "select statecount(statecount(c1) ab from t1)",
- "select statecount(c1 ,'GT',1,,)int from t1",
- "select statecount('c1','GT',1) from t1",
- "select statecount('c1','GT' , NULL) from t1",
- "select statecount('c1','GT', 1 , '') from t1",
- "select statecount('c1','GT', 1 ,c%) from t1",
- "select statecount(c1 ,'GT',1,t1) from t1",
- "select statecount(c1 ,'GT',1,True) from t1",
- "select statecount(c1 ,'GT',1) , count(c1) from t1",
- "select statecount(c1 ,'GT',1) , avg(c1) from t1",
- "select statecount(c1 ,'GT',1) , min(c1) from t1",
- "select statecount(c1 ,'GT',1) , spread(c1) from t1",
- "select statecount(c1 ,'GT',1) , diff(c1) from t1",
+ # f"select statecount(c1,'GT',5) from {dbname}.t1"
+ f"select statecount from {dbname}.t1",
+ f"select statecount(123--123)==1 from {dbname}.t1",
+ f"select statecount(123,123) from {dbname}.t1",
+ f"select statecount(c1,ts) from {dbname}.t1",
+ f"select statecount(c1,c1,ts) from {dbname}.t1",
+ f"select statecount(c1 ,c2 ) from {dbname}.t1",
+ f"select statecount(c1 ,NULL) from {dbname}.t1",
+ #f"select statecount(c1 ,'NULL',1.0) from {dbname}.t1",
+ f"select statecount(c1 ,'GT','1') from {dbname}.t1",
+ f"select statecount(c1 ,'GT','tbname') from {dbname}.t1",
+ f"select statecount(c1 ,'GT','*') from {dbname}.t1",
+ f"select statecount(c1 ,'GT',ts) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',max(c1)) from {dbname}.t1",
+ # f"select statecount(abs(c1) ,'GT',1) from {dbname}.t1",
+ # f"select statecount(c1+2 ,'GT',1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1,1u) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1,now) from {dbname}.t1",
+ f"select statecount(c1 ,'GT','1') from {dbname}.t1",
+ f"select statecount(c1 ,'GT','1',True) from {dbname}.t1",
+ f"select statecount(statecount(c1) ab from {dbname}.t1)",
+ f"select statecount(c1 ,'GT',1,,)int from {dbname}.t1",
+ f"select statecount('c1','GT',1) from {dbname}.t1",
+ f"select statecount('c1','GT' , NULL) from {dbname}.t1",
+ f"select statecount('c1','GT', 1 , '') from {dbname}.t1",
+ f"select statecount('c1','GT', 1 ,c%) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1,t1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1,True) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1) , count(c1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1) , avg(c1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1) , min(c1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1) , spread(c1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1) , diff(c1) from {dbname}.t1",
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
pass
- def support_types(self):
+ def support_types(self, dbname="db"):
other_no_value_types = [
- "select statecount(ts,'GT',1) from t1" ,
- "select statecount(c7,'GT',1) from t1",
- "select statecount(c8,'GT',1) from t1",
- "select statecount(c9,'GT',1) from t1",
- "select statecount(ts,'GT',1) from ct1" ,
- "select statecount(c7,'GT',1) from ct1",
- "select statecount(c8,'GT',1) from ct1",
- "select statecount(c9,'GT',1) from ct1",
- "select statecount(ts,'GT',1) from ct3" ,
- "select statecount(c7,'GT',1) from ct3",
- "select statecount(c8,'GT',1) from ct3",
- "select statecount(c9,'GT',1) from ct3",
- "select statecount(ts,'GT',1) from ct4" ,
- "select statecount(c7,'GT',1) from ct4",
- "select statecount(c8,'GT',1) from ct4",
- "select statecount(c9,'GT',1) from ct4",
- "select statecount(ts,'GT',1) from stb1 partition by tbname" ,
- "select statecount(c7,'GT',1) from stb1 partition by tbname",
- "select statecount(c8,'GT',1) from stb1 partition by tbname",
- "select statecount(c9,'GT',1) from stb1 partition by tbname"
+ f"select statecount(ts,'GT',1) from {dbname}.t1" ,
+ f"select statecount(c7,'GT',1) from {dbname}.t1",
+ f"select statecount(c8,'GT',1) from {dbname}.t1",
+ f"select statecount(c9,'GT',1) from {dbname}.t1",
+ f"select statecount(ts,'GT',1) from {dbname}.ct1" ,
+ f"select statecount(c7,'GT',1) from {dbname}.ct1",
+ f"select statecount(c8,'GT',1) from {dbname}.ct1",
+ f"select statecount(c9,'GT',1) from {dbname}.ct1",
+ f"select statecount(ts,'GT',1) from {dbname}.ct3" ,
+ f"select statecount(c7,'GT',1) from {dbname}.ct3",
+ f"select statecount(c8,'GT',1) from {dbname}.ct3",
+ f"select statecount(c9,'GT',1) from {dbname}.ct3",
+ f"select statecount(ts,'GT',1) from {dbname}.ct4" ,
+ f"select statecount(c7,'GT',1) from {dbname}.ct4",
+ f"select statecount(c8,'GT',1) from {dbname}.ct4",
+ f"select statecount(c9,'GT',1) from {dbname}.ct4",
+ f"select statecount(ts,'GT',1) from {dbname}.stb1 partition by tbname" ,
+ f"select statecount(c7,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c8,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c9,'GT',1) from {dbname}.stb1 partition by tbname"
]
for type_sql in other_no_value_types:
@@ -139,224 +136,222 @@ class TDTestCase:
tdLog.info("support type ok , sql is : %s"%type_sql)
type_sql_lists = [
- "select statecount(c1,'GT',1) from t1",
- "select statecount(c2,'GT',1) from t1",
- "select statecount(c3,'GT',1) from t1",
- "select statecount(c4,'GT',1) from t1",
- "select statecount(c5,'GT',1) from t1",
- "select statecount(c6,'GT',1) from t1",
-
- "select statecount(c1,'GT',1) from ct1",
- "select statecount(c2,'GT',1) from ct1",
- "select statecount(c3,'GT',1) from ct1",
- "select statecount(c4,'GT',1) from ct1",
- "select statecount(c5,'GT',1) from ct1",
- "select statecount(c6,'GT',1) from ct1",
-
- "select statecount(c1,'GT',1) from ct3",
- "select statecount(c2,'GT',1) from ct3",
- "select statecount(c3,'GT',1) from ct3",
- "select statecount(c4,'GT',1) from ct3",
- "select statecount(c5,'GT',1) from ct3",
- "select statecount(c6,'GT',1) from ct3",
-
- "select statecount(c1,'GT',1) from stb1 partition by tbname",
- "select statecount(c2,'GT',1) from stb1 partition by tbname",
- "select statecount(c3,'GT',1) from stb1 partition by tbname",
- "select statecount(c4,'GT',1) from stb1 partition by tbname",
- "select statecount(c5,'GT',1) from stb1 partition by tbname",
- "select statecount(c6,'GT',1) from stb1 partition by tbname",
-
- "select statecount(c6,'GT',1) as alisb from stb1 partition by tbname",
- "select statecount(c6,'GT',1) alisb from stb1 partition by tbname",
+ f"select statecount(c1,'GT',1) from {dbname}.t1",
+ f"select statecount(c2,'GT',1) from {dbname}.t1",
+ f"select statecount(c3,'GT',1) from {dbname}.t1",
+ f"select statecount(c4,'GT',1) from {dbname}.t1",
+ f"select statecount(c5,'GT',1) from {dbname}.t1",
+ f"select statecount(c6,'GT',1) from {dbname}.t1",
+
+ f"select statecount(c1,'GT',1) from {dbname}.ct1",
+ f"select statecount(c2,'GT',1) from {dbname}.ct1",
+ f"select statecount(c3,'GT',1) from {dbname}.ct1",
+ f"select statecount(c4,'GT',1) from {dbname}.ct1",
+ f"select statecount(c5,'GT',1) from {dbname}.ct1",
+ f"select statecount(c6,'GT',1) from {dbname}.ct1",
+
+ f"select statecount(c1,'GT',1) from {dbname}.ct3",
+ f"select statecount(c2,'GT',1) from {dbname}.ct3",
+ f"select statecount(c3,'GT',1) from {dbname}.ct3",
+ f"select statecount(c4,'GT',1) from {dbname}.ct3",
+ f"select statecount(c5,'GT',1) from {dbname}.ct3",
+ f"select statecount(c6,'GT',1) from {dbname}.ct3",
+
+ f"select statecount(c1,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c2,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c3,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c4,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c5,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c6,'GT',1) from {dbname}.stb1 partition by tbname",
+
+ f"select statecount(c6,'GT',1) as alisb from {dbname}.stb1 partition by tbname",
+ f"select statecount(c6,'GT',1) alisb from {dbname}.stb1 partition by tbname",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def support_opers(self):
+ def support_opers(self, dbname="db"):
oper_lists = ['LT','lt','Lt','lT','GT','gt','Gt','gT','LE','le','Le','lE','GE','ge','Ge','gE','NE','ne','Ne','nE','EQ','eq','Eq','eQ']
oper_errors = [",","*","NULL","tbname","ts","sum","_c0"]
for oper in oper_lists:
- tdSql.query(f"select statecount(c1 ,'{oper}',1) as col from t1")
+ tdSql.query(f"select statecount(c1 ,'{oper}',1) as col from {dbname}.t1")
tdSql.checkRows(12)
for oper in oper_errors:
- tdSql.error(f"select statecount(c1 ,'{oper}',1) as col from t1")
-
+ tdSql.error(f"select statecount(c1 ,'{oper}',1) as col from {dbname}.t1")
- def basic_statecount_function(self):
+ def basic_statecount_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
# will support _rowts mix with
- # tdSql.query("select (c6,'GT',1),_rowts from ct3")
+ # tdSql.query(f"select (c6,'GT',1),_rowts from {dbname}.ct3")
# auto check for t1 table
# used for regular table
- tdSql.query("select statecount(c6,'GT',1) from t1")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.t1")
# unique with super tags
- tdSql.query("select statecount(c6,'GT',1) from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select statecount(c6,'GT',1) from ct4")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct4")
tdSql.checkRows(12)
- tdSql.query("select statecount(c6,'GT',1),tbname from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1),tbname from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select statecount(c6,'GT',1),t1 from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1),t1 from {dbname}.ct1")
tdSql.checkRows(13)
# unique with common col
- tdSql.query("select statecount(c6,'GT',1) ,ts from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1) ,ts from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, statecount(c6,'GT',1) from ct1")
+ tdSql.query(f"select ts, statecount(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select statecount(c6,'GT',1) ,c1 from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1) ,c1 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select c1, statecount(c6,'GT',1) from ct1")
+ tdSql.query(f"select c1, statecount(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, c1, c2, c3, statecount(c6,'GT',1) from ct1")
+ tdSql.query(f"select ts, c1, c2, c3, statecount(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select statecount(c6,'GT',1), ts, c1, c2, c3 from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1), ts, c1, c2, c3 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, c1, c2, c3, statecount(c6,'GT',1), ts, c4, c5, c6 from ct1")
+ tdSql.query(f"select ts, c1, c2, c3, statecount(c6,'GT',1), ts, c4, c5, c6 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select stateduration(c6,'GT',1) ,ts from ct1")
+ tdSql.query(f"select stateduration(c6,'GT',1) ,ts from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, stateduration(c6,'GT',1) from ct1")
+ tdSql.query(f"select ts, stateduration(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select stateduration(c6,'GT',1) ,c1 from ct1")
+ tdSql.query(f"select stateduration(c6,'GT',1) ,c1 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select c1, stateduration(c6,'GT',1) from ct1")
+ tdSql.query(f"select c1, stateduration(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, c1, c2, c3, stateduration(c6,'GT',1) from ct1")
+ tdSql.query(f"select ts, c1, c2, c3, stateduration(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select stateduration(c6,'GT',1), ts, c1, c2, c3 from ct1")
+ tdSql.query(f"select stateduration(c6,'GT',1), ts, c1, c2, c3 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, c1, c2, c3, stateduration(c6,'GT',1), ts, c4, c5, c6 from ct1")
+ tdSql.query(f"select ts, c1, c2, c3, stateduration(c6,'GT',1), ts, c4, c5, c6 from {dbname}.ct1")
tdSql.checkRows(13)
# unique with scalar function
- tdSql.query("select statecount(c6,'GT',1) , abs(c1) from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1) , abs(c1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select statecount(c6,'GT',1) , abs(c2)+2 from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1) , abs(c2)+2 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.error("select statecount(c6,'GT',1) , unique(c2) from ct1")
+ tdSql.error(f"select statecount(c6,'GT',1) , unique(c2) from {dbname}.ct1")
- tdSql.query("select stateduration(c6,'GT',1) , abs(c1) from ct1")
+ tdSql.query(f"select stateduration(c6,'GT',1) , abs(c1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select stateduration(c6,'GT',1) , abs(c2)+2 from ct1")
+ tdSql.query(f"select stateduration(c6,'GT',1) , abs(c2)+2 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.error("select stateduration(c6,'GT',1) , unique(c2) from ct1")
+ tdSql.error(f"select stateduration(c6,'GT',1) , unique(c2) from {dbname}.ct1")
# unique with aggregate function
- tdSql.error("select statecount(c6,'GT',1) ,sum(c1) from ct1")
- tdSql.error("select statecount(c6,'GT',1) ,max(c1) from ct1")
- tdSql.error("select statecount(c6,'GT',1) ,csum(c1) from ct1")
- tdSql.error("select statecount(c6,'GT',1) ,count(c1) from ct1")
+ tdSql.error(f"select statecount(c6,'GT',1) ,sum(c1) from {dbname}.ct1")
+ tdSql.error(f"select statecount(c6,'GT',1) ,max(c1) from {dbname}.ct1")
+ tdSql.error(f"select statecount(c6,'GT',1) ,csum(c1) from {dbname}.ct1")
+ tdSql.error(f"select statecount(c6,'GT',1) ,count(c1) from {dbname}.ct1")
# unique with filter where
- tdSql.query("select statecount(c6,'GT',1) from ct4 where c1 is null")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct4 where c1 is null")
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, None)
tdSql.checkData(2, 0, None)
- tdSql.query("select statecount(c1,'GT',1) from t1 where c1 >2 ")
+ tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.t1 where c1 >2 ")
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 2)
tdSql.checkData(2, 0, 3)
tdSql.checkData(4, 0, 5)
tdSql.checkData(5, 0, 6)
- tdSql.query("select statecount(c2,'GT',1) from t1 where c2 between 0 and 99999")
+ tdSql.query(f"select statecount(c2,'GT',1) from {dbname}.t1 where c2 between 0 and 99999")
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 2)
tdSql.checkData(6, 0, -1)
# unique with union all
- tdSql.query("select statecount(c1,'GT',1) from ct4 union all select statecount(c1,'GT',1) from ct1")
+ tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.ct4 union all select statecount(c1,'GT',1) from {dbname}.ct1")
tdSql.checkRows(25)
- tdSql.query("select statecount(c1,'GT',1) from ct4 union all select distinct(c1) from ct4")
+ tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.ct4 union all select distinct(c1) from {dbname}.ct4")
tdSql.checkRows(22)
# unique with join
# prepare join datas with same ts
- tdSql.execute(" use db ")
- tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table tb1 using st1 tags(1)")
- tdSql.execute(" create table tb2 using st1 tags(2)")
+ tdSql.execute(f"create stable {dbname}.st1 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(f"create table {dbname}.tb1 using {dbname}.st1 tags(1)")
+ tdSql.execute(f"create table {dbname}.tb2 using {dbname}.st1 tags(2)")
- tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table ttb1 using st2 tags(1)")
- tdSql.execute(" create table ttb2 using st2 tags(2)")
+ tdSql.execute(f"create stable {dbname}.st2 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(f"create table {dbname}.ttb1 using {dbname}.st2 tags(1)")
+ tdSql.execute(f"create table {dbname}.ttb2 using {dbname}.st2 tags(2)")
start_ts = 1622369635000 # 2021-05-30 18:13:55
for i in range(10):
ts_value = start_ts+i*1000
- tdSql.execute(f" insert into tb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into tb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb2 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb2 values({ts_value} , {i})")
- tdSql.query("select statecount(tb1.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts ")
+ tdSql.query(f"select statecount(tb1.num,'GT',1) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts ")
tdSql.checkRows(10)
tdSql.checkData(0,0,-1)
tdSql.checkData(1,0,-1)
tdSql.checkData(2,0,1)
tdSql.checkData(9,0,8)
- tdSql.query("select statecount(tb1.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts union all select statecount(tb2.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts ")
+ tdSql.query(f"select statecount(tb1.num,'GT',1) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts union all select statecount(tb2.num,'GT',1) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts ")
tdSql.checkRows(20)
# nest query
- # tdSql.query("select unique(c1) from (select c1 from ct1)")
- tdSql.query("select c1 from (select statecount(c1,'GT',1) c1 from t1)")
+ # tdSql.query(f"select unique(c1) from (select c1 from {dbname}.ct1)")
+ tdSql.query(f"select c1 from (select statecount(c1,'GT',1) c1 from {dbname}.t1)")
tdSql.checkRows(12)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, -1)
tdSql.checkData(2, 0, 1)
tdSql.checkData(10, 0, 8)
- tdSql.query("select sum(c1) from (select statecount(c1,'GT',1) c1 from t1)")
+ tdSql.query(f"select sum(c1) from (select statecount(c1,'GT',1) c1 from {dbname}.t1)")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 35)
- tdSql.query("select sum(c1) from (select distinct(c1) c1 from ct1) union all select sum(c1) from (select statecount(c1,'GT',1) c1 from ct1)")
+ tdSql.query(f"select sum(c1) from (select distinct(c1) c1 from {dbname}.ct1) union all select sum(c1) from (select statecount(c1,'GT',1) c1 from {dbname}.ct1)")
tdSql.checkRows(2)
- tdSql.query("select 1-abs(c1) from (select statecount(c1,'GT',1) c1 from t1)")
+ tdSql.query(f"select 1-abs(c1) from (select statecount(c1,'GT',1) c1 from {dbname}.t1)")
tdSql.checkRows(12)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, 0.000000000)
@@ -365,43 +360,41 @@ class TDTestCase:
# bug for stable
#partition by tbname
- # tdSql.query(" select unique(c1) from stb1 partition by tbname ")
+ # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ")
# tdSql.checkRows(21)
- # tdSql.query(" select unique(c1) from stb1 partition by tbname ")
+ # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ")
# tdSql.checkRows(21)
# group by
- tdSql.error("select statecount(c1,'GT',1) from ct1 group by c1")
- tdSql.error("select statecount(c1,'GT',1) from ct1 group by tbname")
-
- # super table
-
- def check_unit_time(self):
- tdSql.execute(" use db ")
- tdSql.error("select stateduration(c1,'GT',1,1b) from ct1")
- tdSql.error("select stateduration(c1,'GT',1,1u) from ct1")
- tdSql.error("select stateduration(c1,'GT',1,1000s) from t1")
- tdSql.error("select stateduration(c1,'GT',1,10m) from t1")
- tdSql.error("select stateduration(c1,'GT',1,10d) from t1")
- tdSql.query("select stateduration(c1,'GT',1,1s) from t1")
+ tdSql.error(f"select statecount(c1,'GT',1) from {dbname}.ct1 group by c1")
+ tdSql.error(f"select statecount(c1,'GT',1) from {dbname}.ct1 group by tbname")
+
+ def check_unit_time(self, dbname="db"):
+ tdSql.error(f"select stateduration(c1,'GT',1,1b) from {dbname}.ct1")
+ tdSql.error(f"select stateduration(c1,'GT',1,1u) from {dbname}.ct1")
+ tdSql.error(f"select stateduration(c1,'GT',1,1000s) from {dbname}.t1")
+ tdSql.error(f"select stateduration(c1,'GT',1,10m) from {dbname}.t1")
+ tdSql.error(f"select stateduration(c1,'GT',1,10d) from {dbname}.t1")
+ tdSql.query(f"select stateduration(c1,'GT',1,1s) from {dbname}.t1")
tdSql.checkData(10,0,63072035)
- tdSql.query("select stateduration(c1,'GT',1,1m) from t1")
+ tdSql.query(f"select stateduration(c1,'GT',1,1m) from {dbname}.t1")
tdSql.checkData(10,0,int(63072035/60))
- tdSql.query("select stateduration(c1,'GT',1,1h) from t1")
+ tdSql.query(f"select stateduration(c1,'GT',1,1h) from {dbname}.t1")
tdSql.checkData(10,0,int(63072035/60/60))
- tdSql.query("select stateduration(c1,'GT',1,1d) from t1")
+ tdSql.query(f"select stateduration(c1,'GT',1,1d) from {dbname}.t1")
tdSql.checkData(10,0,int(63072035/60/24/60))
- tdSql.query("select stateduration(c1,'GT',1,1w) from t1")
+ tdSql.query(f"select stateduration(c1,'GT',1,1w) from {dbname}.t1")
tdSql.checkData(10,0,int(63072035/60/7/24/60))
def query_precision(self):
def generate_data(precision="ms"):
- tdSql.execute("create database if not exists db_%s precision '%s';" %(precision, precision))
+ dbname = f"db_{precision}"
+            tdSql.execute(f"create database if not exists {dbname} precision '{precision}';")
tdSql.execute("use db_%s;" %precision)
- tdSql.execute("create stable db_%s.st (ts timestamp , id int) tags(ind int);"%precision)
- tdSql.execute("create table db_%s.tb1 using st tags(1);"%precision)
- tdSql.execute("create table db_%s.tb2 using st tags(2);"%precision)
+            tdSql.execute(f"create stable {dbname}.st (ts timestamp , id int) tags(ind int);")
+            tdSql.execute(f"create table {dbname}.tb1 using {dbname}.st tags(1);")
+            tdSql.execute(f"create table {dbname}.tb2 using {dbname}.st tags(2);")
if precision == "ms":
start_ts = self.ts
@@ -432,55 +425,54 @@ class TDTestCase:
if pres == "ms":
if unit in ["1u","1b"]:
- tdSql.error("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
+                    tdSql.error("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
pass
else:
- tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
+                    tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
elif pres == "us" and unit in ["1b"]:
if unit in ["1b"]:
- tdSql.error("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
+                    tdSql.error("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
pass
else:
- tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
+                    tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
else:
- tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
+ tdSql.query(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
basic_result = 70
tdSql.checkData(9,0,basic_result*pow(1000,index))
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- tdSql.query("select statecount(c1,'GT',1) from sub1_bound")
+ tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.sub1_bound")
tdSql.checkRows(5)
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
diff --git a/tests/system-test/2-query/substr.py b/tests/system-test/2-query/substr.py
index f833a42b574aac2cf8cfcab1bae7035b8273c427..ea55c5e44e780c5acdd86b8be29e8654b8d1251e 100644
--- a/tests/system-test/2-query/substr.py
+++ b/tests/system-test/2-query/substr.py
@@ -127,16 +127,16 @@ class TDTestCase:
return sqls
- def __test_current(self): # sourcery skip: use-itertools-product
+ def __test_current(self, dbname="db"): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__substr_check(tb, CURRENT_POS, LENS)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__substr_err_check(tb):
@@ -145,22 +145,21 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self, dbname="db"):
+ self.__test_current(dbname)
+ self.__test_error(dbname)
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (tag1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -170,29 +169,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -208,7 +207,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -224,13 +223,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -259,10 +258,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
-
- tdSql.execute("use db")
+ tdSql.execute("flush database db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
diff --git a/tests/system-test/2-query/sum.py b/tests/system-test/2-query/sum.py
index 4f5ed34419082d49990f14b6e8518b516c4e7df8..dbc79e25f5ba230723f54507f47da91514698c69 100644
--- a/tests/system-test/2-query/sum.py
+++ b/tests/system-test/2-query/sum.py
@@ -89,14 +89,14 @@ class TDTestCase:
return sqls
- def __test_current(self):
+ def __test_current(self, dbname="db"):
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = ["ct1", "ct2", "ct4", "t1"]
for tb in tbname:
self.__sum_current_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = ["ct1", "ct2", "ct4", "t1"]
@@ -106,21 +106,21 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self, dbname="db"):
+ self.__test_current(dbname)
+ self.__test_error(dbname)
- def __create_tb(self):
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table {DBNAME}.stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
- ) tags (t1 int)
+ ) tags (tag1 int)
'''
- create_ntb_sql = f'''create table {DBNAME}.t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -130,83 +130,82 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table {DBNAME}.ct{i+1} using {DBNAME}.stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into {DBNAME}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into {DBNAME}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into {DBNAME}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into {DBNAME}.ct1 values
- ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } )
- ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } )
+ f'''insert into {dbname}.ct1 values
+ ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
+ ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into {DBNAME}.ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
- { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000}
+ { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
- { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000}
+ { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
- f'''insert into {DBNAME}.ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
- { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
- { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
- insert_data = f'''insert into {DBNAME}.t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
- "binary_{i}", "nchar_{i}", { now_time - 1000 * i } )
+ "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into {DBNAME}.t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
- "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
- "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
-
def run(self):
tdSql.prepare()
@@ -219,12 +218,8 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- # tdDnodes.stop(1)
- # tdDnodes.start(1)
-
tdSql.execute("flush database db")
-
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
diff --git a/tests/system-test/2-query/tail.py b/tests/system-test/2-query/tail.py
index d708873d6ff608581a64120a054c81f0b3a8da1f..687023f57ec833248c2c7c472b751019a90f930f 100644
--- a/tests/system-test/2-query/tail.py
+++ b/tests/system-test/2-query/tail.py
@@ -10,49 +10,46 @@ from util.sql import *
from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
-
- def prepare_datas(self):
+
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
-
+
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -67,115 +64,115 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
-
- def test_errors(self):
+
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select tail from t1",
- "select tail(123--123)==1 from t1",
- "select tail(123,123) from t1",
- "select tail(c1,ts) from t1",
- "select tail(c1,c1,ts) from t1",
- "select tail(c1) as 'd1' from t1",
- "select tail(c1 ,c2 ) from t1",
- "select tail(c1 ,NULL) from t1",
- "select tail(,) from t1;",
- "select tail(tail(c1) ab from t1)",
- "select tail(c1) as int from t1",
- "select tail('c1') from t1",
- "select tail(NULL) from t1",
- "select tail('') from t1",
- "select tail(c%) from t1",
- "select tail(t1) from t1",
- "select tail(True) from t1",
- "select tail(c1,1) , count(c1) from t1",
- "select tail(c1,1) , avg(c1) from t1",
- "select tail(c1,1) , min(c1) from t1",
- "select tail(c1,1) , spread(c1) from t1",
- "select tail(c1,1) , diff(c1) from t1",
- "select tail from stb1 partition by tbname",
- "select tail(123--123)==1 from stb1 partition by tbname",
- "select tail(123,123) from stb1 partition by tbname",
- "select tail(c1,ts) from stb1 partition by tbname",
- "select tail(c1,c1,ts) from stb1 partition by tbname",
- "select tail(c1) as 'd1' from stb1 partition by tbname",
- "select tail(c1 ,c2 ) from stb1 partition by tbname",
- "select tail(c1 ,NULL) from stb1 partition by tbname",
- "select tail(,) from stb1 partition by tbname;",
- "select tail(tail(c1) ab from stb1 partition by tbname)",
- "select tail(c1) as int from stb1 partition by tbname",
- "select tail('c1') from stb1 partition by tbname",
- "select tail(NULL) from stb1 partition by tbname",
- "select tail('') from stb1 partition by tbname",
- "select tail(c%) from stb1 partition by tbname",
- "select tail(t1) from stb1 partition by tbname",
- "select tail(True) from stb1 partition by tbname",
- "select tail(c1,1) , count(c1) from stb1 partition by tbname",
- "select tail(c1,1) , avg(c1) from stb1 partition by tbname",
- "select tail(c1,1) , min(c1) from stb1 partition by tbname",
- "select tail(c1,1) , spread(c1) from stb1 partition by tbname",
- "select tail(c1,1) , diff(c1) from stb1 partition by tbname",
+ f"select tail from {dbname}.t1",
+ f"select tail(123--123)==1 from {dbname}.t1",
+ f"select tail(123,123) from {dbname}.t1",
+ f"select tail(c1,ts) from {dbname}.t1",
+ f"select tail(c1,c1,ts) from {dbname}.t1",
+ f"select tail(c1) as 'd1' from {dbname}.t1",
+ f"select tail(c1 ,c2 ) from {dbname}.t1",
+ f"select tail(c1 ,NULL) from {dbname}.t1",
+ f"select tail(,) from {dbname}.t1;",
+ f"select tail(tail(c1) ab from {dbname}.t1)",
+ f"select tail(c1) as int from {dbname}.t1",
+ f"select tail('c1') from {dbname}.t1",
+ f"select tail(NULL) from {dbname}.t1",
+ f"select tail('') from {dbname}.t1",
+ f"select tail(c%) from {dbname}.t1",
+ f"select tail(t1) from {dbname}.t1",
+ f"select tail(True) from {dbname}.t1",
+ f"select tail(c1,1) , count(c1) from {dbname}.t1",
+ f"select tail(c1,1) , avg(c1) from {dbname}.t1",
+ f"select tail(c1,1) , min(c1) from {dbname}.t1",
+ f"select tail(c1,1) , spread(c1) from {dbname}.t1",
+ f"select tail(c1,1) , diff(c1) from {dbname}.t1",
+ f"select tail from {dbname}.stb1 partition by tbname",
+ f"select tail(123--123)==1 from {dbname}.stb1 partition by tbname",
+ f"select tail(123,123) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,ts) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,c1,ts) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1) as 'd1' from {dbname}.stb1 partition by tbname",
+ f"select tail(c1 ,c2 ) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1 ,NULL) from {dbname}.stb1 partition by tbname",
+ f"select tail(,) from {dbname}.stb1 partition by tbname;",
+ f"select tail(tail(c1) ab from {dbname}.stb1 partition by tbname)",
+ f"select tail(c1) as int from {dbname}.stb1 partition by tbname",
+ f"select tail('c1') from {dbname}.stb1 partition by tbname",
+ f"select tail(NULL) from {dbname}.stb1 partition by tbname",
+ f"select tail('') from {dbname}.stb1 partition by tbname",
+ f"select tail(c%) from {dbname}.stb1 partition by tbname",
+ f"select tail(t1) from {dbname}.stb1 partition by tbname",
+ f"select tail(True) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,1) , count(c1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,1) , avg(c1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,1) , min(c1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,1) , spread(c1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,1) , diff(c1) from {dbname}.stb1 partition by tbname",
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
-
- def support_types(self):
+
+ def support_types(self, dbname="db"):
other_no_value_types = [
- "select tail(ts,1) from t1" ,
- "select tail(c7,1) from t1",
- "select tail(c8,1) from t1",
- "select tail(c9,1) from t1",
- "select tail(ts,1) from ct1" ,
- "select tail(c7,1) from ct1",
- "select tail(c8,1) from ct1",
- "select tail(c9,1) from ct1",
- "select tail(ts,1) from ct3" ,
- "select tail(c7,1) from ct3",
- "select tail(c8,1) from ct3",
- "select tail(c9,1) from ct3",
- "select tail(ts,1) from ct4" ,
- "select tail(c7,1) from ct4",
- "select tail(c8,1) from ct4",
- "select tail(c9,1) from ct4",
- "select tail(ts,1) from stb1 partition by tbname" ,
- "select tail(c7,1) from stb1 partition by tbname",
- "select tail(c8,1) from stb1 partition by tbname",
- "select tail(c9,1) from stb1 partition by tbname"
+ f"select tail(ts,1) from {dbname}.t1" ,
+ f"select tail(c7,1) from {dbname}.t1",
+ f"select tail(c8,1) from {dbname}.t1",
+ f"select tail(c9,1) from {dbname}.t1",
+ f"select tail(ts,1) from {dbname}.ct1" ,
+ f"select tail(c7,1) from {dbname}.ct1",
+ f"select tail(c8,1) from {dbname}.ct1",
+ f"select tail(c9,1) from {dbname}.ct1",
+ f"select tail(ts,1) from {dbname}.ct3" ,
+ f"select tail(c7,1) from {dbname}.ct3",
+ f"select tail(c8,1) from {dbname}.ct3",
+ f"select tail(c9,1) from {dbname}.ct3",
+ f"select tail(ts,1) from {dbname}.ct4" ,
+ f"select tail(c7,1) from {dbname}.ct4",
+ f"select tail(c8,1) from {dbname}.ct4",
+ f"select tail(c9,1) from {dbname}.ct4",
+ f"select tail(ts,1) from {dbname}.stb1 partition by tbname" ,
+ f"select tail(c7,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c8,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c9,1) from {dbname}.stb1 partition by tbname"
]
-
+
for type_sql in other_no_value_types:
tdSql.query(type_sql)
-
+
type_sql_lists = [
- "select tail(c1,1) from t1",
- "select tail(c2,1) from t1",
- "select tail(c3,1) from t1",
- "select tail(c4,1) from t1",
- "select tail(c5,1) from t1",
- "select tail(c6,1) from t1",
-
- "select tail(c1,1) from ct1",
- "select tail(c2,1) from ct1",
- "select tail(c3,1) from ct1",
- "select tail(c4,1) from ct1",
- "select tail(c5,1) from ct1",
- "select tail(c6,1) from ct1",
-
- "select tail(c1,1) from ct3",
- "select tail(c2,1) from ct3",
- "select tail(c3,1) from ct3",
- "select tail(c4,1) from ct3",
- "select tail(c5,1) from ct3",
- "select tail(c6,1) from ct3",
-
- "select tail(c1,1) from stb1 partition by tbname",
- "select tail(c2,1) from stb1 partition by tbname",
- "select tail(c3,1) from stb1 partition by tbname",
- "select tail(c4,1) from stb1 partition by tbname",
- "select tail(c5,1) from stb1 partition by tbname",
- "select tail(c6,1) from stb1 partition by tbname",
-
- "select tail(c6,1) as alisb from stb1 partition by tbname",
- "select tail(c6,1) alisb from stb1 partition by tbname",
+ f"select tail(c1,1) from {dbname}.t1",
+ f"select tail(c2,1) from {dbname}.t1",
+ f"select tail(c3,1) from {dbname}.t1",
+ f"select tail(c4,1) from {dbname}.t1",
+ f"select tail(c5,1) from {dbname}.t1",
+ f"select tail(c6,1) from {dbname}.t1",
+
+ f"select tail(c1,1) from {dbname}.ct1",
+ f"select tail(c2,1) from {dbname}.ct1",
+ f"select tail(c3,1) from {dbname}.ct1",
+ f"select tail(c4,1) from {dbname}.ct1",
+ f"select tail(c5,1) from {dbname}.ct1",
+ f"select tail(c6,1) from {dbname}.ct1",
+
+ f"select tail(c1,1) from {dbname}.ct3",
+ f"select tail(c2,1) from {dbname}.ct3",
+ f"select tail(c3,1) from {dbname}.ct3",
+ f"select tail(c4,1) from {dbname}.ct3",
+ f"select tail(c5,1) from {dbname}.ct3",
+ f"select tail(c6,1) from {dbname}.ct3",
+
+ f"select tail(c1,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c2,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c3,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c4,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c5,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c6,1) from {dbname}.stb1 partition by tbname",
+
+ f"select tail(c6,1) as alisb from {dbname}.stb1 partition by tbname",
+ f"select tail(c6,1) alisb from {dbname}.stb1 partition by tbname",
]
for type_sql in type_sql_lists:
@@ -189,7 +186,6 @@ class TDTestCase:
tail_result = tdSql.queryResult
tdSql.query(equal_sql)
- print(equal_sql)
equal_result = tdSql.queryResult
@@ -198,257 +194,255 @@ class TDTestCase:
else:
tdLog.exit(" tail query check fail , tail sql is: %s " %tail_sql)
- def basic_tail_function(self):
+ def basic_tail_function(self, dbname="db"):
- # basic query
- tdSql.query("select c1 from ct3")
+ # basic query
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select tail(c1,1) from ct3")
+ tdSql.query(f"select tail(c1,1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tail(c2,1) from ct3")
+ tdSql.query(f"select tail(c2,1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tail(c3,1) from ct3")
+ tdSql.query(f"select tail(c3,1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tail(c4,1) from ct3")
+ tdSql.query(f"select tail(c4,1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tail(c5,1) from ct3")
+ tdSql.query(f"select tail(c5,1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tail(c6,1) from ct3")
-
+ tdSql.query(f"select tail(c6,1) from {dbname}.ct3")
+
# auto check for t1 table
# used for regular table
- tdSql.query("select tail(c1,1) from t1")
-
- tdSql.query("desc t1")
+ tdSql.query(f"select tail(c1,1) from {dbname}.t1")
+
+ tdSql.query(f"desc {dbname}.t1")
col_lists_rows = tdSql.queryResult
col_lists = []
for col_name in col_lists_rows:
if col_name[0] =="ts":
continue
-
+
col_lists.append(col_name[0])
-
+
for col in col_lists:
- for loop in range(100):
+ for loop in range(100):
limit = randint(1,100)
offset = randint(0,100)
- self.check_tail_table("t1" , col , limit , offset)
+ self.check_tail_table(f"{dbname}.t1" , col , limit , offset)
# tail for invalid params
-
- tdSql.error("select tail(c1,-10,10) from ct1")
- tdSql.error("select tail(c1,10,10000) from ct1")
- tdSql.error("select tail(c1,10,-100) from ct1")
- tdSql.error("select tail(c1,100/2,10) from ct1")
- tdSql.error("select tail(c1,5,10*2) from ct1")
- tdSql.query("select tail(c1,100,100) from ct1")
+
+ tdSql.error(f"select tail(c1,-10,10) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,10000) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,-100) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,100/2,10) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,5,10*2) from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,100,100) from {dbname}.ct1")
tdSql.checkRows(0)
- tdSql.query("select tail(c1,10,100) from ct1")
+ tdSql.query(f"select tail(c1,10,100) from {dbname}.ct1")
tdSql.checkRows(0)
- tdSql.error("select tail(c1,10,101) from ct1")
- tdSql.query("select tail(c1,10,0) from ct1")
- tdSql.query("select tail(c1,100,10) from ct1")
+ tdSql.error(f"select tail(c1,10,101) from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,10,0) from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,100,10) from {dbname}.ct1")
tdSql.checkRows(3)
-
+
# tail with super tags
- tdSql.query("select tail(c1,10,10) from ct1")
+ tdSql.query(f"select tail(c1,10,10) from {dbname}.ct1")
tdSql.checkRows(3)
- tdSql.query("select tail(c1,10,10),tbname from ct1")
- tdSql.query("select tail(c1,10,10),t1 from ct1")
+ tdSql.query(f"select tail(c1,10,10),tbname from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,10,10),t1 from {dbname}.ct1")
+
+ # tail with common col
+ tdSql.query(f"select tail(c1,10,10) ,ts from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,10,10) ,c1 from {dbname}.ct1")
- # tail with common col
- tdSql.query("select tail(c1,10,10) ,ts from ct1")
- tdSql.query("select tail(c1,10,10) ,c1 from ct1")
+ # tail with scalar function
+ tdSql.query(f"select tail(c1,10,10) ,abs(c1) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,10) , tail(c2,10,10) from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,10,10) , abs(c2)+2 from {dbname}.ct1")
- # tail with scalar function
- tdSql.query("select tail(c1,10,10) ,abs(c1) from ct1")
- tdSql.error("select tail(c1,10,10) , tail(c2,10,10) from ct1")
- tdSql.query("select tail(c1,10,10) , abs(c2)+2 from ct1")
-
# bug need fix for scalar value or compute again
- # tdSql.error(" select tail(c1,10,10) , 123 from ct1")
- # tdSql.error(" select abs(tail(c1,10,10)) from ct1")
- # tdSql.error(" select abs(tail(c1,10,10)) + 2 from ct1")
+ # tdSql.error(f"select tail(c1,10,10) , 123 from {dbname}.ct1")
+ # tdSql.error(f"select abs(tail(c1,10,10)) from {dbname}.ct1")
+ # tdSql.error(f"select abs(tail(c1,10,10)) + 2 from {dbname}.ct1")
- # tail with aggregate function
- tdSql.error("select tail(c1,10,10) ,sum(c1) from ct1")
- tdSql.error("select tail(c1,10,10) ,max(c1) from ct1")
- tdSql.error("select tail(c1,10,10) ,csum(c1) from ct1")
- tdSql.error("select tail(c1,10,10) ,count(c1) from ct1")
+ # tail with aggregate function
+ tdSql.error(f"select tail(c1,10,10) ,sum(c1) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,10) ,max(c1) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,10) ,csum(c1) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,10) ,count(c1) from {dbname}.ct1")
# tail with filter where
- tdSql.query("select tail(c1,3,1) from ct4 where c1 is null")
+ tdSql.query(f"select tail(c1,3,1) from {dbname}.ct4 where c1 is null")
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, None)
- tdSql.query("select tail(c1,3,2) from ct4 where c1 >2 order by 1")
+ tdSql.query(f"select tail(c1,3,2) from {dbname}.ct4 where c1 >2 order by 1")
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 6)
tdSql.checkData(2, 0, 7)
- tdSql.query("select tail(c1,2,1) from ct4 where c2 between 0 and 99999 order by 1")
+ tdSql.query(f"select tail(c1,2,1) from {dbname}.ct4 where c2 between 0 and 99999 order by 1")
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 2)
- # tail with union all
- tdSql.query("select tail(c1,2,1) from ct4 union all select c1 from ct1")
+ # tail with union all
+ tdSql.query(f"select tail(c1,2,1) from {dbname}.ct4 union all select c1 from {dbname}.ct1")
tdSql.checkRows(15)
- tdSql.query("select tail(c1,2,1) from ct4 union all select c1 from ct2 order by 1")
+ tdSql.query(f"select tail(c1,2,1) from {dbname}.ct4 union all select c1 from {dbname}.ct2 order by 1")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 0)
tdSql.checkData(1, 0, 1)
- tdSql.query("select tail(c2,2,1) from ct4 union all select abs(c2)/2 from ct4")
+ tdSql.query(f"select tail(c2,2,1) from {dbname}.ct4 union all select abs(c2)/2 from {dbname}.ct4")
tdSql.checkRows(14)
- # tail with join
- # prepare join datas with same ts
+ # tail with join
+ # prepare join datas with same ts
- tdSql.execute(" use db ")
- tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table tb1 using st1 tags(1)")
- tdSql.execute(" create table tb2 using st1 tags(2)")
+ tdSql.execute(f" create stable {dbname}.st1 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(f" create table {dbname}.tb1 using {dbname}.st1 tags(1)")
+ tdSql.execute(f" create table {dbname}.tb2 using {dbname}.st1 tags(2)")
- tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table ttb1 using st2 tags(1)")
- tdSql.execute(" create table ttb2 using st2 tags(2)")
+ tdSql.execute(f" create stable {dbname}.st2 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(f" create table {dbname}.ttb1 using {dbname}.st2 tags(1)")
+ tdSql.execute(f" create table {dbname}.ttb2 using {dbname}.st2 tags(2)")
start_ts = 1622369635000 # 2021-05-30 18:13:55
for i in range(10):
ts_value = start_ts+i*1000
- tdSql.execute(f" insert into tb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into tb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb2 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb2 values({ts_value} , {i})")
- tdSql.query("select tail(tb2.num,3,2) from tb1, tb2 where tb1.ts=tb2.ts order by 1 desc")
+ tdSql.query(f"select tail(tb2.num,3,2) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts order by 1 desc")
tdSql.checkRows(3)
tdSql.checkData(0,0,7)
tdSql.checkData(1,0,6)
tdSql.checkData(2,0,5)
# nest query
- # tdSql.query("select tail(c1,2) from (select _rowts , c1 from ct1)")
- tdSql.query("select c1 from (select tail(c1,2) c1 from ct4) order by 1 nulls first")
+ # tdSql.query(f"select tail(c1,2) from (select _rowts , c1 from {dbname}.ct1)")
+ tdSql.query(f"select c1 from (select tail(c1,2) c1 from {dbname}.ct4) order by 1 nulls first")
tdSql.checkRows(2)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, 0)
- tdSql.query("select sum(c1) from (select tail(c1,2) c1 from ct1)")
+ tdSql.query(f"select sum(c1) from (select tail(c1,2) c1 from {dbname}.ct1)")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 18)
- tdSql.query("select abs(c1) from (select tail(c1,2) c1 from ct1)")
+ tdSql.query(f"select abs(c1) from (select tail(c1,2) c1 from {dbname}.ct1)")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 9)
-
+
#partition by tbname
- tdSql.query(" select tail(c1,5) from stb1 partition by tbname ")
+ tdSql.query(f"select tail(c1,5) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(10)
- tdSql.query(" select tail(c1,3) from stb1 partition by tbname ")
+ tdSql.query(f"select tail(c1,3) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(6)
-
- # group by
- tdSql.error("select tail(c1,2) from ct1 group by c1")
- tdSql.error("select tail(c1,2) from ct1 group by tbname")
+
+ # group by
+ tdSql.error(f"select tail(c1,2) from {dbname}.ct1 group by c1")
+ tdSql.error(f"select tail(c1,2) from {dbname}.ct1 group by tbname")
# super table
- tdSql.error("select tbname , tail(c1,2) from stb1 group by tbname")
- tdSql.query("select tail(c1,2) from stb1 partition by tbname")
+ tdSql.error(f"select tbname , tail(c1,2) from {dbname}.stb1 group by tbname")
+ tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(4)
- # bug need fix
- # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname")
+ # bug need fix
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname")
# tdSql.checkRows(4)
- # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname order by tbname")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname order by tbname")
# tdSql.checkRows(4)
- # tdSql.query(" select tbname , count(c1) from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , count(c1) from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , max(c1) ,c1 from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , max(c1) ,c1 from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname ,first(c1) from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname ,first(c1) from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- tdSql.query("select tail(c1,2) from stb1 partition by tbname")
+ tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(4)
- # # bug need fix
- # tdSql.query(" select tbname , tail(c1,2) from stb1 where t1 = 0 partition by tbname ")
+ # # bug need fix
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where t1 = 0 partition by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , tail(c1,2) from stb1 where t1 = 0 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where t1 = 0 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where c1 = 0 partition by tbname order by tbname ")
# tdSql.checkRows(3)
- # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname ")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where c1 = 0 partition by tbname ")
# tdSql.checkRows(3)
- # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname ")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where c1 = 0 partition by tbname ")
# tdSql.checkRows(3)
- tdSql.query(" select tail(t1,2) from stb1 ")
+ tdSql.query(f"select tail(t1,2) from {dbname}.stb1 ")
tdSql.checkRows(2)
- tdSql.query(" select tail(t1+c1,2) from stb1 ")
+ tdSql.query(f"select tail(t1+c1,2) from {dbname}.stb1 ")
tdSql.checkRows(2)
- tdSql.query(" select tail(t1+c1,2) from stb1 partition by tbname ")
+ tdSql.query(f"select tail(t1+c1,2) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(4)
- tdSql.query(" select tail(t1,2) from stb1 partition by tbname ")
+ tdSql.query(f"select tail(t1,2) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(4)
- # nest query
- tdSql.query(" select tail(c1,2) from (select _rowts , t1 ,c1 , tbname from stb1 ) ")
+ # nest query
+ tdSql.query(f"select tail(c1,2) from (select _rowts , t1 ,c1 , tbname from {dbname}.stb1 ) ")
tdSql.checkRows(2)
tdSql.checkData(0,0,None)
tdSql.checkData(1,0,9)
- tdSql.query("select tail(t1,2) from (select _rowts , t1 , tbname from stb1 )")
+ tdSql.query(f"select tail(t1,2) from (select _rowts , t1 , tbname from {dbname}.stb1 )")
tdSql.checkRows(2)
tdSql.checkData(0,0,4)
tdSql.checkData(1,0,1)
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
-
+
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
-
- tdSql.query("select tail(c2,2) from sub1_bound order by 1 desc")
+
+ tdSql.query(f"select tail(c2,2) from {dbname}.sub1_bound order by 1 desc")
tdSql.checkRows(2)
tdSql.checkData(0,0,9223372036854775803)
@@ -456,22 +450,22 @@ class TDTestCase:
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
-
+
self.prepare_datas()
- tdLog.printNoPrefix("==========step2:test errors ==============")
+ tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
-
- tdLog.printNoPrefix("==========step3:support types ============")
+
+ tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
- tdLog.printNoPrefix("==========step4: tail basic query ============")
+ tdLog.printNoPrefix("==========step4: tail basic query ============")
self.basic_tail_function()
- tdLog.printNoPrefix("==========step5: tail boundary query ============")
+ tdLog.printNoPrefix("==========step5: tail boundary query ============")
self.check_boundary_values()
diff --git a/tests/system-test/2-query/tan.py b/tests/system-test/2-query/tan.py
index da47c1c2b2560bf617681df10e8788f518b11ac1..683cee37ff7c81ca45b628852134ddbab6e342cf 100644
--- a/tests/system-test/2-query/tan.py
+++ b/tests/system-test/2-query/tan.py
@@ -9,48 +9,46 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
-
- def prepare_datas(self):
+
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
-
+
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -61,18 +59,18 @@ class TDTestCase:
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
- ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
+ ( '2022-12-31 01:01:36.000', 9, -99999, -999, -99, -9.99, -99999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
-
+
def check_result_auto_tan(self ,origin_query , pow_query):
pow_result = tdSql.getResult(pow_query)
origin_result = tdSql.getResult(origin_query)
auto_result =[]
-
+
for row in origin_result:
row_check = []
for elem in row:
@@ -82,190 +80,178 @@ class TDTestCase:
elem = math.tan(elem)
row_check.append(elem)
auto_result.append(row_check)
-
- check_status = True
-
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("tan function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("tan value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def test_errors(self):
+ tdSql.checkData(row_index , col_index ,auto_result[row_index][col_index] )
+
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select tan from t1",
- # "select tan(-+--+c1 ) from t1",
- # "select +-tan(c1) from t1",
- # "select ++-tan(c1) from t1",
- # "select ++--tan(c1) from t1",
- # "select - -tan(c1)*0 from t1",
- # "select tan(tbname+1) from t1 ",
- "select tan(123--123)==1 from t1",
- "select tan(c1) as 'd1' from t1",
- "select tan(c1 ,c2) from t1",
- "select tan(c1 ,NULL ) from t1",
- "select tan(,) from t1;",
- "select tan(tan(c1) ab from t1)",
- "select tan(c1 ) as int from t1",
- "select tan from stb1",
- # "select tan(-+--+c1) from stb1",
- # "select +-tan(c1) from stb1",
- # "select ++-tan(c1) from stb1",
- # "select ++--tan(c1) from stb1",
- # "select - -tan(c1)*0 from stb1",
- # "select tan(tbname+1) from stb1 ",
- "select tan(123--123)==1 from stb1",
- "select tan(c1) as 'd1' from stb1",
- "select tan(c1 ,c2 ) from stb1",
- "select tan(c1 ,NULL) from stb1",
- "select tan(,) from stb1;",
- "select tan(tan(c1) ab from stb1)",
- "select tan(c1) as int from stb1"
+ f"select tan from {dbname}.t1",
+ # f"select tan(-+--+c1 ) from {dbname}.t1",
+ # f"select +-tan(c1) from {dbname}.t1",
+ # f"select ++-tan(c1) from {dbname}.t1",
+ # f"select ++--tan(c1) from {dbname}.t1",
+ # f"select - -tan(c1)*0 from {dbname}.t1",
+ # f"select tan(tbname+1) from {dbname}.t1 ",
+ f"select tan(123--123)==1 from {dbname}.t1",
+ f"select tan(c1) as 'd1' from {dbname}.t1",
+ f"select tan(c1 ,c2) from {dbname}.t1",
+ f"select tan(c1 ,NULL ) from {dbname}.t1",
+ f"select tan(,) from {dbname}.t1;",
+ f"select tan(tan(c1) ab from {dbname}.t1)",
+ f"select tan(c1 ) as int from {dbname}.t1",
+ f"select tan from {dbname}.stb1",
+ # f"select tan(-+--+c1) from {dbname}.stb1",
+ # f"select +-tan(c1) from {dbname}.stb1",
+ # f"select ++-tan(c1) from {dbname}.stb1",
+ # f"select ++--tan(c1) from {dbname}.stb1",
+ # f"select - -tan(c1)*0 from {dbname}.stb1",
+ # f"select tan(tbname+1) from {dbname}.stb1 ",
+ f"select tan(123--123)==1 from {dbname}.stb1",
+ f"select tan(c1) as 'd1' from {dbname}.stb1",
+ f"select tan(c1 ,c2 ) from {dbname}.stb1",
+ f"select tan(c1 ,NULL) from {dbname}.stb1",
+ f"select tan(,) from {dbname}.stb1;",
+ f"select tan(tan(c1) ab from {dbname}.stb1)",
+ f"select tan(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
-
- def support_types(self):
+
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select tan(ts) from t1" ,
- "select tan(c7) from t1",
- "select tan(c8) from t1",
- "select tan(c9) from t1",
- "select tan(ts) from ct1" ,
- "select tan(c7) from ct1",
- "select tan(c8) from ct1",
- "select tan(c9) from ct1",
- "select tan(ts) from ct3" ,
- "select tan(c7) from ct3",
- "select tan(c8) from ct3",
- "select tan(c9) from ct3",
- "select tan(ts) from ct4" ,
- "select tan(c7) from ct4",
- "select tan(c8) from ct4",
- "select tan(c9) from ct4",
- "select tan(ts) from stb1" ,
- "select tan(c7) from stb1",
- "select tan(c8) from stb1",
- "select tan(c9) from stb1" ,
-
- "select tan(ts) from stbbb1" ,
- "select tan(c7) from stbbb1",
-
- "select tan(ts) from tbname",
- "select tan(c9) from tbname"
+ f"select tan(ts) from {dbname}.t1" ,
+ f"select tan(c7) from {dbname}.t1",
+ f"select tan(c8) from {dbname}.t1",
+ f"select tan(c9) from {dbname}.t1",
+ f"select tan(ts) from {dbname}.ct1" ,
+ f"select tan(c7) from {dbname}.ct1",
+ f"select tan(c8) from {dbname}.ct1",
+ f"select tan(c9) from {dbname}.ct1",
+ f"select tan(ts) from {dbname}.ct3" ,
+ f"select tan(c7) from {dbname}.ct3",
+ f"select tan(c8) from {dbname}.ct3",
+ f"select tan(c9) from {dbname}.ct3",
+ f"select tan(ts) from {dbname}.ct4" ,
+ f"select tan(c7) from {dbname}.ct4",
+ f"select tan(c8) from {dbname}.ct4",
+ f"select tan(c9) from {dbname}.ct4",
+ f"select tan(ts) from {dbname}.stb1" ,
+ f"select tan(c7) from {dbname}.stb1",
+ f"select tan(c8) from {dbname}.stb1",
+ f"select tan(c9) from {dbname}.stb1" ,
+
+ f"select tan(ts) from {dbname}.stbbb1" ,
+ f"select tan(c7) from {dbname}.stbbb1",
+
+ f"select tan(ts) from {dbname}.tbname",
+ f"select tan(c9) from {dbname}.tbname"
]
-
+
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
-
-
+
+
type_sql_lists = [
- "select tan(c1) from t1",
- "select tan(c2) from t1",
- "select tan(c3) from t1",
- "select tan(c4) from t1",
- "select tan(c5) from t1",
- "select tan(c6) from t1",
-
- "select tan(c1) from ct1",
- "select tan(c2) from ct1",
- "select tan(c3) from ct1",
- "select tan(c4) from ct1",
- "select tan(c5) from ct1",
- "select tan(c6) from ct1",
-
- "select tan(c1) from ct3",
- "select tan(c2) from ct3",
- "select tan(c3) from ct3",
- "select tan(c4) from ct3",
- "select tan(c5) from ct3",
- "select tan(c6) from ct3",
-
- "select tan(c1) from stb1",
- "select tan(c2) from stb1",
- "select tan(c3) from stb1",
- "select tan(c4) from stb1",
- "select tan(c5) from stb1",
- "select tan(c6) from stb1",
-
- "select tan(c6) as alisb from stb1",
- "select tan(c6) alisb from stb1",
+ f"select tan(c1) from {dbname}.t1",
+ f"select tan(c2) from {dbname}.t1",
+ f"select tan(c3) from {dbname}.t1",
+ f"select tan(c4) from {dbname}.t1",
+ f"select tan(c5) from {dbname}.t1",
+ f"select tan(c6) from {dbname}.t1",
+
+ f"select tan(c1) from {dbname}.ct1",
+ f"select tan(c2) from {dbname}.ct1",
+ f"select tan(c3) from {dbname}.ct1",
+ f"select tan(c4) from {dbname}.ct1",
+ f"select tan(c5) from {dbname}.ct1",
+ f"select tan(c6) from {dbname}.ct1",
+
+ f"select tan(c1) from {dbname}.ct3",
+ f"select tan(c2) from {dbname}.ct3",
+ f"select tan(c3) from {dbname}.ct3",
+ f"select tan(c4) from {dbname}.ct3",
+ f"select tan(c5) from {dbname}.ct3",
+ f"select tan(c6) from {dbname}.ct3",
+
+ f"select tan(c1) from {dbname}.stb1",
+ f"select tan(c2) from {dbname}.stb1",
+ f"select tan(c3) from {dbname}.stb1",
+ f"select tan(c4) from {dbname}.stb1",
+ f"select tan(c5) from {dbname}.stb1",
+ f"select tan(c6) from {dbname}.stb1",
+
+ f"select tan(c6) as alisb from {dbname}.stb1",
+ f"select tan(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
-
- def basic_tan_function(self):
- # basic query
- tdSql.query("select c1 from ct3")
+ def basic_tan_function(self, dbname="db"):
+
+ # basic query
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select tan(c1) from ct3")
+ tdSql.query(f"select tan(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tan(c2) from ct3")
+ tdSql.query(f"select tan(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tan(c3) from ct3")
+ tdSql.query(f"select tan(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tan(c4) from ct3")
+ tdSql.query(f"select tan(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tan(c5) from ct3")
+ tdSql.query(f"select tan(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tan(c6) from ct3")
+ tdSql.query(f"select tan(c6) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select tan(c1) from t1")
+ tdSql.query(f"select tan(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 1.557407725)
tdSql.checkData(3 , 0, -0.142546543)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_tan( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)), tan(abs(c5)) from t1")
-
+ self.check_result_auto_tan( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.t1", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)) from {dbname}.t1")
+
# used for sub table
- tdSql.query("select c2 ,tan(c2) from ct1")
+ tdSql.query(f"select c2 ,tan(c2) from {dbname}.ct1")
tdSql.checkData(0, 1, -0.226288661)
tdSql.checkData(1 , 1, 0.670533806)
tdSql.checkData(3 , 1, -1.325559275)
tdSql.checkData(4 , 1, 0.000000000)
- tdSql.query("select c1, c5 ,tan(c5) from ct4")
+ tdSql.query(f"select c1, c5 ,tan(c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, -0.605942929)
tdSql.checkData(2 , 2, 11.879355609)
tdSql.checkData(3 , 2, 0.395723765)
tdSql.checkData(5 , 2, None)
- self.check_result_auto_tan( "select c1, c2, c3 , c4, c5 from ct1", "select tan(c1), tan(c2) ,tan(c3), tan(c4), tan(c5) from ct1")
-
+ self.check_result_auto_tan( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select tan(c1), tan(c2) ,tan(c3), tan(c4), tan(c5) from {dbname}.ct1")
+
# nest query for tan functions
- tdSql.query("select c4 , tan(c4) ,tan(tan(c4)) , tan(tan(tan(c4))) from ct1;")
+ tdSql.query(f"select c4 , tan(c4) ,tan(tan(c4)) , tan(tan(tan(c4))) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 88)
tdSql.checkData(0 , 1 , 0.035420501)
tdSql.checkData(0 , 2 , 0.035435322)
@@ -281,52 +267,52 @@ class TDTestCase:
tdSql.checkData(11 , 2 , -0.040227928)
tdSql.checkData(11 , 3 , -0.040249642)
- # used for stable table
-
- tdSql.query("select tan(c1) from stb1")
+ # used for stable table
+
+ tdSql.query(f"select tan(c1) from {dbname}.stb1")
tdSql.checkRows(25)
-
+
# used for not exists table
- tdSql.error("select tan(c1) from stbbb1")
- tdSql.error("select tan(c1) from tbname")
- tdSql.error("select tan(c1) from ct5")
+ tdSql.error(f"select tan(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select tan(c1) from {dbname}.tbname")
+ tdSql.error(f"select tan(c1) from {dbname}.ct5")
+
+ # mix with common col
+ tdSql.query(f"select c1, tan(c1) from {dbname}.ct1")
+ tdSql.query(f"select c2, tan(c2) from {dbname}.ct4")
- # mix with common col
- tdSql.query("select c1, tan(c1) from ct1")
- tdSql.query("select c2, tan(c2) from ct4")
-
# mix with common functions
- tdSql.query("select c1, tan(c1),tan(c1), tan(tan(c1)) from ct4 ")
+ tdSql.query(f"select c1, tan(c1),tan(c1), tan(tan(c1)) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
tdSql.checkData(0 , 3 ,None)
-
+
tdSql.checkData(3 , 0 , 6)
tdSql.checkData(3 , 1 ,-0.291006191)
tdSql.checkData(3 , 2 ,-0.291006191)
tdSql.checkData(3 , 3 ,-0.299508909)
- tdSql.query("select c1, tan(c1),c5, floor(c5) from stb1 ")
+ tdSql.query(f"select c1, tan(c1),c5, floor(c5) from {dbname}.stb1 ")
# # mix with agg functions , not support
- tdSql.error("select c1, tan(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, tan(c1),c5, count(c5) from ct1 ")
- tdSql.error("select tan(c1), count(c5) from stb1 ")
- tdSql.error("select tan(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, tan(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, tan(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select tan(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select tan(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
+
-
- # # bug fix for compute
- tdSql.query("select c1, tan(c1) -0 ,tan(c1-4)-0 from ct4 ")
+ # # bug fix for compute
+ tdSql.query(f"select c1, tan(c1) -0 ,tan(c1-4)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -334,7 +320,7 @@ class TDTestCase:
tdSql.checkData(1, 1, -6.799711455)
tdSql.checkData(1, 2, 1.157821282)
- tdSql.query(" select c1, tan(c1) -0 ,tan(c1-0.1)-0.1 from ct4")
+ tdSql.query(f"select c1, tan(c1) -0 ,tan(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -342,35 +328,33 @@ class TDTestCase:
tdSql.checkData(1, 1, -6.799711455)
tdSql.checkData(1, 2, -21.815112681)
- tdSql.query("select c1, tan(c1), c2, tan(c2), c3, tan(c3) from ct1")
+ tdSql.query(f"select c1, tan(c1), c2, tan(c2), c3, tan(c3) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, tan(100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.tan(100000000))
-
- tdSql.query("select c1, tan(10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.tan(10000000000000))
- tdSql.query("select c1, tan(10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, tan(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, tan(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(1, 1, math.tan(10000000000000000000000000.0))
- tdSql.query("select c1, tan(10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, tan(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, tan(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.tan(10000000000000000000000000000000000.0))
- tdSql.query("select c1, tan(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, tan(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, tan(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.tan(10000000000000000000000000000000000000000.0))
- tdSql.query("select c1, tan(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -378,7 +362,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,-7.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -386,7 +370,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,-3.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from ct4 where c1>tan(c1) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from {dbname}.ct4 where c1>tan(c1) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,88888)
@@ -394,45 +378,40 @@ class TDTestCase:
tdSql.checkData(0,3,8.000000000)
tdSql.checkData(0,4,7.900000000)
tdSql.checkData(0,5,-7.000000000)
-
- def pow_Arithmetic(self):
- pass
-
- def check_boundary_values(self):
+
+ def check_boundary_values(self, dbname="bound_test"):
PI=3.1415926
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_tan( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)), tan(abs(c5)) from sub1_bound")
-
- self.check_result_auto_tan( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select tan(c1), tan(c2) ,tan(c3), tan(c3), tan(c2) ,tan(c1) from sub1_bound")
+ self.check_result_auto_tan( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.sub1_bound ", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)) from {dbname}.sub1_bound")
+
+ self.check_result_auto_tan( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select tan(c1), tan(c2) ,tan(c3), tan(c3), tan(c2) ,tan(c1) from {dbname}.sub1_bound")
+
+ self.check_result_auto_tan(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select tan(abs(c1)) from {dbname}.sub1_bound" )
- self.check_result_auto_tan("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select tan(abs(c1)) from sub1_bound" )
-
# check basic elem for table per row
- tdSql.query("select tan(abs(c1)) ,tan(abs(c2)) , tan(abs(c3)) , tan(abs(c4)), tan(abs(c5)), tan(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select tan(abs(c1)) ,tan(abs(c2)) , tan(abs(c3)) , tan(abs(c4)), tan(abs(c5)), tan(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.tan(2147483647))
tdSql.checkData(0,1,math.tan(9223372036854775807))
tdSql.checkData(0,2,math.tan(32767))
@@ -450,76 +429,71 @@ class TDTestCase:
tdSql.checkData(3,4,math.tan(339999995214436424907732413799364296704.00000))
# check + - * / in functions
- tdSql.query("select tan(abs(c1+1)) ,tan(abs(c2)) , tan(abs(c3*1)) , tan(abs(c4/2)), tan(abs(c5))/2, tan(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select tan(abs(c1+1)) ,tan(abs(c2)) , tan(abs(c3*1)) , tan(abs(c4/2)), tan(abs(c5))/2, tan(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.tan(2147483648.000000000))
tdSql.checkData(0,1,math.tan(9223372036854775807))
tdSql.checkData(0,2,math.tan(32767.000000000))
tdSql.checkData(0,3,math.tan(63.500000000))
- tdSql.execute("create stable st (ts timestamp, num1 float, num2 double) tags (t1 int);")
- tdSql.execute(f'create table tb1 using st tags (1)')
- tdSql.execute(f'create table tb2 using st tags (2)')
- tdSql.execute(f'create table tb3 using st tags (3)')
- tdSql.execute('insert into tb1 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb1 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb1 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb1 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb1 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
-
- tdSql.execute('insert into tb2 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb2 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb2 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb2 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb2 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
-
- for i in range(100):
- tdSql.execute('insert into tb3 values (now()+{}s, {}, {})'.format(i,PI*(5+i)/2 ,PI*(5+i)/2))
-
- self.check_result_auto_tan("select num1,num2 from tb3;" , "select tan(num1),tan(num2) from tb3")
-
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto_tan( " select c5 from stb1 order by ts " , "select tan(c5) from stb1 order by ts" )
- self.check_result_auto_tan( " select c5 from stb1 order by tbname " , "select tan(c5) from stb1 order by tbname" )
- self.check_result_auto_tan( " select c5 from stb1 where c1 > 0 order by tbname " , "select tan(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_tan( " select c5 from stb1 where c1 > 0 order by tbname " , "select tan(c5) from stb1 where c1 > 0 order by tbname" )
-
- self.check_result_auto_tan( " select t1,c5 from stb1 order by ts " , "select tan(t1), tan(c5) from stb1 order by ts" )
- self.check_result_auto_tan( " select t1,c5 from stb1 order by tbname " , "select tan(t1) ,tan(c5) from stb1 order by tbname" )
- self.check_result_auto_tan( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select tan(t1) ,tan(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_tan( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select tan(t1) , tan(c5) from stb1 where c1 > 0 order by tbname" )
+ tdSql.execute(f"create stable {dbname}.st (ts timestamp, num1 float, num2 double) tags (t1 int);")
+ tdSql.execute(f'create table {dbname}.tb1 using {dbname}.st tags (1)')
+ tdSql.execute(f'create table {dbname}.tb2 using {dbname}.st tags (2)')
+ tdSql.execute(f'create table {dbname}.tb3 using {dbname}.st tags (3)')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now(), {PI*2.5}, {PI*2.5})')
+
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now(), {PI*2.5}, {PI*2.5})')
+
+ self.check_result_auto_tan(f"select num1,num2 from {dbname}.tb3;" , f"select tan(num1),tan(num2) from {dbname}.tb3")
+
+ def support_super_table_test(self, dbname="db"):
+ self.check_result_auto_tan( f"select c5 from {dbname}.stb1 order by ts " , f"select tan(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_tan( f"select c5 from {dbname}.stb1 order by tbname " , f"select tan(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_tan( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_tan( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+
+ self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select tan(t1), tan(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select tan(t1) ,tan(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(t1) ,tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(t1) , tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
pass
-
-
+
+
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
-
+
self.prepare_datas()
- tdLog.printNoPrefix("==========step2:test errors ==============")
+ tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
-
- tdLog.printNoPrefix("==========step3:support types ============")
+
+ tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
- tdLog.printNoPrefix("==========step4: tan basic query ============")
+ tdLog.printNoPrefix("==========step4: tan basic query ============")
self.basic_tan_function()
- tdLog.printNoPrefix("==========step5: big number tan query ============")
+ tdLog.printNoPrefix("==========step5: big number tan query ============")
self.test_big_number()
-
- tdLog.printNoPrefix("==========step6: tan boundary query ============")
+ tdLog.printNoPrefix("==========step6: tan boundary query ============")
self.check_boundary_values()
- tdLog.printNoPrefix("==========step7: tan filter query ============")
+ tdLog.printNoPrefix("==========step7: tan filter query ============")
self.abs_func_filter()
diff --git a/tests/system-test/2-query/timetruncate.py b/tests/system-test/2-query/timetruncate.py
index 3551d8ee2cfb0669c23ed1754ebcb65c69e48daa..d773114c3c3d84bb6b102852d84223d68e0c0a2f 100644
--- a/tests/system-test/2-query/timetruncate.py
+++ b/tests/system-test/2-query/timetruncate.py
@@ -25,6 +25,7 @@ class TDTestCase:
self.ntbname = f'{self.dbname}.ntb'
self.stbname = f'{self.dbname}.stb'
self.ctbname = f'{self.dbname}.ctb'
+
def check_ms_timestamp(self,unit,date_time):
if unit.lower() == '1a':
for i in range(len(self.ts_str)):
@@ -45,11 +46,12 @@ class TDTestCase:
elif unit.lower() == '1d':
for i in range(len(self.ts_str)):
ts_result = self.get_time.get_ms_timestamp(str(tdSql.queryResult[i][0]))
- tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24)*24*60*60*1000)
+ tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24)*24*60*60*1000)
elif unit.lower() == '1w':
for i in range(len(self.ts_str)):
ts_result = self.get_time.get_ms_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24/7)*7*24*60*60*1000)
+
def check_us_timestamp(self,unit,date_time):
if unit.lower() == '1u':
for i in range(len(self.ts_str)):
@@ -74,11 +76,12 @@ class TDTestCase:
elif unit.lower() == '1d':
for i in range(len(self.ts_str)):
ts_result = self.get_time.get_us_timestamp(str(tdSql.queryResult[i][0]))
- tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24)*24*60*60*1000*1000 )
+ tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24)*24*60*60*1000*1000 )
elif unit.lower() == '1w':
for i in range(len(self.ts_str)):
ts_result = self.get_time.get_us_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24/7)*7*24*60*60*1000*1000)
+
def check_ns_timestamp(self,unit,date_time):
if unit.lower() == '1b':
for i in range(len(self.ts_str)):
@@ -100,21 +103,23 @@ class TDTestCase:
tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60)*60*60*1000*1000*1000 )
elif unit.lower() == '1d':
for i in range(len(self.ts_str)):
- tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24)*24*60*60*1000*1000*1000 )
+ tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24)*24*60*60*1000*1000*1000 )
elif unit.lower() == '1w':
for i in range(len(self.ts_str)):
tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24/7)*7*24*60*60*1000*1000*1000)
+
def check_tb_type(self,unit,tb_type):
- if tb_type.lower() == 'ntb':
+ if tb_type.lower() == 'ntb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.ntbname}')
elif tb_type.lower() == 'ctb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.ctbname}')
elif tb_type.lower() == 'stb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.stbname}')
+
def data_check(self,date_time,precision,tb_type):
for unit in self.time_unit:
if (unit.lower() == '1u' and precision.lower() == 'ms') or (unit.lower() == '1b' and precision.lower() == 'us') or (unit.lower() == '1b' and precision.lower() == 'ms'):
- if tb_type.lower() == 'ntb':
+ if tb_type.lower() == 'ntb':
tdSql.error(f'select timetruncate(ts,{unit}) from {self.ntbname}')
elif tb_type.lower() == 'ctb':
tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}')
@@ -139,16 +144,19 @@ class TDTestCase:
tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}')
elif tb_type.lower() == 'stb':
tdSql.error(f'select timetruncate(ts,{unit}) from {self.stbname}')
+
def function_check_ntb(self):
for precision in self.db_param_precision:
tdSql.execute(f'drop database if exists {self.dbname}')
tdSql.execute(f'create database {self.dbname} precision "{precision}"')
+ tdLog.info(f"=====now is in a {precision} database=====")
tdSql.execute(f'use {self.dbname}')
tdSql.execute(f'create table {self.ntbname} (ts timestamp,c0 int)')
for ts in self.ts_str:
tdSql.execute(f'insert into {self.ntbname} values("{ts}",1)')
date_time = self.get_time.time_transform(self.ts_str,precision)
self.data_check(date_time,precision,'ntb')
+
def function_check_stb(self):
for precision in self.db_param_precision:
tdSql.execute(f'drop database if exists {self.dbname}')
@@ -161,9 +169,11 @@ class TDTestCase:
date_time = self.get_time.time_transform(self.ts_str,precision)
self.data_check(date_time,precision,'ctb')
self.data_check(date_time,precision,'stb')
+
def run(self):
self.function_check_ntb()
self.function_check_stb()
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/system-test/2-query/tsbsQuery.py b/tests/system-test/2-query/tsbsQuery.py
index 617f7e74643c9b1dbb24834e3535b4bac669e4bb..04a80a74ad2d6ec21a97dc17bba05fb02df3830b 100644
--- a/tests/system-test/2-query/tsbsQuery.py
+++ b/tests/system-test/2-query/tsbsQuery.py
@@ -22,7 +22,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor(), False)
def create_ctable(self,tsql=None, dbName='db',stbName='stb',ctbPrefix='ctb',ctbNum=1):
tsql.execute("use %s" %dbName)
@@ -32,16 +32,16 @@ class TDTestCase:
for i in range(ctbNum):
tagValue = 'beijing'
if (i % 10 == 0):
- sql += " %s%d using %s (name,fleet,driver,device_version,load_capacity,fuel_capacity,nominal_fuel_consumption) tags('truck_%d', 'South%d','Trish%d','v2.%d', 1500+%d*20, 150+%d*2, 5+%d)"%(ctbPrefix,i,stbName,i,i,i,i,(1500+i*20),(150+i*2),(5+i))
+ sql += f" {dbName}.%s%d using %s (name,fleet,driver,device_version,load_capacity,fuel_capacity,nominal_fuel_consumption) tags('truck_%d', 'South%d','Trish%d','v2.%d', 1500+%d*20, 150+%d*2, 5+%d)"%(ctbPrefix,i,stbName,i,i,i,i,(1500+i*20),(150+i*2),(5+i))
else:
model = 'H-%d'%i
- sql += " %s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d', %d, %d,%d)"%(ctbPrefix,i,stbName,i,i,i,model,i,(1500+i*20),(150+i*2),(5+i))
+ sql += f" {dbName}.%s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d', %d, %d,%d)"%(ctbPrefix,i,stbName,i,i,i,model,i,(1500+i*20),(150+i*2),(5+i))
if (i > 0) and (i%1000 == 0):
tsql.execute(sql)
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -54,32 +54,32 @@ class TDTestCase:
startTs = int(round(t * 1000))
for i in range(ctbNum):
- sql += " %s%d values "%(ctbPrefix,i)
+ sql += f" {dbName}.%s%d values "%(ctbPrefix,i)
for j in range(rowsPerTbl):
if(ctbPrefix=="rct"):
sql += f"({startTs+j*60000}, {80+j}, {90+j}, {85+j}, {30+j*10}, {1.2*j}, {221+j*2}, {20+j*0.2}) "
elif ( ctbPrefix=="dct"):
status= random.randint(0,1)
- sql += f"( {startTs+j*60000}, {1+j*0.1},{1400+j*15}, {status} ) "
+ sql += f"( {startTs+j*60000}, {1+j*0.1},{1400+j*15}, {status} ) "
# tdLog.debug("1insert sql:%s"%sql)
if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
# tdLog.debug("2insert sql:%s"%sql)
tsql.execute(sql)
if j < rowsPerTbl - 1:
- sql = "insert into %s%d values " %(ctbPrefix,i)
+ sql = f"insert into {dbName}.%s%d values " %(ctbPrefix,i)
else:
sql = "insert into "
if sql != pre_insert:
# tdLog.debug("3insert sql:%s"%sql)
- tsql.execute(sql)
+ tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
def prepareData(self):
dbname="db_tsbs"
- stabname1="readings"
- stabname2="diagnostics"
- ctbnamePre1="rct"
+ stabname1=f"{dbname}.readings"
+ stabname2=f"{dbname}.diagnostics"
+ ctbnamePre1="rct"
ctbnamePre2="dct"
ctbNums=50
self.ctbNums=ctbNums
@@ -107,7 +107,7 @@ class TDTestCase:
# tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}',NULL ,'v2.3')")
# else:
# tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')")
- # for j in range(ctbNums):
+ # for j in range(ctbNums):
# for i in range(rowNUms):
# tdSql.execute(
# f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
@@ -133,106 +133,106 @@ class TDTestCase:
# tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query )
- def tsbsIotQuery(self,insertinto=True):
-
+ def tsbsIotQuery(self,insertinto=True, dbname="db_tsbs"):
+
tdSql.execute("use db_tsbs")
-
+
# test interval and partition
- tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ")
+ tdSql.query(f"select avg(velocity) as mean_velocity ,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ")
parRows=tdSql.queryRows
- tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
+ tdSql.query(f"select avg(velocity) as mean_velocity ,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
tdSql.checkRows(parRows)
-
-
- # test insert into
+
+
+ # test insert into
if insertinto == True :
- tdSql.execute("create table testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;")
- tdSql.query("insert into testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
-
- tdSql.query("insert into testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
+ tdSql.execute(f"create table {dbname}.testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;")
+ tdSql.query(f"insert into {dbname}.testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
+
+ tdSql.query(f"insert into {dbname}.testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
# test paitition interval fill
- tdSql.query("SELECT name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;")
+ tdSql.query(f"select name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv from {dbname}.readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;")
# test partition interval limit (PRcore-TD-17410)
- tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings partition BY name,driver,fleet interval (10m) limit 1);")
+ tdSql.query(f"select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity from {dbname}.readings partition BY name,driver,fleet interval (10m) limit 1);")
tdSql.checkRows(self.ctbNums)
# test partition interval Pseudo time-column
- tdSql.query("SELECT count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
+ tdSql.query(f"select count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
# 1 high-load:
- tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;")
+ tdSql.query(f"select ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity from {dbname}.diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;")
- tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;")
+ tdSql.query(f"select ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity from {dbname}.diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;")
- # 2 stationary-trucks
- tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)")
- tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name")
+ # 2 stationary-trucks
+ tdSql.query(f"select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity from {dbname}.readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)")
+ tdSql.query(f"select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity from {dbname}.readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name")
# 3 long-driving-sessions
- tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity FROM readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;")
+ tdSql.query(f"select name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity from {dbname}.readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;")
#4 long-daily-sessions
- tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT name,driver,avg(velocity) as mean_velocity FROM readings WHERE fleet ='West' AND ts > '2016-01-01T12:31:37Z' AND ts <= '2016-01-05T12:31:37Z' partition BY name,driver interval(10m) ) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 60")
+ tdSql.query(f"select name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT name,driver,avg(velocity) as mean_velocity from {dbname}.readings WHERE fleet ='West' AND ts > '2016-01-01T12:31:37Z' AND ts <= '2016-01-05T12:31:37Z' partition BY name,driver interval(10m) ) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 60")
# 5. avg-daily-driving-duration
- tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;")
+ tdSql.query(f"select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from {dbname}.readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;")
- # # 6. avg-daily-driving-session
+ # # 6. avg-daily-driving-session
# #taosc core dumped
- tdSql.query(" SELECT _wstart as ts,name,floor(avg(velocity)/5) AS mv FROM readings WHERE name is not null AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0);")
- # tdSql.query("select name,diff(mv) AS difka FROM (SELECT ts,name,mv FROM (SELECT _wstart as ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0))) group BY name ;")
- # tdSql.query("SELECT _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)")
+ tdSql.query(f"select _wstart as ts,name,floor(avg(velocity)/5) AS mv from {dbname}.readings WHERE name is not null AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0);")
+ # tdSql.query(f"select name,diff(mv) AS difka FROM (SELECT ts,name,mv FROM (SELECT _wstart as ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv from {dbname}.readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0))) group BY name ;")
+ # tdSql.query(f"select _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv from {dbname}.readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)")
# 7. avg-load
- tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;")
+ tdSql.query(f"select fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml from {dbname}.diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;")
- # 8. daily-activity
- tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
+ # 8. daily-activity
+ tdSql.query(f"select model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
- tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
+ tdSql.query(f"select model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
- tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
+ tdSql.query(f"select _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
- tdSql.query("SELECT _wstart as ts,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
+ tdSql.query(f"select _wstart as ts,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
# 9. breakdown-frequency
# NULL ---count(NULL)=0 expect count(NULL)= 100
- tdSql.query("SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ")
+ tdSql.query(f"select model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ")
parRows=tdSql.queryRows
assert parRows != 0 , "query result is wrong, query rows %d but expect > 0 " %parRows
- tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")
- sql="select model,ctc from (SELECT model,count(state_changed) as ctc FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= 1451606400000 AND ts < 1451952001000 ) WHERE ts >= 1451606400000 AND ts < 1451952001000 partition BY model interval(10m)) partition BY model) WHERE state_changed = 1 partition BY model )where model is null;"
+ tdSql.query(f"select model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")
+ sql=f"select model,ctc from (SELECT model,count(state_changed) as ctc FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs from {dbname}.diagnostics WHERE ts >= 1451606400000 AND ts < 1451952001000 ) WHERE ts >= 1451606400000 AND ts < 1451952001000 partition BY model interval(10m)) partition BY model) WHERE state_changed = 1 partition BY model )where model is null;"
# for i in range(2):
# tdSql.query("%s"%sql)
- # quertR1=tdSql.queryResult
+ # quertR1=tdSql.queryResult
# for j in range(50):
# tdSql.query("%s"%sql)
# quertR2=tdSql.queryResult
- # assert quertR1 == quertR2 , "%s != %s ,The results of multiple queries are different" %(quertR1,quertR2)
+ # assert quertR1 == quertR2 , "%s != %s ,The results of multiple queries are different" %(quertR1,quertR2)
+
-
#it's already supported:
# last-loc
- tdSql.query("SELECT last_row(ts),latitude,longitude,name,driver FROM readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;")
+ tdSql.query(f"select last_row(ts),latitude,longitude,name,driver from {dbname}.readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;")
#2. low-fuel
- tdSql.query("SELECT last_row(ts),name,driver,fuel_state,driver FROM diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;")
-
+ tdSql.query(f"select last_row(ts),name,driver,fuel_state,driver from {dbname}.diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;")
+
# 3. avg-vs-projected-fuel-consumption
- tdSql.query("select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from readings where velocity > 1 group by fleet")
-
- def run(self):
+ tdSql.query(f"select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from {dbname}.readings where velocity > 1 group by fleet")
+
+ def run(self):
tdLog.printNoPrefix("==========step1:create database and table,insert data ==============")
self.prepareData()
self.tsbsIotQuery()
diff --git a/tests/system-test/2-query/ttl_comment.py b/tests/system-test/2-query/ttl_comment.py
index 33bd61b66c85a2519513b9eee10bfcdaff8e8925..c26393158cefe46fb054d7bd3e28a621cab73199 100644
--- a/tests/system-test/2-query/ttl_comment.py
+++ b/tests/system-test/2-query/ttl_comment.py
@@ -26,20 +26,21 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
+ tdSql.init(conn.cursor(), False)
def run(self):
+ dbname="db"
tdSql.prepare()
- tdSql.error("create table ttl_table1(ts timestamp, i int) ttl 1.1")
- tdSql.error("create table ttl_table2(ts timestamp, i int) ttl 1e1")
- tdSql.error("create table ttl_table3(ts timestamp, i int) ttl -1")
+ tdSql.error(f"create table {dbname}.ttl_table1(ts timestamp, i int) ttl 1.1")
+ tdSql.error(f"create table {dbname}.ttl_table2(ts timestamp, i int) ttl 1e1")
+ tdSql.error(f"create table {dbname}.ttl_table3(ts timestamp, i int) ttl -1")
print("============== STEP 1 ===== test normal table")
- tdSql.execute("create table normal_table1(ts timestamp, i int)")
- tdSql.execute("create table normal_table2(ts timestamp, i int) comment '' ttl 3")
- tdSql.execute("create table normal_table3(ts timestamp, i int) ttl 2100000000020 comment 'hello'")
+ tdSql.execute(f"create table {dbname}.normal_table1(ts timestamp, i int)")
+ tdSql.execute(f"create table {dbname}.normal_table2(ts timestamp, i int) comment '' ttl 3")
+ tdSql.execute(f"create table {dbname}.normal_table3(ts timestamp, i int) ttl 2100000000020 comment 'hello'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'")
tdSql.checkData(0, 0, 'normal_table1')
@@ -58,32 +59,32 @@ class TDTestCase:
tdSql.checkData(0, 7, 2147483647)
tdSql.checkData(0, 8, 'hello')
- tdSql.execute("alter table normal_table1 comment 'nihao'")
+ tdSql.execute(f"alter table {dbname}.normal_table1 comment 'nihao'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'")
tdSql.checkData(0, 0, 'normal_table1')
tdSql.checkData(0, 8, 'nihao')
- tdSql.execute("alter table normal_table1 comment ''")
+ tdSql.execute(f"alter table {dbname}.normal_table1 comment ''")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'")
tdSql.checkData(0, 0, 'normal_table1')
tdSql.checkData(0, 8, '')
- tdSql.execute("alter table normal_table2 comment 'fly'")
+ tdSql.execute(f"alter table {dbname}.normal_table2 comment 'fly'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table2'")
tdSql.checkData(0, 0, 'normal_table2')
tdSql.checkData(0, 8, 'fly')
- tdSql.execute("alter table normal_table3 comment 'fly'")
+ tdSql.execute(f"alter table {dbname}.normal_table3 comment 'fly'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table3'")
tdSql.checkData(0, 0, 'normal_table3')
tdSql.checkData(0, 8, 'fly')
- tdSql.execute("alter table normal_table1 ttl 1")
+ tdSql.execute(f"alter table {dbname}.normal_table1 ttl 1")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'")
tdSql.checkData(0, 0, 'normal_table1')
tdSql.checkData(0, 7, 1)
- tdSql.execute("alter table normal_table3 ttl 0")
+ tdSql.execute(f"alter table {dbname}.normal_table3 ttl 0")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table3'")
tdSql.checkData(0, 0, 'normal_table3')
tdSql.checkData(0, 7, 0)
@@ -91,9 +92,9 @@ class TDTestCase:
print("============== STEP 2 ===== test super table")
- tdSql.execute("create table super_table1(ts timestamp, i int) tags(t int)")
- tdSql.execute("create table super_table2(ts timestamp, i int) tags(t int) comment ''")
- tdSql.execute("create table super_table3(ts timestamp, i int) tags(t int) comment 'super'")
+ tdSql.execute(f"create table {dbname}.super_table1(ts timestamp, i int) tags(t int)")
+ tdSql.execute(f"create table {dbname}.super_table2(ts timestamp, i int) tags(t int) comment ''")
+ tdSql.execute(f"create table {dbname}.super_table3(ts timestamp, i int) tags(t int) comment 'super'")
tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table1'")
tdSql.checkData(0, 0, 'super_table1')
@@ -110,32 +111,32 @@ class TDTestCase:
tdSql.checkData(0, 6, 'super')
- tdSql.execute("alter table super_table1 comment 'nihao'")
+ tdSql.execute(f"alter table {dbname}.super_table1 comment 'nihao'")
tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table1'")
tdSql.checkData(0, 0, 'super_table1')
tdSql.checkData(0, 6, 'nihao')
- tdSql.execute("alter table super_table1 comment ''")
+ tdSql.execute(f"alter table {dbname}.super_table1 comment ''")
tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table1'")
tdSql.checkData(0, 0, 'super_table1')
tdSql.checkData(0, 6, '')
- tdSql.execute("alter table super_table2 comment 'fly'")
+ tdSql.execute(f"alter table {dbname}.super_table2 comment 'fly'")
tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table2'")
tdSql.checkData(0, 0, 'super_table2')
tdSql.checkData(0, 6, 'fly')
- tdSql.execute("alter table super_table3 comment 'tdengine'")
+ tdSql.execute(f"alter table {dbname}.super_table3 comment 'tdengine'")
tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table3'")
tdSql.checkData(0, 0, 'super_table3')
tdSql.checkData(0, 6, 'tdengine')
print("============== STEP 3 ===== test child table")
- tdSql.execute("create table child_table1 using super_table1 tags(1) ttl 10")
- tdSql.execute("create table child_table2 using super_table1 tags(1) comment ''")
- tdSql.execute("create table child_table3 using super_table1 tags(1) comment 'child'")
- tdSql.execute("insert into child_table4 using super_table1 tags(1) values(now, 1)")
+ tdSql.execute(f"create table {dbname}.child_table1 using {dbname}.super_table1 tags(1) ttl 10")
+ tdSql.execute(f"create table {dbname}.child_table2 using {dbname}.super_table1 tags(1) comment ''")
+ tdSql.execute(f"create table {dbname}.child_table3 using {dbname}.super_table1 tags(1) comment 'child'")
+ tdSql.execute(f"insert into {dbname}.child_table4 using {dbname}.super_table1 tags(1) values(now, 1)")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table1'")
@@ -160,38 +161,38 @@ class TDTestCase:
tdSql.checkData(0, 8, None)
- tdSql.execute("alter table child_table1 comment 'nihao'")
+ tdSql.execute(f"alter table {dbname}.child_table1 comment 'nihao'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table1'")
tdSql.checkData(0, 0, 'child_table1')
tdSql.checkData(0, 8, 'nihao')
- tdSql.execute("alter table child_table1 comment ''")
+ tdSql.execute(f"alter table {dbname}.child_table1 comment ''")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table1'")
tdSql.checkData(0, 0, 'child_table1')
tdSql.checkData(0, 8, '')
- tdSql.execute("alter table child_table2 comment 'fly'")
+ tdSql.execute(f"alter table {dbname}.child_table2 comment 'fly'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table2'")
tdSql.checkData(0, 0, 'child_table2')
tdSql.checkData(0, 8, 'fly')
- tdSql.execute("alter table child_table3 comment 'tdengine'")
+ tdSql.execute(f"alter table {dbname}.child_table3 comment 'tdengine'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table3'")
tdSql.checkData(0, 0, 'child_table3')
tdSql.checkData(0, 8, 'tdengine')
- tdSql.execute("alter table child_table4 comment 'tdengine'")
+ tdSql.execute(f"alter table {dbname}.child_table4 comment 'tdengine'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table4'")
tdSql.checkData(0, 0, 'child_table4')
tdSql.checkData(0, 8, 'tdengine')
- tdSql.execute("alter table child_table4 ttl 9")
+ tdSql.execute(f"alter table {dbname}.child_table4 ttl 9")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table4'")
tdSql.checkData(0, 0, 'child_table4')
tdSql.checkData(0, 7, 9)
- tdSql.execute("alter table child_table3 ttl 9")
+ tdSql.execute(f"alter table {dbname}.child_table3 ttl 9")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table3'")
tdSql.checkData(0, 0, 'child_table3')
tdSql.checkData(0, 7, 9)
@@ -203,4 +204,3 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
-
diff --git a/tests/system-test/2-query/twa.py b/tests/system-test/2-query/twa.py
index 8281527bd46be8f1b14d6ee2098a2888c20a737a..62940477cf701d69e8c8e7568ae4b56d68518d81 100644
--- a/tests/system-test/2-query/twa.py
+++ b/tests/system-test/2-query/twa.py
@@ -7,10 +7,7 @@ import platform
import math
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
- "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
+ updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
@@ -21,46 +18,45 @@ class TDTestCase:
self.row_nums = 100
self.time_step = 1000
- def prepare_datas_of_distribute(self):
+ def prepare_datas_of_distribute(self, dbname="testdb"):
# prepate datas for 20 tables distributed at different vgroups
- tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
- tdSql.execute(" use testdb ")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
for i in range(self.tb_nums):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
ts = self.ts
for j in range(self.row_nums):
ts+=j*self.time_step
tdSql.execute(
- f"insert into ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
+ f"insert into {dbname}.ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
- tdSql.execute("insert into ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdLog.info(" prepare data for distributed_aggregate done! ")
- def twa_support_types(self):
- tdSql.query("desc stb1 ")
+ def twa_support_types(self, dbname="testdb"):
+ tdSql.query(f"desc {dbname}.stb1 ")
schema_list = tdSql.queryResult
for col_type in schema_list:
if col_type[1] in ["TINYINT" ,"SMALLINT","BIGINT" ,"INT","FLOAT","DOUBLE"]:
- tdSql.query(f" select twa({col_type[0]}) from stb1 partition by tbname ")
+ tdSql.query(f"select twa({col_type[0]}) from {dbname}.stb1 partition by tbname ")
else:
- tdSql.error(f" select twa({col_type[0]}) from stb1 partition by tbname ")
+ tdSql.error(f"select twa({col_type[0]}) from {dbname}.stb1 partition by tbname ")
- def check_distribute_datas(self):
+ def check_distribute_datas(self, dbname="testdb"):
# get vgroup_ids of all
- tdSql.query("show vgroups ")
+ tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
@@ -69,7 +65,7 @@ class TDTestCase:
vnode_tables[vgroup_id[0]]=[]
# check sub_table of per vnode ,make sure sub_table has been distributed
- tdSql.query(f"select * from information_schema.ins_tables where db_name = 'testdb' and table_name like 'ct%'")
+ tdSql.query(f"select * from information_schema.ins_tables where db_name = '{dbname}' and table_name like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
@@ -83,28 +79,28 @@ class TDTestCase:
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
- def distribute_twa_query(self):
+ def distribute_twa_query(self, dbname="testdb"):
# basic filter
- tdSql.query(" select twa(c1) from ct1 ")
+ tdSql.query(f"select twa(c1) from {dbname}.ct1 ")
tdSql.checkData(0,0,1.000000000)
- tdSql.query(" select twa(c1) from stb1 partition by tbname ")
+ tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,1.000000000)
- tdSql.query(" select twa(c2) from stb1 group by tbname ")
+ tdSql.query(f"select twa(c2) from {dbname}.stb1 group by tbname ")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,11111.000000000)
- tdSql.query("select twa(c1+c2) from stb1 partition by tbname ")
+ tdSql.query(f"select twa(c1+c2) from {dbname}.stb1 partition by tbname ")
tdSql.checkData(0,0,11112.000000000)
- tdSql.query("select twa(c1) from stb1 partition by t1")
+ tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by t1")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,1.000000000)
# union all
- tdSql.query(" select twa(c1) from stb1 partition by tbname union all select twa(c1) from stb1 partition by tbname ")
+ tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by tbname union all select twa(c1) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(40)
tdSql.checkData(0,0,1.000000000)
@@ -112,26 +108,23 @@ class TDTestCase:
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
- tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
- tdSql.execute(" create table tb1 using st tags(1) ")
- tdSql.execute(" create table tb2 using st tags(2) ")
+ tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
+ tdSql.execute(" create table db.tb1 using db.st tags(1) ")
+ tdSql.execute(" create table db.tb2 using db.st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
- tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
- tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)")
- tdSql.query(" select twa(tb1.c1), twa(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts ")
+ tdSql.query(f"select twa(tb1.c1), twa(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts ")
tdSql.checkRows(1)
tdSql.checkData(0,0,4.500000000)
tdSql.checkData(0,1,4.500000000)
- # group by
- tdSql.execute(" use testdb ")
-
# mixup with other functions
- tdSql.query(" select twa(c1),twa(c2),max(c1),elapsed(ts) from stb1 ")
+ tdSql.query(f"select twa(c1),twa(c2),max(c1),elapsed(ts) from {dbname}.stb1 ")
tdSql.checkData(0,0,1.000000000)
tdSql.checkData(0,1,11111.000000000)
tdSql.checkData(0,2,1)
diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py
index 88767ab888c9bfe11c329eecd41f78442436cafb..4040bb71cbb92849dd63d11627c93a2954a4a0d1 100644
--- a/tests/system-test/2-query/union.py
+++ b/tests/system-test/2-query/union.py
@@ -58,10 +58,10 @@ class TDTestCase:
def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
table_reference = tb_list[0]
- join_condition = table_reference
+ join_condition = f'{table_reference} {table_reference.split(".")[-1]}'
join = "inner join" if INNER else "join"
for i in range(len(tb_list[1:])):
- join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
+ join_condition += f" {join} {tb_list[i+1]} {tb_list[i+1].split('.')[-1]} on {table_reference.split('.')[-1]}.{filter}={tb_list[i+1].split('.')[-1]}.{filter}"
return join_condition
@@ -76,7 +76,6 @@ class TDTestCase:
elif query_conditon.startswith("min"):
query_conditon = query_conditon[4:-1]
-
if query_conditon:
return f" where {query_conditon} is not null"
if col in NUM_COL:
@@ -108,10 +107,10 @@ class TDTestCase:
return f"select {select_clause} from {from_clause} {where_condition} {group_condition}"
@property
- def __join_tblist(self):
+ def __join_tblist(self, dbname="db"):
return [
- ["ct1", "t1"],
- ["ct4", "t1"],
+ [f"{dbname}.ct1", f"{dbname}.t1"],
+ [f"{dbname}.ct4", f"{dbname}.t1"],
# ["ct1", "ct2", "ct4"],
# ["ct1", "ct2", "t1"],
# ["ct1", "ct4", "t1"],
@@ -120,10 +119,10 @@ class TDTestCase:
]
@property
- def __tb_liast(self):
+ def __tb_list(self, dbname="db"):
return [
- "ct1",
- "ct4",
+ f"{dbname}.ct1",
+ f"{dbname}.ct4",
]
def sql_list(self):
@@ -131,7 +130,8 @@ class TDTestCase:
__join_tblist = self.__join_tblist
for join_tblist in __join_tblist:
for join_tb in join_tblist:
- select_claus_list = self.__query_condition(join_tb)
+ join_tb_name = join_tb.split(".")[-1]
+ select_claus_list = self.__query_condition(join_tb_name)
for select_claus in select_claus_list:
group_claus = self.__group_condition( col=select_claus)
where_claus = self.__where_condition(query_conditon=select_claus)
@@ -141,9 +141,10 @@ class TDTestCase:
self.__single_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus),
)
)
- __no_join_tblist = self.__tb_liast
+ __no_join_tblist = self.__tb_list
for tb in __no_join_tblist:
- select_claus_list = self.__query_condition(tb)
+ tb_name = join_tb.split(".")[-1]
+ select_claus_list = self.__query_condition(tb_name)
for select_claus in select_claus_list:
group_claus = self.__group_condition(col=select_claus)
where_claus = self.__where_condition(query_conditon=select_claus)
@@ -230,31 +231,29 @@ class TDTestCase:
else:
tdSql.error(f"{sqls[i]} union {sqls[j+i]}")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
- tdSql.error( "show tables union show tables" )
- tdSql.error( "create table errtb1 union all create table errtb2" )
- tdSql.error( "drop table ct1 union all drop table ct3" )
- tdSql.error( "select c1 from ct1 union all drop table ct3" )
- tdSql.error( "select c1 from ct1 union all '' " )
- tdSql.error( " '' union all select c1 from ct1 " )
- # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ")
+ tdSql.error( f"show {dbname}.tables union show {dbname}.tables" )
+ tdSql.error( f"create table {dbname}.errtb1 union all create table {dbname}.errtb2" )
+ tdSql.error( f"drop table {dbname}.ct1 union all drop table {dbname}.ct3" )
+ tdSql.error( f"select c1 from {dbname}.ct1 union all drop table {dbname}.ct3" )
+ tdSql.error( f"select c1 from {dbname}.ct1 union all '' " )
+ tdSql.error( f" '' union all select c1 from{dbname}. ct1 " )
def all_test(self):
self.__test_error()
self.union_check()
-
- def __create_tb(self):
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
- ) tags (t1 int)
+ ) tags (tag1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -264,30 +263,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
- { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -303,7 +301,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -319,13 +317,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -341,7 +339,6 @@ class TDTestCase:
'''
)
-
def run(self):
tdSql.prepare()
@@ -355,8 +352,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py
index ccf7e287e27d7768acedc17b55969d1fab6d30cd..ec77cbbcdc9d83d0a63b54fbe377c14d8645ce52 100644
--- a/tests/system-test/2-query/unique.py
+++ b/tests/system-test/2-query/unique.py
@@ -11,49 +11,46 @@ from util.sql import *
from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -69,84 +66,84 @@ class TDTestCase:
'''
)
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select unique from t1",
- "select unique(123--123)==1 from t1",
- "select unique(123,123) from t1",
- "select unique(c1,ts) from t1",
- "select unique(c1,c1,ts) from t1",
- "select unique(c1) as 'd1' from t1",
- "select unique(c1 ,c2 ) from t1",
- "select unique(c1 ,NULL) from t1",
- "select unique(,) from t1;",
- "select unique(floor(c1) ab from t1)",
- "select unique(c1) as int from t1",
- "select unique('c1') from t1",
- "select unique(NULL) from t1",
- "select unique('') from t1",
- "select unique(c%) from t1",
- "select unique(t1) from t1",
- "select unique(True) from t1",
- "select unique(c1) , count(c1) from t1",
- "select unique(c1) , avg(c1) from t1",
- "select unique(c1) , min(c1) from t1",
- "select unique(c1) , spread(c1) from t1",
- "select unique(c1) , diff(c1) from t1",
- #"select unique(c1) , abs(c1) from t1", # support
- #"select unique(c1) , c1 from t1",
- "select unique from stb1 partition by tbname",
- "select unique(123--123)==1 from stb1 partition by tbname",
- "select unique(123) from stb1 partition by tbname",
- "select unique(c1,ts) from stb1 partition by tbname",
- "select unique(c1,c1,ts) from stb1 partition by tbname",
- "select unique(c1) as 'd1' from stb1 partition by tbname",
- "select unique(c1 ,c2 ) from stb1 partition by tbname",
- "select unique(c1 ,NULL) from stb1 partition by tbname",
- "select unique(,) from stb1 partition by tbname;",
- #"select unique(floor(c1) ab from stb1 partition by tbname)", # support
- #"select unique(c1) as int from stb1 partition by tbname",
- "select unique('c1') from stb1 partition by tbname",
- "select unique(NULL) from stb1 partition by tbname",
- "select unique('') from stb1 partition by tbname",
- "select unique(c%) from stb1 partition by tbname",
- #"select unique(t1) from stb1 partition by tbname", # support
- "select unique(True) from stb1 partition by tbname",
- "select unique(c1) , count(c1) from stb1 partition by tbname",
- "select unique(c1) , avg(c1) from stb1 partition by tbname",
- "select unique(c1) , min(c1) from stb1 partition by tbname",
- "select unique(c1) , spread(c1) from stb1 partition by tbname",
- "select unique(c1) , diff(c1) from stb1 partition by tbname",
- #"select unique(c1) , abs(c1) from stb1 partition by tbname", # support
- #"select unique(c1) , c1 from stb1 partition by tbname" # support
+ f"select unique from {dbname}.t1",
+ f"select unique(123--123)==1 from {dbname}.t1",
+ f"select unique(123,123) from {dbname}.t1",
+ f"select unique(c1,ts) from {dbname}.t1",
+ f"select unique(c1,c1,ts) from {dbname}.t1",
+ f"select unique(c1) as 'd1' from {dbname}.t1",
+ f"select unique(c1 ,c2 ) from {dbname}.t1",
+ f"select unique(c1 ,NULL) from {dbname}.t1",
+ f"select unique(,) from {dbname}.t1;",
+ f"select unique(floor(c1) ab from {dbname}.t1)",
+ f"select unique(c1) as int from {dbname}.t1",
+ f"select unique('c1') from {dbname}.t1",
+ f"select unique(NULL) from {dbname}.t1",
+ f"select unique('') from {dbname}.t1",
+ f"select unique(c%) from {dbname}.t1",
+ f"select unique(t1) from {dbname}.t1",
+ f"select unique(True) from {dbname}.t1",
+ f"select unique(c1) , count(c1) from {dbname}.t1",
+ f"select unique(c1) , avg(c1) from {dbname}.t1",
+ f"select unique(c1) , min(c1) from {dbname}.t1",
+ f"select unique(c1) , spread(c1) from {dbname}.t1",
+ f"select unique(c1) , diff(c1) from {dbname}.t1",
+ #f"select unique(c1) , abs(c1) from {dbname}.t1", # support
+ #f"select unique(c1) , c1 from {dbname}.t1",
+ f"select unique from {dbname}.stb1 partition by tbname",
+ f"select unique(123--123)==1 from {dbname}.stb1 partition by tbname",
+ f"select unique(123) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1,ts) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1,c1,ts) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) as 'd1' from {dbname}.stb1 partition by tbname",
+ f"select unique(c1 ,c2 ) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1 ,NULL) from {dbname}.stb1 partition by tbname",
+ f"select unique(,) from {dbname}.stb1 partition by tbname;",
+ #f"select unique(floor(c1) ab from {dbname}.stb1 partition by tbname)", # support
+ #f"select unique(c1) as int from {dbname}.stb1 partition by tbname",
+ f"select unique('c1') from {dbname}.stb1 partition by tbname",
+ f"select unique(NULL) from {dbname}.stb1 partition by tbname",
+ f"select unique('') from {dbname}.stb1 partition by tbname",
+ f"select unique(c%) from {dbname}.stb1 partition by tbname",
+ #f"select unique(t1) from {dbname}.stb1 partition by tbname", # support
+ f"select unique(True) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) , count(c1) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) , avg(c1) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) , min(c1) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) , spread(c1) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) , diff(c1) from {dbname}.stb1 partition by tbname",
+ #f"select unique(c1) , abs(c1) from {dbname}.stb1 partition by tbname", # support
+ #f"select unique(c1) , c1 from {dbname}.stb1 partition by tbname" # support
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
pass
- def support_types(self):
+ def support_types(self, dbname="db"):
other_no_value_types = [
- "select unique(ts) from t1" ,
- "select unique(c7) from t1",
- "select unique(c8) from t1",
- "select unique(c9) from t1",
- "select unique(ts) from ct1" ,
- "select unique(c7) from ct1",
- "select unique(c8) from ct1",
- "select unique(c9) from ct1",
- "select unique(ts) from ct3" ,
- "select unique(c7) from ct3",
- "select unique(c8) from ct3",
- "select unique(c9) from ct3",
- "select unique(ts) from ct4" ,
- "select unique(c7) from ct4",
- "select unique(c8) from ct4",
- "select unique(c9) from ct4",
- "select unique(ts) from stb1 partition by tbname" ,
- "select unique(c7) from stb1 partition by tbname",
- "select unique(c8) from stb1 partition by tbname",
- "select unique(c9) from stb1 partition by tbname"
+ f"select unique(ts) from {dbname}.t1" ,
+ f"select unique(c7) from {dbname}.t1",
+ f"select unique(c8) from {dbname}.t1",
+ f"select unique(c9) from {dbname}.t1",
+ f"select unique(ts) from {dbname}.ct1" ,
+ f"select unique(c7) from {dbname}.ct1",
+ f"select unique(c8) from {dbname}.ct1",
+ f"select unique(c9) from {dbname}.ct1",
+ f"select unique(ts) from {dbname}.ct3" ,
+ f"select unique(c7) from {dbname}.ct3",
+ f"select unique(c8) from {dbname}.ct3",
+ f"select unique(c9) from {dbname}.ct3",
+ f"select unique(ts) from {dbname}.ct4" ,
+ f"select unique(c7) from {dbname}.ct4",
+ f"select unique(c8) from {dbname}.ct4",
+ f"select unique(c9) from {dbname}.ct4",
+ f"select unique(ts) from {dbname}.stb1 partition by tbname" ,
+ f"select unique(c7) from {dbname}.stb1 partition by tbname",
+ f"select unique(c8) from {dbname}.stb1 partition by tbname",
+ f"select unique(c9) from {dbname}.stb1 partition by tbname"
]
for type_sql in other_no_value_types:
@@ -154,43 +151,43 @@ class TDTestCase:
tdLog.info("support type ok , sql is : %s"%type_sql)
type_sql_lists = [
- "select unique(c1) from t1",
- "select unique(c2) from t1",
- "select unique(c3) from t1",
- "select unique(c4) from t1",
- "select unique(c5) from t1",
- "select unique(c6) from t1",
-
- "select unique(c1) from ct1",
- "select unique(c2) from ct1",
- "select unique(c3) from ct1",
- "select unique(c4) from ct1",
- "select unique(c5) from ct1",
- "select unique(c6) from ct1",
-
- "select unique(c1) from ct3",
- "select unique(c2) from ct3",
- "select unique(c3) from ct3",
- "select unique(c4) from ct3",
- "select unique(c5) from ct3",
- "select unique(c6) from ct3",
-
- "select unique(c1) from stb1 partition by tbname",
- "select unique(c2) from stb1 partition by tbname",
- "select unique(c3) from stb1 partition by tbname",
- "select unique(c4) from stb1 partition by tbname",
- "select unique(c5) from stb1 partition by tbname",
- "select unique(c6) from stb1 partition by tbname",
-
- "select unique(c6) as alisb from stb1 partition by tbname",
- "select unique(c6) alisb from stb1 partition by tbname",
+ f"select unique(c1) from {dbname}.t1",
+ f"select unique(c2) from {dbname}.t1",
+ f"select unique(c3) from {dbname}.t1",
+ f"select unique(c4) from {dbname}.t1",
+ f"select unique(c5) from {dbname}.t1",
+ f"select unique(c6) from {dbname}.t1",
+
+ f"select unique(c1) from {dbname}.ct1",
+ f"select unique(c2) from {dbname}.ct1",
+ f"select unique(c3) from {dbname}.ct1",
+ f"select unique(c4) from {dbname}.ct1",
+ f"select unique(c5) from {dbname}.ct1",
+ f"select unique(c6) from {dbname}.ct1",
+
+ f"select unique(c1) from {dbname}.ct3",
+ f"select unique(c2) from {dbname}.ct3",
+ f"select unique(c3) from {dbname}.ct3",
+ f"select unique(c4) from {dbname}.ct3",
+ f"select unique(c5) from {dbname}.ct3",
+ f"select unique(c6) from {dbname}.ct3",
+
+ f"select unique(c1) from {dbname}.stb1 partition by tbname",
+ f"select unique(c2) from {dbname}.stb1 partition by tbname",
+ f"select unique(c3) from {dbname}.stb1 partition by tbname",
+ f"select unique(c4) from {dbname}.stb1 partition by tbname",
+ f"select unique(c5) from {dbname}.stb1 partition by tbname",
+ f"select unique(c6) from {dbname}.stb1 partition by tbname",
+
+ f"select unique(c6) as alisb from {dbname}.stb1 partition by tbname",
+ f"select unique(c6) alisb from {dbname}.stb1 partition by tbname",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
def check_unique_table(self , unique_sql):
- # unique_sql = "select unique(c1) from ct1"
+ # unique_sql = f"select unique(c1) from {dbname}.ct1"
origin_sql = unique_sql.replace("unique(","").replace(")","")
tdSql.query(unique_sql)
unique_result = tdSql.queryResult
@@ -219,83 +216,83 @@ class TDTestCase:
else:
tdLog.exit(" unique query check fail , unique sql is: %s " %unique_sql)
- def basic_unique_function(self):
+ def basic_unique_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select unique(c1) from ct3")
+ tdSql.query(f"select unique(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select unique(c2) from ct3")
+ tdSql.query(f"select unique(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select unique(c3) from ct3")
+ tdSql.query(f"select unique(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select unique(c4) from ct3")
+ tdSql.query(f"select unique(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select unique(c5) from ct3")
+ tdSql.query(f"select unique(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select unique(c6) from ct3")
+ tdSql.query(f"select unique(c6) from {dbname}.ct3")
# will support _rowts mix with
- # tdSql.query("select unique(c6),_rowts from ct3")
+ # tdSql.query(f"select unique(c6),_rowts from {dbname}.ct3")
# auto check for t1 table
# used for regular table
- tdSql.query("select unique(c1) from t1")
+ tdSql.query(f"select unique(c1) from {dbname}.t1")
- tdSql.query("desc t1")
+ tdSql.query(f"desc {dbname}.t1")
col_lists_rows = tdSql.queryResult
col_lists = []
for col_name in col_lists_rows:
col_lists.append(col_name[0])
for col in col_lists:
- self.check_unique_table(f"select unique({col}) from t1")
+ self.check_unique_table(f"select unique({col}) from {dbname}.t1")
# unique with super tags
- tdSql.query("select unique(c1) from ct1")
+ tdSql.query(f"select unique(c1) from {dbname}.ct1")
tdSql.checkRows(10)
- tdSql.query("select unique(c1) from ct4")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4")
tdSql.checkRows(10)
- #tdSql.error("select unique(c1),tbname from ct1") #support
- #tdSql.error("select unique(c1),t1 from ct1") #support
+ #tdSql.error(f"select unique(c1),tbname from {dbname}.ct1") #support
+ #tdSql.error(f"select unique(c1),t1 from {dbname}.ct1") #support
# unique with common col
- #tdSql.error("select unique(c1) ,ts from ct1")
- #tdSql.error("select unique(c1) ,c1 from ct1")
+ #tdSql.error(f"select unique(c1) ,ts from {dbname}.ct1")
+ #tdSql.error(f"select unique(c1) ,c1 from {dbname}.ct1")
# unique with scalar function
- #tdSql.error("select unique(c1) ,abs(c1) from ct1")
- tdSql.error("select unique(c1) , unique(c2) from ct1")
- #tdSql.error("select unique(c1) , abs(c2)+2 from ct1")
+ #tdSql.error(f"select unique(c1) ,abs(c1) from {dbname}.ct1")
+ tdSql.error(f"select unique(c1) , unique(c2) from {dbname}.ct1")
+ #tdSql.error(f"select unique(c1) , abs(c2)+2 from {dbname}.ct1")
# unique with aggregate function
- tdSql.error("select unique(c1) ,sum(c1) from ct1")
- tdSql.error("select unique(c1) ,max(c1) from ct1")
- tdSql.error("select unique(c1) ,csum(c1) from ct1")
- tdSql.error("select unique(c1) ,count(c1) from ct1")
+ tdSql.error(f"select unique(c1) ,sum(c1) from {dbname}.ct1")
+ tdSql.error(f"select unique(c1) ,max(c1) from {dbname}.ct1")
+ tdSql.error(f"select unique(c1) ,csum(c1) from {dbname}.ct1")
+ tdSql.error(f"select unique(c1) ,count(c1) from {dbname}.ct1")
# unique with filter where
- tdSql.query("select unique(c1) from ct4 where c1 is null")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4 where c1 is null")
tdSql.checkData(0, 0, None)
- tdSql.query("select unique(c1) from ct4 where c1 >2 order by 1")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4 where c1 >2 order by 1")
tdSql.checkData(0, 0, 3)
tdSql.checkData(1, 0, 4)
tdSql.checkData(2, 0, 5)
tdSql.checkData(5, 0, 8)
- tdSql.query("select unique(c1) from ct4 where c2 between 0 and 99999 order by 1 desc")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4 where c2 between 0 and 99999 order by 1 desc")
tdSql.checkData(0, 0, 8)
tdSql.checkData(1, 0, 7)
tdSql.checkData(2, 0, 6)
@@ -307,43 +304,43 @@ class TDTestCase:
tdSql.checkData(8, 0, 0)
# unique with union all
- tdSql.query("select unique(c1) from ct4 union all select c1 from ct1")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4 union all select c1 from {dbname}.ct1")
tdSql.checkRows(23)
- tdSql.query("select unique(c1) from ct4 union all select distinct(c1) from ct4")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4 union all select distinct(c1) from {dbname}.ct4")
tdSql.checkRows(20)
- tdSql.query("select unique(c2) from ct4 union all select abs(c2)/2 from ct4")
+ tdSql.query(f"select unique(c2) from {dbname}.ct4 union all select abs(c2)/2 from {dbname}.ct4")
tdSql.checkRows(22)
# unique with join
# prepare join datas with same ts
tdSql.execute(" use db ")
- tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table tb1 using st1 tags(1)")
- tdSql.execute(" create table tb2 using st1 tags(2)")
+        tdSql.execute(f" create stable {dbname}.st1 (ts timestamp , num int) tags(ind int)")
+        tdSql.execute(f" create table {dbname}.tb1 using {dbname}.st1 tags(1)")
+        tdSql.execute(f" create table {dbname}.tb2 using {dbname}.st1 tags(2)")
-        tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)")
-        tdSql.execute(" create table ttb1 using st2 tags(1)")
-        tdSql.execute(" create table ttb2 using st2 tags(2)")
+        tdSql.execute(f" create stable {dbname}.st2 (ts timestamp , num int) tags(ind int)")
+        tdSql.execute(f" create table {dbname}.ttb1 using {dbname}.st2 tags(1)")
+        tdSql.execute(f" create table {dbname}.ttb2 using {dbname}.st2 tags(2)")
start_ts = 1622369635000 # 2021-05-30 18:13:55
for i in range(10):
ts_value = start_ts+i*1000
- tdSql.execute(f" insert into tb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into tb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb2 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb2 values({ts_value} , {i})")
- tdSql.query("select unique(tb2.num) from tb1, tb2 where tb1.ts=tb2.ts order by 1")
+        tdSql.query(f"select unique(tb2.num) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts order by 1")
tdSql.checkRows(10)
tdSql.checkData(0,0,0)
tdSql.checkData(1,0,1)
tdSql.checkData(2,0,2)
tdSql.checkData(9,0,9)
- tdSql.query("select unique(tb2.num) from tb1, tb2 where tb1.ts=tb2.ts union all select unique(tb1.num) from tb1, tb2 where tb1.ts=tb2.ts order by 1")
+        tdSql.query(f"select unique(tb2.num) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts union all select unique(tb1.num) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts order by 1")
tdSql.checkRows(20)
tdSql.checkData(0,0,0)
tdSql.checkData(2,0,1)
@@ -351,23 +348,23 @@ class TDTestCase:
tdSql.checkData(18,0,9)
# nest query
- # tdSql.query("select unique(c1) from (select c1 from ct1)")
- tdSql.query("select c1 from (select unique(c1) c1 from ct4) order by 1 desc nulls first")
+ # tdSql.query(f"select unique(c1) from (select c1 from {dbname}.ct1)")
+ tdSql.query(f"select c1 from (select unique(c1) c1 from {dbname}.ct4) order by 1 desc nulls first")
tdSql.checkRows(10)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, 8)
tdSql.checkData(9, 0, 0)
- tdSql.query("select sum(c1) from (select unique(c1) c1 from ct1)")
+ tdSql.query(f"select sum(c1) from (select unique(c1) c1 from {dbname}.ct1)")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 45)
- tdSql.query("select sum(c1) from (select distinct(c1) c1 from ct1) union all select sum(c1) from (select unique(c1) c1 from ct1)")
+ tdSql.query(f"select sum(c1) from (select distinct(c1) c1 from {dbname}.ct1) union all select sum(c1) from (select unique(c1) c1 from {dbname}.ct1)")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 45)
tdSql.checkData(1, 0, 45)
- tdSql.query("select 1-abs(c1) from (select unique(c1) c1 from ct4) order by 1 nulls first")
+ tdSql.query(f"select 1-abs(c1) from (select unique(c1) c1 from {dbname}.ct4) order by 1 nulls first")
tdSql.checkRows(10)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, -7.000000000)
@@ -375,104 +372,103 @@ class TDTestCase:
# bug for stable
#partition by tbname
- # tdSql.query(" select unique(c1) from stb1 partition by tbname ")
+ # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ")
# tdSql.checkRows(21)
- # tdSql.query(" select unique(c1) from stb1 partition by tbname ")
+ # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ")
# tdSql.checkRows(21)
# group by
- tdSql.error("select unique(c1) from ct1 group by c1")
- tdSql.error("select unique(c1) from ct1 group by tbname")
+ tdSql.error(f"select unique(c1) from {dbname}.ct1 group by c1")
+ tdSql.error(f"select unique(c1) from {dbname}.ct1 group by tbname")
# super table
# super table
- tdSql.error("select tbname , tail(c1,2) from stb1 group by tbname")
- tdSql.query("select tail(c1,2) from stb1 partition by tbname")
+ tdSql.error(f"select tbname , tail(c1,2) from {dbname}.stb1 group by tbname")
+ tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(4)
# bug need fix
- # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname")
# tdSql.checkRows(4)
- # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname order by tbname")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname order by tbname")
# tdSql.checkRows(4)
- # tdSql.query(" select tbname , count(c1) from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , count(c1) from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , max(c1) ,c1 from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , max(c1) ,c1 from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname ,first(c1) from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname ,first(c1) from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- tdSql.query("select tail(c1,2) from stb1 partition by tbname")
+ tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(4)
# # bug need fix
- # tdSql.query(" select tbname , unique(c1) from stb1 where t1 = 0 partition by tbname ")
+ # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where t1 = 0 partition by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , unique(c1) from stb1 where t1 = 0 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where t1 = 0 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , unique(c1) from stb1 where c1 = 0 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where c1 = 0 partition by tbname order by tbname ")
# tdSql.checkRows(3)
- # tdSql.query(" select tbname , unique(c1) from stb1 where c1 = 0 partition by tbname ")
+ # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where c1 = 0 partition by tbname ")
# tdSql.checkRows(3)
- tdSql.query(" select unique(t1) from stb1 ")
+ tdSql.query(f"select unique(t1) from {dbname}.stb1 ")
tdSql.checkRows(2)
- tdSql.query(" select unique(t1+c1) from stb1 ")
+ tdSql.query(f"select unique(t1+c1) from {dbname}.stb1 ")
tdSql.checkRows(13)
- tdSql.query(" select unique(t1+c1) from stb1 partition by tbname ")
+ tdSql.query(f"select unique(t1+c1) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(20)
- tdSql.query(" select unique(t1) from stb1 partition by tbname ")
+ tdSql.query(f"select unique(t1) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(2)
# nest query
- tdSql.query(" select unique(c1) from (select _rowts , t1 ,c1 , tbname from stb1 ) ")
+ tdSql.query(f"select unique(c1) from (select _rowts , t1 ,c1 , tbname from {dbname}.stb1 ) ")
tdSql.checkRows(11)
tdSql.checkData(0,0,6)
tdSql.checkData(10,0,3)
- tdSql.query("select unique(t1) from (select _rowts , t1 , tbname from stb1 )")
+ tdSql.query(f"select unique(t1) from (select _rowts , t1 , tbname from {dbname}.stb1 )")
tdSql.checkRows(2)
tdSql.checkData(0,0,4)
tdSql.checkData(1,0,1)
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()+1s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()+2s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+2s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()+3s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+3s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- tdSql.query("select unique(c2) from sub1_bound order by 1 desc")
+ tdSql.query(f"select unique(c2) from {dbname}.sub1_bound order by 1 desc")
tdSql.checkRows(5)
tdSql.checkData(0,0,9223372036854775807)
diff --git a/tests/system-test/2-query/upper.py b/tests/system-test/2-query/upper.py
index bb485161dd12885175c470e8b5542b1ab011f186..f15a6f3ba76d3acb5645f443cf068d4cce7d9755 100644
--- a/tests/system-test/2-query/upper.py
+++ b/tests/system-test/2-query/upper.py
@@ -95,16 +95,16 @@ class TDTestCase:
return sqls
- def __test_current(self):
+ def __test_current(self, dbname="db"):
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__upper_current_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__upper_err_check(tb):
@@ -112,22 +112,20 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self, dbname="db"):
+ self.__test_current(dbname)
+ self.__test_error(dbname)
-
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
- ) tags (t1 int)
+ ) tags (tag1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -137,83 +135,82 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
- ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } )
- ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } )
+ f'''insert into {dbname}.ct1 values
+ ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
+ ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
- { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000}
+ { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
- { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000}
+ { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
- { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
- { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
- "binary_{i}", "nchar_{i}", { now_time - 1000 * i } )
+ "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
- "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
- "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
-
def run(self):
tdSql.prepare()
@@ -226,8 +223,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
diff --git a/tests/system-test/2-query/varchar.py b/tests/system-test/2-query/varchar.py
index 5cc6c8e39965453c646cb267774564af1a66f42d..17c3ea633357cf16a8b17e52c180192d07e52a87 100644
--- a/tests/system-test/2-query/varchar.py
+++ b/tests/system-test/2-query/varchar.py
@@ -14,43 +14,44 @@ class TDTestCase:
tdSql.init(conn.cursor())
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
+ dbname = "db"
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 varchar(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 varchar(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
tdLog.printNoPrefix("==========step2:insert data")
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'varchar0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'varchar9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'varchar0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'varchar9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "varchar1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "varchar2", "nchar2", now()+2a )
@@ -70,7 +71,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3: cast on varchar")
- tdSql.query("select c8 from ct1")
+ tdSql.query(f"select c8 from {dbname}.ct1")
for i in range(tdSql.queryRows):
tdSql.checkData(i,0, data_ct1_c8[i])
diff --git a/tests/system-test/7-tmq/stbTagFilter-1ctb.py b/tests/system-test/7-tmq/stbTagFilter-1ctb.py
index 6a26d2ce1f38774b2d63031c518883641c23f864..6cb152342be5c80b5f755d0b3f2f7e7bf1c7894a 100644
--- a/tests/system-test/7-tmq/stbTagFilter-1ctb.py
+++ b/tests/system-test/7-tmq/stbTagFilter-1ctb.py
@@ -259,7 +259,6 @@ class TDTestCase:
self.tmqCase1()
self.tmqCase2()
-
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
diff --git a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py
index 20e363341f914b66e5ba73f0d5521b393e5743f1..4cb208b616097815ce8dfb099854c5c936fcf08c 100644
--- a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py
+++ b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py
@@ -99,8 +99,8 @@ class TDTestCase:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
- if not ((totalConsumeRows >= expectrowcnt * 3/4) and (totalConsumeRows < expectrowcnt)):
- tdLog.exit("tmq consume rows error with snapshot = 0!")
+ # if not ((totalConsumeRows >= expectrowcnt * 3/4) and (totalConsumeRows < expectrowcnt)):
+ # tdLog.exit("tmq consume rows error with snapshot = 0!")
tdLog.info("wait subscriptions exit ....")
tmqCom.waitSubscriptionExit(tdSql, topicFromDb)
@@ -192,8 +192,8 @@ class TDTestCase:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
- if not ((totalConsumeRows >= expectrowcnt / 2 * (1 + 3/4)) and (totalConsumeRows < expectrowcnt)):
- tdLog.exit("tmq consume rows error with snapshot = 0!")
+ # if not ((totalConsumeRows >= expectrowcnt / 2 * (1 + 3/4)) and (totalConsumeRows < expectrowcnt)):
+ # tdLog.exit("tmq consume rows error with snapshot = 0!")
tdLog.info("wait subscriptions exit ....")
tmqCom.waitSubscriptionExit(tdSql, topicFromDb)
diff --git a/tests/system-test/7-tmq/tmqDropStbCtb.py b/tests/system-test/7-tmq/tmqDropStbCtb.py
index 992a128ac077a35708a1ef123ba61bf3352feb78..704811d083c47db53592cce8db85c71733a29057 100644
--- a/tests/system-test/7-tmq/tmqDropStbCtb.py
+++ b/tests/system-test/7-tmq/tmqDropStbCtb.py
@@ -155,8 +155,9 @@ class TDTestCase:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
- if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)):
- tdLog.exit("tmq consume rows error with snapshot = 0!")
+ if self.snapshot == 0:
+ if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)):
+ tdLog.exit("tmq consume rows error with snapshot = 0!")
tdLog.info("wait subscriptions exit ....")
tmqCom.waitSubscriptionExit(tdSql, topicFromDb)
@@ -246,8 +247,9 @@ class TDTestCase:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
- if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)):
- tdLog.exit("tmq consume rows error with snapshot = 0!")
+ if self.snapshot == 0:
+ if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)):
+ tdLog.exit("tmq consume rows error with snapshot = 0!")
tdLog.info("wait subscriptions exit ....")
tmqCom.waitSubscriptionExit(tdSql, topicFromDb)
diff --git a/tests/system-test/7-tmq/tmqShow.py b/tests/system-test/7-tmq/tmqShow.py
index 6f8183bf06cfa501f62c22c82c2915638ea7414b..c0f33d92049efe6eceffd01353e3bedc2c406ee9 100644
--- a/tests/system-test/7-tmq/tmqShow.py
+++ b/tests/system-test/7-tmq/tmqShow.py
@@ -19,6 +19,11 @@ class TDTestCase:
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,offset=1,cdbName='cdb'):
+ sql = "insert into %s.consumeinfo values "%cdbName
+ sql += "(now+%ds, %d, '%s', '%s', %d, %d, %d)"%(offset,consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
+ tdLog.info("consume info sql: %s"%sql)
+ tdSql.query(sql)
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
@@ -95,19 +100,23 @@ class TDTestCase:
ifcheckdata = 0
ifManualCommit = 0
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[0]
- tmqCom.insertConsumerInfo(consumerIdList[0], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+ tsOffset=1
+ self.insertConsumerInfo(consumerIdList[0], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit,tsOffset)
topicList = topicNameList[1]
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[1]
- tmqCom.insertConsumerInfo(consumerIdList[1], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+ tsOffset=2
+ self.insertConsumerInfo(consumerIdList[1], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit,tsOffset)
topicList = topicNameList[2]
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[2]
- tmqCom.insertConsumerInfo(consumerIdList[2], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+ tsOffset=3
+ self.insertConsumerInfo(consumerIdList[2], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit,tsOffset)
topicList = topicNameList[3]
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[3]
- tmqCom.insertConsumerInfo(consumerIdList[3], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+ tsOffset=4
+ self.insertConsumerInfo(consumerIdList[3], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit,tsOffset)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
diff --git a/tests/system-test/7-tmq/tmq_taosx.py b/tests/system-test/7-tmq/tmq_taosx.py
index cd13535684501d98673923254c7fe83adc432851..07602ec29f69f9fbd0dab90935e0922996c80f80 100644
--- a/tests/system-test/7-tmq/tmq_taosx.py
+++ b/tests/system-test/7-tmq/tmq_taosx.py
@@ -20,15 +20,9 @@ class TDTestCase:
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
- def checkFileContent(self):
- buildPath = tdCom.getBuildPath()
- cfgPath = tdCom.getClientCfgPath()
- cmdStr = '%s/build/bin/tmq_taosx_ci -c %s'%(buildPath, cfgPath)
- tdLog.info(cmdStr)
- os.system(cmdStr)
-
- srcFile = '%s/../log/tmq_taosx_tmp.source'%(cfgPath)
- dstFile = '%s/../log/tmq_taosx_tmp.result'%(cfgPath)
+ def checkJson(self, cfgPath, name):
+ srcFile = '%s/../log/%s.source'%(cfgPath, name)
+ dstFile = '%s/../log/%s.result'%(cfgPath, name)
tdLog.info("compare file: %s, %s"%(srcFile, dstFile))
consumeFile = open(srcFile, mode='r')
@@ -43,7 +37,31 @@ class TDTestCase:
tdLog.exit("compare error: %s != %s"%src, dst)
else:
break
+ return
+ def checkDropData(self):
+ tdSql.execute('use db_taosx')
+ tdSql.query("show tables")
+ tdSql.checkRows(2)
+ tdSql.query("select * from jt order by i")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 11)
+ tdSql.checkData(0, 2, '{"k1":1,"k2":"hello"}')
+ tdSql.checkData(1, 2, None)
+
+ tdSql.execute('use abc1')
+ tdSql.query("show tables")
+ tdSql.checkRows(2)
+ tdSql.query("select * from jt order by i")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 11)
+ tdSql.checkData(0, 2, '{"k1":1,"k2":"hello"}')
+ tdSql.checkData(1, 2, None)
+ return
+
+ def checkData(self):
tdSql.execute('use db_taosx')
tdSql.query("select * from ct3 order by c1 desc")
tdSql.checkRows(2)
@@ -52,6 +70,48 @@ class TDTestCase:
tdSql.checkData(1, 1, 23)
tdSql.checkData(1, 4, None)
+ tdSql.query("select * from st1 order by ts")
+ tdSql.checkRows(8)
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 3)
+ tdSql.checkData(4, 1, 4)
+ tdSql.checkData(6, 1, 23)
+
+ tdSql.checkData(0, 2, 2)
+ tdSql.checkData(1, 2, 4)
+ tdSql.checkData(4, 2, 3)
+ tdSql.checkData(6, 2, 32)
+
+ tdSql.checkData(0, 3, 'a')
+ tdSql.checkData(1, 3, 'b')
+ tdSql.checkData(4, 3, 'hwj')
+ tdSql.checkData(6, 3, 's21ds')
+
+ tdSql.checkData(0, 4, None)
+ tdSql.checkData(1, 4, None)
+ tdSql.checkData(5, 4, 940)
+ tdSql.checkData(6, 4, None)
+
+ tdSql.checkData(0, 5, 1000)
+ tdSql.checkData(1, 5, 2000)
+ tdSql.checkData(4, 5, 1000)
+ tdSql.checkData(6, 5, 5000)
+
+ tdSql.checkData(0, 6, 'ttt')
+ tdSql.checkData(1, 6, None)
+ tdSql.checkData(4, 6, 'ttt')
+ tdSql.checkData(6, 6, None)
+
+ tdSql.checkData(0, 7, True)
+ tdSql.checkData(1, 7, None)
+ tdSql.checkData(4, 7, True)
+ tdSql.checkData(6, 7, None)
+
+ tdSql.checkData(0, 8, None)
+ tdSql.checkData(1, 8, None)
+ tdSql.checkData(4, 8, None)
+ tdSql.checkData(6, 8, None)
+
tdSql.query("select * from ct1")
tdSql.checkRows(4)
@@ -74,12 +134,82 @@ class TDTestCase:
tdSql.checkData(0, 2, None)
tdSql.checkData(1, 1, 1)
tdSql.checkData(1, 2, '{"k1":1,"k2":"hello"}')
+ return
+
+ def checkWal1Vgroup(self):
+ buildPath = tdCom.getBuildPath()
+ cfgPath = tdCom.getClientCfgPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -c %s -sv 1 -dv 1'%(buildPath, cfgPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkJson(cfgPath, "tmq_taosx_tmp")
+ self.checkData()
+
+ return
+
+ def checkWalMultiVgroups(self):
+ buildPath = tdCom.getBuildPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -sv 3 -dv 5'%(buildPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkData()
+
+ return
+
+ def checkWalMultiVgroupsWithDropTable(self):
+ buildPath = tdCom.getBuildPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -sv 3 -dv 5 -d'%(buildPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkDropData()
+
+ return
+
+ def checkSnapshot1Vgroup(self):
+ buildPath = tdCom.getBuildPath()
+ cfgPath = tdCom.getClientCfgPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -c %s -sv 1 -dv 1 -s'%(buildPath, cfgPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkJson(cfgPath, "tmq_taosx_tmp_snapshot")
+ self.checkData()
+
+ return
+
+ def checkSnapshotMultiVgroups(self):
+ buildPath = tdCom.getBuildPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -sv 2 -dv 4 -s'%(buildPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkData()
+
+ return
+
+ def checkSnapshotMultiVgroupsWithDropTable(self):
+ buildPath = tdCom.getBuildPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -sv 2 -dv 4 -s -d'%(buildPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkDropData()
return
def run(self):
tdSql.prepare()
- self.checkFileContent()
+ self.checkWal1Vgroup()
+ self.checkSnapshot1Vgroup()
+
+ self.checkWalMultiVgroups()
+ self.checkSnapshotMultiVgroups()
+
+ self.checkWalMultiVgroupsWithDropTable()
+ self.checkSnapshotMultiVgroupsWithDropTable()
def stop(self):
tdSql.close()
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index 1e958bdb298507113f51b4ce717314df634dea17..4305ceff56781fb4e12f725620b17053f6d4b299 100755
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -124,44 +124,97 @@ python3 ./test.py -f 2-query/leastsquares.py
python3 ./test.py -f 2-query/leastsquares.py -R
python3 ./test.py -f 2-query/length.py
python3 ./test.py -f 2-query/length.py -R
+python3 ./test.py -f 2-query/log.py
+# python3 ./test.py -f 2-query/log.py -R
+python3 ./test.py -f 2-query/lower.py
+python3 ./test.py -f 2-query/lower.py -R
+python3 ./test.py -f 2-query/ltrim.py
+python3 ./test.py -f 2-query/ltrim.py -R
+python3 ./test.py -f 2-query/mavg.py
+python3 ./test.py -f 2-query/mavg.py -R
+python3 ./test.py -f 2-query/max_partition.py
+python3 ./test.py -f 2-query/max_partition.py -R
+python3 ./test.py -f 2-query/max.py
+python3 ./test.py -f 2-query/max.py -R
+python3 ./test.py -f 2-query/min.py
+python3 ./test.py -f 2-query/min.py -R
+python3 ./test.py -f 2-query/Now.py
+python3 ./test.py -f 2-query/Now.py -R
+python3 ./test.py -f 2-query/percentile.py
+python3 ./test.py -f 2-query/percentile.py -R
+python3 ./test.py -f 2-query/pow.py
+python3 ./test.py -f 2-query/pow.py -R
+python3 ./test.py -f 2-query/query_cols_tags_and_or.py
+python3 ./test.py -f 2-query/query_cols_tags_and_or.py -R
+python3 ./test.py -f 2-query/round.py
+python3 ./test.py -f 2-query/round.py -R
+python3 ./test.py -f 2-query/rtrim.py
+python3 ./test.py -f 2-query/rtrim.py -R
+python3 ./test.py -f 2-query/sample.py
+python3 ./test.py -f 2-query/sample.py -R
+python3 ./test.py -f 2-query/sin.py
+python3 ./test.py -f 2-query/sin.py -R
+python3 ./test.py -f 2-query/smaTest.py
+python3 ./test.py -f 2-query/smaTest.py -R
+python3 ./test.py -f 2-query/sml.py
+python3 ./test.py -f 2-query/sml.py -R
+python3 ./test.py -f 2-query/spread.py
+python3 ./test.py -f 2-query/spread.py -R
+python3 ./test.py -f 2-query/sqrt.py
+python3 ./test.py -f 2-query/sqrt.py -R
+python3 ./test.py -f 2-query/statecount.py
+python3 ./test.py -f 2-query/statecount.py -R
+python3 ./test.py -f 2-query/stateduration.py
+python3 ./test.py -f 2-query/stateduration.py -R
+python3 ./test.py -f 2-query/substr.py
+python3 ./test.py -f 2-query/substr.py -R
+python3 ./test.py -f 2-query/sum.py
+python3 ./test.py -f 2-query/sum.py -R
+python3 ./test.py -f 2-query/tail.py
+python3 ./test.py -f 2-query/tail.py -R
+python3 ./test.py -f 2-query/tan.py
+# python3 ./test.py -f 2-query/tan.py -R
+python3 ./test.py -f 2-query/Timediff.py
+python3 ./test.py -f 2-query/Timediff.py -R
+python3 ./test.py -f 2-query/timetruncate.py
+# python3 ./test.py -f 2-query/timetruncate.py -R
+python3 ./test.py -f 2-query/timezone.py
+python3 ./test.py -f 2-query/timezone.py -R
+python3 ./test.py -f 2-query/To_iso8601.py
+python3 ./test.py -f 2-query/To_iso8601.py -R
+python3 ./test.py -f 2-query/To_unixtimestamp.py
+python3 ./test.py -f 2-query/To_unixtimestamp.py -R
+python3 ./test.py -f 2-query/Today.py
+# python3 ./test.py -f 2-query/Today.py -R
+python3 ./test.py -f 2-query/top.py
+python3 ./test.py -f 2-query/top.py -R
+python3 ./test.py -f 2-query/tsbsQuery.py
+python3 ./test.py -f 2-query/tsbsQuery.py -R
+python3 ./test.py -f 2-query/ttl_comment.py
+python3 ./test.py -f 2-query/ttl_comment.py -R
+python3 ./test.py -f 2-query/twa.py
+python3 ./test.py -f 2-query/twa.py -R
+python3 ./test.py -f 2-query/union.py
+python3 ./test.py -f 2-query/union.py -R
+python3 ./test.py -f 2-query/unique.py
+python3 ./test.py -f 2-query/unique.py -R
+python3 ./test.py -f 2-query/upper.py
+python3 ./test.py -f 2-query/upper.py -R
+python3 ./test.py -f 2-query/varchar.py
+python3 ./test.py -f 2-query/varchar.py -R
+
python3 ./test.py -f 1-insert/update_data.py
python3 ./test.py -f 1-insert/delete_data.py
-python3 ./test.py -f 2-query/varchar.py
-python3 ./test.py -f 2-query/ltrim.py
-python3 ./test.py -f 2-query/rtrim.py
-python3 ./test.py -f 2-query/upper.py
-python3 ./test.py -f 2-query/lower.py
python3 ./test.py -f 2-query/join2.py
-python3 ./test.py -f 2-query/substr.py
-python3 ./test.py -f 2-query/union.py
python3 ./test.py -f 2-query/union1.py
python3 ./test.py -f 2-query/concat2.py
-python3 ./test.py -f 2-query/spread.py
-python3 ./test.py -f 2-query/timezone.py
-python3 ./test.py -f 2-query/Now.py
-python3 ./test.py -f 2-query/Today.py
-python3 ./test.py -f 2-query/max.py
-python3 ./test.py -f 2-query/min.py
-python3 ./test.py -f 2-query/To_iso8601.py
-python3 ./test.py -f 2-query/To_unixtimestamp.py
-python3 ./test.py -f 2-query/timetruncate.py
-python3 ./test.py -f 2-query/Timediff.py
python3 ./test.py -f 2-query/json_tag.py
-python3 ./test.py -f 2-query/top.py
-python3 ./test.py -f 2-query/percentile.py
-python3 ./test.py -f 2-query/round.py
-python3 ./test.py -f 2-query/log.py
-python3 ./test.py -f 2-query/pow.py
-python3 ./test.py -f 2-query/sqrt.py
-python3 ./test.py -f 2-query/sin.py
-python3 ./test.py -f 2-query/tan.py
-python3 ./test.py -f 2-query/query_cols_tags_and_or.py
# python3 ./test.py -f 2-query/nestedQuery.py
# TD-15983 subquery output duplicate name column.
# Please Xiangyang Guo modify the following script
@@ -169,18 +222,8 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py
python3 ./test.py -f 2-query/elapsed.py
python3 ./test.py -f 2-query/csum.py
-python3 ./test.py -f 2-query/mavg.py
-python3 ./test.py -f 2-query/sample.py
python3 ./test.py -f 2-query/function_diff.py
-python3 ./test.py -f 2-query/unique.py
-python3 ./test.py -f 2-query/stateduration.py
-python3 ./test.py -f 2-query/statecount.py
-python3 ./test.py -f 2-query/tail.py
-python3 ./test.py -f 2-query/ttl_comment.py
-python3 ./test.py -f 2-query/twa.py
python3 ./test.py -f 2-query/queryQnode.py
-python3 ./test.py -f 2-query/max_partition.py
-python3 ./test.py -f 2-query/tsbsQuery.py
python3 ./test.py -f 6-cluster/5dnode1mnode.py
python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3
@@ -271,8 +314,8 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
-#python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
-#python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
+python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
+python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py
python3 ./test.py -f 7-tmq/tmqUpdateWithConsume.py
python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot0.py
@@ -358,7 +401,7 @@ python3 ./test.py -f 2-query/interp.py -Q 2
python3 ./test.py -f 2-query/avg.py -Q 2
# python3 ./test.py -f 2-query/elapsed.py -Q 2
python3 ./test.py -f 2-query/csum.py -Q 2
-python3 ./test.py -f 2-query/mavg.py -Q 2
+#python3 ./test.py -f 2-query/mavg.py -Q 2
python3 ./test.py -f 2-query/sample.py -Q 2
python3 ./test.py -f 2-query/function_diff.py -Q 2
python3 ./test.py -f 2-query/unique.py -Q 2
@@ -445,7 +488,7 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3
# python3 ./test.py -f 2-query/avg.py -Q 3
# python3 ./test.py -f 2-query/elapsed.py -Q 3
python3 ./test.py -f 2-query/csum.py -Q 3
-python3 ./test.py -f 2-query/mavg.py -Q 3
+#python3 ./test.py -f 2-query/mavg.py -Q 3
python3 ./test.py -f 2-query/sample.py -Q 3
python3 ./test.py -f 2-query/function_diff.py -Q 3
python3 ./test.py -f 2-query/unique.py -Q 3
@@ -471,3 +514,4 @@ python3 ./test.py -f 2-query/last_row.py -Q 3
python3 ./test.py -f 2-query/tsbsQuery.py -Q 3
python3 ./test.py -f 2-query/sml.py -Q 3
python3 ./test.py -f 2-query/interp.py -Q 3
+
diff --git a/tests/test/c/sdbDump.c b/tests/test/c/sdbDump.c
index aef50560316c87ca91d766a99bc7acc0509e0866..b90b781e4469f004df742b047d046ff7574601ba 100644
--- a/tests/test/c/sdbDump.c
+++ b/tests/test/c/sdbDump.c
@@ -20,6 +20,9 @@
#include "tconfig.h"
#include "tjson.h"
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
+
#define TMP_DNODE_DIR TD_TMP_DIR_PATH "dumpsdb"
#define TMP_MNODE_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode"
#define TMP_SDB_DATA_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "data"
@@ -429,6 +432,7 @@ int32_t parseArgs(int32_t argc, char *argv[]) {
char cmd[PATH_MAX * 2] = {0};
snprintf(cmd, sizeof(cmd), "rm -rf %s", TMP_DNODE_DIR);
+
system(cmd);
#ifdef WINDOWS
taosMulMkDir(TMP_SDB_DATA_DIR);
@@ -467,3 +471,5 @@ int32_t main(int32_t argc, char *argv[]) {
return dumpSdb();
}
+
+#pragma GCC diagnostic pop
\ No newline at end of file
diff --git a/tests/test/c/sml_test.c b/tests/test/c/sml_test.c
index 50249a5c5621aad4821fd7866950021f240c1c8a..1fd1def263fb9a142ca7df4be1a8bed839ea098e 100644
--- a/tests/test/c/sml_test.c
+++ b/tests/test/c/sml_test.c
@@ -63,6 +63,7 @@ int smlProcess_influx_Test() {
printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
int code = taos_errno(pRes);
taos_free_result(pRes);
+
return code;
}
@@ -1089,7 +1090,7 @@ int sml_add_tag_col_Test() {
if (code) return code;
const char *sql1[] = {
- "macylr,id=macylr_17875_1804,t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\",t11=127i8,t10=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64,c11=L\"ncharColValue\",c10=f 1626006833639000000"
+ "macylr,id=macylr_17875_1804,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\",t11=127i8,t10=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c8=L\"ncharColValue\",c9=7u64,c11=L\"ncharColValue\",c10=f 1626006833639000000"
};
pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_LINE_PROTOCOL, 0);
@@ -1100,34 +1101,91 @@ int sml_add_tag_col_Test() {
return code;
}
+int smlProcess_18784_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ "disk,device=sdc inodes_used=176059i,total=1081101176832i 1661943960000000000",
+ "disk,device=sdc inodes_free=66932805i 1661943960000000000",
+ };
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, 0);
+ printf("%s result:%s, rows:%d\n", __FUNCTION__, taos_errstr(pRes), taos_affected_rows(pRes));
+ int code = taos_errno(pRes);
+ ASSERT(!code);
+ ASSERT(taos_affected_rows(pRes) == 2);
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "select * from disk");
+ ASSERT(pRes);
+ int fieldNum = taos_field_count(pRes);
+ ASSERT(fieldNum == 5);
+ printf("fieldNum:%d\n", fieldNum);
+ TAOS_ROW row = NULL;
+ int32_t rowIndex = 0;
+ while((row = taos_fetch_row(pRes)) != NULL) {
+ int64_t ts = *(int64_t*)row[0];
+ int64_t used = *(int64_t*)row[1];
+ int64_t total = *(int64_t*)row[2];
+ int64_t freed = *(int64_t*)row[3];
+ if(rowIndex == 0){
+ ASSERT(ts == 1661943960000);
+ ASSERT(used == 176059);
+ ASSERT(total == 1081101176832);
+ ASSERT(freed == 66932805);
+// ASSERT_EQ(latitude, 24.5208);
+// ASSERT_EQ(longitude, 28.09377);
+// ASSERT_EQ(elevation, 428);
+// ASSERT_EQ(velocity, 0);
+// ASSERT_EQ(heading, 304);
+// ASSERT_EQ(grade, 0);
+// ASSERT_EQ(fuel_consumption, 25);
+ }else{
+// ASSERT(0);
+ }
+ rowIndex++;
+ }
+ taos_free_result(pRes);
+
+ return code;
+}
+
int main(int argc, char *argv[]) {
int ret = 0;
ret = smlProcess_influx_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = smlProcess_telnet_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = smlProcess_json1_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = smlProcess_json2_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = smlProcess_json3_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = smlProcess_json4_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_TD15662_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_TD15742_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_16384_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_oom_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_16368_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_dup_time_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_16960_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_add_tag_col_Test();
+ ASSERT(!ret);
+ ret = smlProcess_18784_Test();
+ ASSERT(!ret);
return ret;
}
diff --git a/tests/test/c/tmqSim.c b/tests/test/c/tmqSim.c
index d39ade7e91495d2b3ff1924efdb78103d7b423cc..71b31ba1071c977d9fd3d2ceb046bdff02ca53df 100644
--- a/tests/test/c/tmqSim.c
+++ b/tests/test/c/tmqSim.c
@@ -492,7 +492,6 @@ static char* shellFormatTimestamp(char* buf, int64_t val, int32_t precision) {
static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* field, int32_t length,
int32_t precision) {
if (val == NULL) {
- taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR);
return;
}
@@ -540,13 +539,34 @@ static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* f
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_JSON:
- memcpy(buf, val, length);
- buf[length] = 0;
- taosFprintfFile(pFile, "\'%s\'", buf);
+ {
+ char quotationStr[2];
+ int32_t bufIndex = 0;
+ quotationStr[0] = 0;
+ quotationStr[1] = 0;
+ for (int32_t i = 0; i < length; i++) {
+ buf[bufIndex] = val[i];
+ bufIndex++;
+ if (val[i] == '\"') {
+ buf[bufIndex] = val[i];
+ bufIndex++;
+ quotationStr[0] = '\"';
+ }
+ if (val[i] == ',') {
+ quotationStr[0] = '\"';
+ }
+ }
+ buf[bufIndex] = 0;
+ if (length == 0) {
+ quotationStr[0] = '\"';
+ }
+
+ taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr);
+ }
break;
case TSDB_DATA_TYPE_TIMESTAMP:
shellFormatTimestamp(buf, *(int64_t*)val, precision);
- taosFprintfFile(pFile, "'%s'", buf);
+ taosFprintfFile(pFile, "%s", buf);
break;
default:
break;
diff --git a/tests/test/c/tmq_taosx_ci.c b/tests/test/c/tmq_taosx_ci.c
index ece7ad4819f2947cb0a474491255dd296136581b..f917b9159e9914682c277329ddcfa4e269dc4908 100644
--- a/tests/test/c/tmq_taosx_ci.c
+++ b/tests/test/c/tmq_taosx_ci.c
@@ -22,8 +22,17 @@
#include "types.h"
static int running = 1;
-TdFilePtr g_fp = NULL;
-char dir[64]={0};
+TdFilePtr g_fp = NULL;
+typedef struct{
+ bool snapShot;
+ bool dropTable;
+ bool subTable;
+ int srcVgroups;
+ int dstVgroups;
+ char dir[64];
+}Config;
+
+Config g_conf = {0};
static TAOS* use_db(){
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
@@ -41,7 +50,6 @@ static TAOS* use_db(){
}
static void msg_process(TAOS_RES* msg) {
- /*memset(buf, 0, 1024);*/
printf("-----------topic-------------: %s\n", tmq_get_topic_name(msg));
printf("db: %s\n", tmq_get_db_name(msg));
printf("vg: %d\n", tmq_get_vgroup_id(msg));
@@ -51,8 +59,11 @@ static void msg_process(TAOS_RES* msg) {
if (result) {
printf("meta result: %s\n", result);
}
- taosFprintfFile(g_fp, result);
- taosFprintfFile(g_fp, "\n");
+ if(g_fp){
+ taosFprintfFile(g_fp, result);
+ taosFprintfFile(g_fp, "\n");
+ }
+
tmq_free_json_meta(result);
}
@@ -61,62 +72,10 @@ static void msg_process(TAOS_RES* msg) {
int32_t ret = tmq_write_raw(pConn, raw);
printf("write raw data: %s\n", tmq_err2str(ret));
-// else{
-// while(1){
-// int numOfRows = 0;
-// void *pData = NULL;
-// taos_fetch_raw_block(msg, &numOfRows, &pData);
-// if(numOfRows == 0) break;
-// printf("write data: tbname:%s, numOfRows:%d\n", tmq_get_table_name(msg), numOfRows);
-// int ret = taos_write_raw_block(pConn, numOfRows, pData, tmq_get_table_name(msg));
-// printf("write raw data: %s\n", tmq_err2str(ret));
-// }
-// }
-
taos_close(pConn);
}
-int32_t init_env() {
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return -1;
- }
-
- TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_taosx");
- if (taos_errno(pRes) != 0) {
- printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create database if not exists db_taosx vgroups 1");
- if (taos_errno(pRes) != 0) {
- printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop database if exists abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in drop db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1");
- if (taos_errno(pRes) != 0) {
- printf("error in create db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
+int buildDatabase(TAOS* pConn, TAOS_RES* pRes){
pRes = taos_query(pConn,
"create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
"nchar(8), t4 bool)");
@@ -133,7 +92,7 @@ int32_t init_env() {
}
taos_free_result(pRes);
- pRes = taos_query(pConn, "insert into ct0 values(1626006833600, 1, 2, 'a')");
+ pRes = taos_query(pConn, "insert into ct0 values(1626006833400, 1, 2, 'a')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes));
return -1;
@@ -168,7 +127,7 @@ int32_t init_env() {
}
taos_free_result(pRes);
- pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833602, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
+ pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833603, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
return -1;
@@ -224,6 +183,22 @@ int32_t init_env() {
}
taos_free_result(pRes);
+ if(g_conf.dropTable){
+ pRes = taos_query(pConn, "drop table ct3 ct1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "drop table st1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+ }
+
pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))");
if (taos_errno(pRes) != 0) {
printf("failed to create normal table n1, reason:%s\n", taos_errstr(pRes));
@@ -273,6 +248,15 @@ int32_t init_env() {
}
taos_free_result(pRes);
+ if(g_conf.dropTable){
+ pRes = taos_query(pConn, "drop table n1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+ }
+
pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)");
if (taos_errno(pRes) != 0) {
printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
@@ -308,6 +292,129 @@ int32_t init_env() {
}
taos_free_result(pRes);
+ if(g_conf.dropTable){
+ pRes = taos_query(pConn,
+ "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
+ "nchar(8), t4 bool)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "drop table st1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+ }
+ return 0;
+}
+
+int buildStable(TAOS* pConn, TAOS_RES* pRes){
+ pRes = taos_query(pConn, "CREATE STABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` VARCHAR(16))");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table meters, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table d0 using meters tags(1, 'San Francisco')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create child table d0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table d1 using meters tags(2, 'Beijing')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create child table d1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create stream meters_summary_s into meters_summary as select _wstart, max(current) as current, groupid, location from meters partition by groupid, location interval(10m)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table meters_summary, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into d0 (ts, current) values (now, 120)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into table d0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ return 0;
+}
+
+int32_t init_env() {
+ TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ if (pConn == NULL) {
+ return -1;
+ }
+
+ TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_taosx");
+ if (taos_errno(pRes) != 0) {
+ printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ char sql[128] = {0};
+ snprintf(sql, 128, "create database if not exists db_taosx vgroups %d", g_conf.dstVgroups);
+ pRes = taos_query(pConn, sql);
+ if (taos_errno(pRes) != 0) {
+ printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "drop topic if exists topic_db");
+ if (taos_errno(pRes) != 0) {
+ printf("error in drop topic, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "drop topic if exists meters_summary_t1");
+ if (taos_errno(pRes) != 0) {
+ printf("error in drop topic, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "drop database if exists abc1");
+ if (taos_errno(pRes) != 0) {
+ printf("error in drop db, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ snprintf(sql, 128, "create database if not exists abc1 vgroups %d", g_conf.srcVgroups);
+ pRes = taos_query(pConn, sql);
+ if (taos_errno(pRes) != 0) {
+ printf("error in create db, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "use abc1");
+ if (taos_errno(pRes) != 0) {
+ printf("error in use db, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ if(g_conf.subTable){
+ buildStable(pConn, pRes);
+ }else{
+ buildDatabase(pConn, pRes);
+ }
+
taos_close(pConn);
return 0;
}
@@ -327,12 +434,21 @@ int32_t create_topic() {
}
taos_free_result(pRes);
- pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
- if (taos_errno(pRes) != 0) {
- printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
- return -1;
+ if(g_conf.subTable){
+ pRes = taos_query(pConn, "create topic meters_summary_t1 with meta as stable meters_summary");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create topic meters_summary_t1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+ }else{
+ pRes = taos_query(pConn, "create topic topic_db with meta as database abc1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create topic topic_db, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
}
- taos_free_result(pRes);
taos_close(pConn);
return 0;
@@ -343,17 +459,6 @@ void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
}
tmq_t* build_consumer() {
-#if 0
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- assert(pConn != NULL);
-
- TAOS_RES* pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- }
- taos_free_result(pRes);
-#endif
-
tmq_conf_t* conf = tmq_conf_new();
tmq_conf_set(conf, "group.id", "tg2");
tmq_conf_set(conf, "client.id", "my app 1");
@@ -363,7 +468,9 @@ tmq_t* build_consumer() {
tmq_conf_set(conf, "enable.auto.commit", "true");
tmq_conf_set(conf, "enable.heartbeat.background", "true");
- /*tmq_conf_set(conf, "experimental.snapshot.enable", "true");*/
+ if(g_conf.snapShot){
+ tmq_conf_set(conf, "experimental.snapshot.enable", "true");
+ }
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
@@ -374,8 +481,11 @@ tmq_t* build_consumer() {
tmq_list_t* build_topic_list() {
tmq_list_t* topic_list = tmq_list_new();
- tmq_list_append(topic_list, "topic_ctb_column");
- /*tmq_list_append(topic_list, "tmq_test_db_multi_insert_topic");*/
+ if(g_conf.subTable){
+ tmq_list_append(topic_list, "meters_summary_t1");
+ }else{
+ tmq_list_append(topic_list, "topic_db");
+ }
return topic_list;
}
@@ -393,12 +503,7 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
if (tmqmessage) {
cnt++;
msg_process(tmqmessage);
- /*if (cnt >= 2) break;*/
- /*printf("get data\n");*/
taos_free_result(tmqmessage);
- /*} else {*/
- /*break;*/
- /*tmq_commit_sync(tmq, NULL);*/
}else{
break;
}
@@ -411,52 +516,18 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
fprintf(stderr, "%% Consumer closed\n");
}
-void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
- static const int MIN_COMMIT_COUNT = 1;
-
- int msg_count = 0;
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topics))) {
- fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
- return;
- }
-
- tmq_list_t* subList = NULL;
- tmq_subscription(tmq, &subList);
- char** subTopics = tmq_list_to_c_array(subList);
- int32_t sz = tmq_list_get_size(subList);
- printf("subscribed topics: ");
- for (int32_t i = 0; i < sz; i++) {
- printf("%s, ", subTopics[i]);
- }
- printf("\n");
- tmq_list_destroy(subList);
-
- while (running) {
- TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
- if (tmqmessage) {
- msg_process(tmqmessage);
- taos_free_result(tmqmessage);
-
- /*tmq_commit_sync(tmq, NULL);*/
- /*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/
- }
- }
-
- code = tmq_consumer_close(tmq);
- if (code)
- fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
- else
- fprintf(stderr, "%% Consumer closed\n");
-}
-
void initLogFile() {
char f1[256] = {0};
char f2[256] = {0};
- sprintf(f1, "%s/../log/tmq_taosx_tmp.source", dir);
- sprintf(f2, "%s/../log/tmq_taosx_tmp.result", dir);
+ if(g_conf.snapShot){
+ sprintf(f1, "%s/../log/tmq_taosx_tmp_snapshot.source", g_conf.dir);
+ sprintf(f2, "%s/../log/tmq_taosx_tmp_snapshot.result", g_conf.dir);
+ }else{
+ sprintf(f1, "%s/../log/tmq_taosx_tmp.source", g_conf.dir);
+ sprintf(f2, "%s/../log/tmq_taosx_tmp.result", g_conf.dir);
+ }
+
TdFilePtr pFile = taosOpenFile(f1, TD_FILE_TEXT | TD_FILE_TRUNC | TD_FILE_STREAM);
if (NULL == pFile) {
fprintf(stderr, "Failed to open %s for save result\n", f1);
@@ -469,43 +540,76 @@ void initLogFile() {
fprintf(stderr, "Failed to open %s for save result\n", f2);
exit(-1);
}
- char *result[] = {
- "{\"type\":\"create\",\"tableName\":\"st1\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":6},{\"name\":\"c3\",\"type\":8,\"length\":16}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\",\"type\":1}]}",
- "{\"type\":\"create\",\"tableName\":\"ct0\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":1000},{\"name\":\"t3\",\"type\":10,\"value\":\"\\\"ttt\\\"\"},{\"name\":\"t4\",\"type\":1,\"value\":1}]}",
- "{\"type\":\"create\",\"tableName\":\"ct1\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":2000}]}",
- "{\"type\":\"create\",\"tableName\":\"ct2\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[]}",
- "{\"type\":\"create\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":3000}]}",
- "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}",
- "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":7,\"colName\":\"c3\",\"colType\":8,\"colLength\":64}",
- "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":1,\"colName\":\"t2\",\"colType\":8,\"colLength\":64}",
- "{\"type\":\"alter\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"alterType\":4,\"colName\":\"t1\",\"colValue\":\"5000\",\"colValueNull\":false}",
- "{\"type\":\"create\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":10,\"length\":4}],\"tags\":[]}",
- "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":5,\"colName\":\"c3\",\"colType\":5}",
- "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":7,\"colName\":\"c2\",\"colType\":10,\"colLength\":8}",
- "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":10,\"colName\":\"c3\",\"colNewName\":\"cc3\"}",
- "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":9}",
- "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":6,\"colName\":\"c1\"}",
- "{\"type\":\"create\",\"tableName\":\"jt\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"i\",\"type\":4}],\"tags\":[{\"name\":\"t\",\"type\":15}]}",
- "{\"type\":\"create\",\"tableName\":\"jt1\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[{\"name\":\"t\",\"type\":15,\"value\":\"{\\\"k1\\\":1,\\\"k2\\\":\\\"hello\\\"}\"}]}",
- "{\"type\":\"create\",\"tableName\":\"jt2\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[]}"
- };
-
- for(int i = 0; i < sizeof(result)/sizeof(result[0]); i++){
- taosFprintfFile(pFile2, result[i]);
- taosFprintfFile(pFile2, "\n");
+
+ if(g_conf.snapShot){
+ char *result[] = {
+ "{\"type\":\"create\",\"tableName\":\"st1\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":6},{\"name\":\"c3\",\"type\":8,\"length\":64},{\"name\":\"c4\",\"type\":5}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\",\"type\":1},{\"name\":\"t2\",\"type\":8,\"length\":64}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct0\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":1000},{\"name\":\"t3\",\"type\":10,\"value\":\"\\\"ttt\\\"\"},{\"name\":\"t4\",\"type\":1,\"value\":1}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct1\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":2000}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct2\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[]}",
+ "{\"type\":\"create\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":5000}]}",
+ "{\"type\":\"create\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c2\",\"type\":10,\"length\":8},{\"name\":\"cc3\",\"type\":5}],\"tags\":[]}",
+ "{\"type\":\"create\",\"tableName\":\"jt\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"i\",\"type\":4}],\"tags\":[{\"name\":\"t\",\"type\":15}]}",
+ "{\"type\":\"create\",\"tableName\":\"jt1\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[{\"name\":\"t\",\"type\":15,\"value\":\"{\\\"k1\\\":1,\\\"k2\\\":\\\"hello\\\"}\"}]}",
+ "{\"type\":\"create\",\"tableName\":\"jt2\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[]}",
+ };
+
+ for(int i = 0; i < sizeof(result)/sizeof(result[0]); i++){
+ taosFprintfFile(pFile2, result[i]);
+ taosFprintfFile(pFile2, "\n");
+ }
+ }else{
+ char *result[] = {
+ "{\"type\":\"create\",\"tableName\":\"st1\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":6},{\"name\":\"c3\",\"type\":8,\"length\":16}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\",\"type\":1}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct0\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":1000},{\"name\":\"t3\",\"type\":10,\"value\":\"\\\"ttt\\\"\"},{\"name\":\"t4\",\"type\":1,\"value\":1}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct1\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":2000}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct2\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[]}",
+ "{\"type\":\"create\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":3000}]}",
+ "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}",
+ "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":7,\"colName\":\"c3\",\"colType\":8,\"colLength\":64}",
+ "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":1,\"colName\":\"t2\",\"colType\":8,\"colLength\":64}",
+ "{\"type\":\"alter\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"alterType\":4,\"colName\":\"t1\",\"colValue\":\"5000\",\"colValueNull\":false}",
+ "{\"type\":\"create\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":10,\"length\":4}],\"tags\":[]}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":5,\"colName\":\"c3\",\"colType\":5}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":7,\"colName\":\"c2\",\"colType\":10,\"colLength\":8}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":10,\"colName\":\"c3\",\"colNewName\":\"cc3\"}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":9}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":6,\"colName\":\"c1\"}",
+ "{\"type\":\"create\",\"tableName\":\"jt\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"i\",\"type\":4}],\"tags\":[{\"name\":\"t\",\"type\":15}]}",
+ "{\"type\":\"create\",\"tableName\":\"jt1\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[{\"name\":\"t\",\"type\":15,\"value\":\"{\\\"k1\\\":1,\\\"k2\\\":\\\"hello\\\"}\"}]}",
+ "{\"type\":\"create\",\"tableName\":\"jt2\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[]}"
+ };
+
+ for(int i = 0; i < sizeof(result)/sizeof(result[0]); i++){
+ taosFprintfFile(pFile2, result[i]);
+ taosFprintfFile(pFile2, "\n");
+ }
}
+
taosCloseFile(&pFile2);
}
int main(int argc, char* argv[]) {
- if(argc == 3 && strcmp(argv[1], "-c") == 0) {
- strcpy(dir, argv[2]);
- }else{
- strcpy(dir, "../../../sim/psim/cfg");
+ for (int32_t i = 1; i < argc; i++) {
+ if(strcmp(argv[i], "-c") == 0){
+ strcpy(g_conf.dir, argv[++i]);
+ }else if(strcmp(argv[i], "-s") == 0){
+ g_conf.snapShot = true;
+ }else if(strcmp(argv[i], "-d") == 0){
+ g_conf.dropTable = true;
+ }else if(strcmp(argv[i], "-sv") == 0){
+ g_conf.srcVgroups = atol(argv[++i]);
+ }else if(strcmp(argv[i], "-dv") == 0){
+ g_conf.dstVgroups = atol(argv[++i]);
+ }else if(strcmp(argv[i], "-t") == 0){
+ g_conf.subTable = true;
+ }
}
printf("env init\n");
- initLogFile();
+ if(strlen(g_conf.dir) != 0){
+ initLogFile();
+ }
if (init_env() < 0) {
return -1;
@@ -515,6 +619,5 @@ int main(int argc, char* argv[]) {
tmq_t* tmq = build_consumer();
tmq_list_t* topic_list = build_topic_list();
basic_consume_loop(tmq, topic_list);
- /*sync_consume_loop(tmq, topic_list);*/
taosCloseFile(&g_fp);
}
diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c
index b993a8dbf1377c57af81d767173f5bfd27b688e3..16732ff9a191576d871584253f2c70e9187b6d2f 100644
--- a/tests/tsim/src/simExe.c
+++ b/tests/tsim/src/simExe.c
@@ -464,7 +464,10 @@ void simStoreSystemContentResult(SScript *script, char *filename) {
taosCloseFile(&pFile);
char rmCmd[MAX_FILE_NAME_LEN] = {0};
sprintf(rmCmd, "rm -f %s", filename);
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
system(rmCmd);
+#pragma GCC diagnostic pop
}
}
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index 5eec1746185cc31ebcf7a19f18bfcc4fb198f9d8..03097e31b9d65c48cdc543524d12dc0a80e2c6b3 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -82,6 +82,7 @@ ELSE ()
COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
)
EXECUTE_PROCESS(
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
COMMAND git rev-parse --short HEAD
RESULT_VARIABLE commit_sha1
OUTPUT_VARIABLE taosadapter_commit_sha1
@@ -103,9 +104,9 @@ ELSE ()
ExternalProject_Add(upx
PREFIX "${_upx_prefix}"
URL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz
- CONFIGURE_COMMAND cmake -E true
- BUILD_COMMAND cmake -E true
- INSTALL_COMMAND cmake -E true
+ CONFIGURE_COMMAND ""
+ BUILD_COMMAND ""
+ INSTALL_COMMAND ""
)
ExternalProject_Add(taosadapter
@@ -118,8 +119,8 @@ ELSE ()
PATCH_COMMAND
COMMAND git clean -f -d
BUILD_COMMAND
- COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
- COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
COMMAND ${_upx_prefix}/src/upx/upx taosadapter
COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
@@ -141,8 +142,8 @@ ELSE ()
PATCH_COMMAND
COMMAND git clean -f -d
BUILD_COMMAND
- COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
- COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
@@ -157,9 +158,9 @@ ELSE ()
ExternalProject_Add(upx
PREFIX "${_upx_prefix}"
URL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-win32.zip
- CONFIGURE_COMMAND cmake -E true
- BUILD_COMMAND cmake -E true
- INSTALL_COMMAND cmake -E true
+ CONFIGURE_COMMAND ""
+ BUILD_COMMAND ""
+ INSTALL_COMMAND ""
)
ExternalProject_Add(taosadapter
@@ -174,8 +175,8 @@ ELSE ()
BUILD_COMMAND
COMMAND set CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client
COMMAND set CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib
- COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
- COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
COMMAND ${_upx_prefix}/src/upx/upx taosadapter.exe
COMMAND cmake -E copy taosadapter.exe ${CMAKE_BINARY_DIR}/build/bin
diff --git a/tools/shell/inc/shellInt.h b/tools/shell/inc/shellInt.h
index 26ca6895ace188257ad9b16642cfe1f09bc792b4..15f6f6dc6a362c8c94994727fe19fa090ca94c57 100644
--- a/tools/shell/inc/shellInt.h
+++ b/tools/shell/inc/shellInt.h
@@ -113,7 +113,7 @@ int32_t shellExecute();
int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision);
void shellPrintHeader(TAOS_FIELD *fields, int32_t *width, int32_t num_fields);
void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t length, int32_t precision);
-void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision);
+void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision);
// shellUtil.c
int32_t shellCheckIntSize();
void shellPrintVersion();
diff --git a/tools/shell/src/shellCommand.c b/tools/shell/src/shellCommand.c
index d87e10fd0897aaeea8a203d7b3d26e1fa02425cf..b73317e991042f6ce96a470ca9325cc2754fe47a 100644
--- a/tools/shell/src/shellCommand.c
+++ b/tools/shell/src/shellCommand.c
@@ -510,7 +510,10 @@ int32_t shellReadCommand(char *command) {
shellClearLineAfter(&cmd);
break;
case 12: // Ctrl + L;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
system("clear");
+#pragma GCC diagnostic pop
shellShowOnScreen(&cmd);
break;
case 21: // Ctrl + U;
diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c
index 68e3a272c33d2b97da02d5a27561de2b8dd5fa6f..45d5489803fb5a0f7ec5506320d9e21257c8281b 100644
--- a/tools/shell/src/shellEngine.c
+++ b/tools/shell/src/shellEngine.c
@@ -62,7 +62,10 @@ int32_t shellRunSingleCommand(char *command) {
}
if (shellRegexMatch(command, "^[\t ]*clear[ \t;]*$", REG_EXTENDED | REG_ICASE)) {
- system("clear");
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
+ system("clear");
+#pragma GCC diagnostic pop
return 0;
}
@@ -266,7 +269,6 @@ char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision) {
void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision) {
if (val == NULL) {
- taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR);
return;
}
@@ -314,13 +316,34 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_JSON:
- memcpy(buf, val, length);
- buf[length] = 0;
- taosFprintfFile(pFile, "\'%s\'", buf);
+ {
+ char quotationStr[2];
+ int32_t bufIndex = 0;
+ quotationStr[0] = 0;
+ quotationStr[1] = 0;
+ for (int32_t i = 0; i < length; i++) {
+ buf[bufIndex] = val[i];
+ bufIndex++;
+ if (val[i] == '\"') {
+ buf[bufIndex] = val[i];
+ bufIndex++;
+ quotationStr[0] = '\"';
+ }
+ if (val[i] == ',') {
+ quotationStr[0] = '\"';
+ }
+ }
+ buf[bufIndex] = 0;
+ if (length == 0) {
+ quotationStr[0] = '\"';
+ }
+
+ taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr);
+ }
break;
case TSDB_DATA_TYPE_TIMESTAMP:
shellFormatTimestamp(buf, *(int64_t *)val, precision);
- taosFprintfFile(pFile, "'%s'", buf);
+ taosFprintfFile(pFile, "%s", buf);
break;
default:
break;
diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c
index 2dcab04b3f4bfd072d766ff1e25c015cf609466f..b8b8392b961a92263f791ee4b480e61a8c148efd 100644
--- a/tools/shell/src/shellWebsocket.c
+++ b/tools/shell/src/shellWebsocket.c
@@ -18,19 +18,19 @@
#include "shellInt.h"
int shell_conn_ws_server(bool first) {
- shell.ws_conn = ws_connect_with_dsn(shell.args.dsn);
- if (!shell.ws_conn) {
- fprintf(stderr, "failed to connect %s, reason: %s\n",
- shell.args.dsn, ws_errstr(NULL));
- return -1;
- }
- if (first && shell.args.restful) {
- fprintf(stdout, "successfully connect to %s\n\n",
- shell.args.dsn);
- } else if (first && shell.args.cloud) {
- fprintf(stdout, "successfully connect to cloud service\n");
- }
- return 0;
+ shell.ws_conn = ws_connect_with_dsn(shell.args.dsn);
+ if (!shell.ws_conn) {
+ fprintf(stderr, "failed to connect %s, reason: %s\n",
+ shell.args.dsn, ws_errstr(NULL));
+ return -1;
+ }
+ if (first && shell.args.restful) {
+ fprintf(stdout, "successfully connect to %s\n\n",
+ shell.args.dsn);
+ } else if (first && shell.args.cloud) {
+ fprintf(stdout, "successfully connect to cloud service\n");
+ }
+ return 0;
}
static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) {
@@ -39,7 +39,7 @@ static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) {
ws_fetch_block(wres, &data, &rows);
*execute_time += (double)(ws_take_timing(wres)/1E6);
if (!rows) {
- return 0;
+ return 0;
}
int num_fields = ws_field_count(wres);
TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres);
@@ -64,7 +64,7 @@ static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) {
putchar(' ');
putchar('|');
}
- putchar('\r');
+ putchar('\r');
putchar('\n');
}
numOfRows += rows;
@@ -79,7 +79,7 @@ static int verticalPrintWebsocket(WS_RES* wres, double* pexecute_time) {
ws_fetch_block(wres, &data, &rows);
*pexecute_time += (double)(ws_take_timing(wres)/1E6);
if (!rows) {
- return 0;
+ return 0;
}
int num_fields = ws_field_count(wres);
TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres);
@@ -98,7 +98,7 @@ static int verticalPrintWebsocket(WS_RES* wres, double* pexecute_time) {
uint32_t len;
for (int i = 0; i < rows; i++) {
printf("*************************** %d.row ***************************\n",
- numOfRows + 1);
+ numOfRows + 1);
for (int j = 0; j < num_fields; j++) {
TAOS_FIELD* field = fields + j;
int padding = (int)(maxColNameLen - strlen(field->name));
@@ -121,7 +121,7 @@ static int dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute
}
TdFilePtr pFile = taosOpenFile(fullname,
- TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM);
+ TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM);
if (pFile == NULL) {
fprintf(stderr, "failed to open file: %s\r\n", fullname);
return -1;
@@ -132,7 +132,7 @@ static int dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute
*pexecute_time += (double)(ws_take_timing(wres)/1E6);
if (!rows) {
taosCloseFile(&pFile);
- return 0;
+ return 0;
}
int numOfRows = 0;
TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres);
@@ -207,7 +207,7 @@ void shellRunSingleCommandWebsocketImp(char *command) {
}
if (!shell.ws_conn && shell_conn_ws_server(0)) {
- return;
+ return;
}
shell.stop_query = false;
@@ -216,16 +216,16 @@ void shellRunSingleCommandWebsocketImp(char *command) {
WS_RES* res = ws_query_timeout(shell.ws_conn, command, shell.args.timeout);
int code = ws_errno(res);
if (code != 0) {
- et = taosGetTimestampUs();
- fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6);
- if (code == TSDB_CODE_WS_SEND_TIMEOUT || code == TSDB_CODE_WS_RECV_TIMEOUT) {
- fprintf(stderr, "Hint: use -t to increase the timeout in seconds\n");
- } else if (code == TSDB_CODE_WS_INTERNAL_ERRO || code == TSDB_CODE_WS_CLOSED) {
- fprintf(stderr, "TDengine server is down, will try to reconnect\n");
- shell.ws_conn = NULL;
- }
- ws_free_result(res);
- return;
+ et = taosGetTimestampUs();
+ fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6);
+ if (code == TSDB_CODE_WS_SEND_TIMEOUT || code == TSDB_CODE_WS_RECV_TIMEOUT) {
+ fprintf(stderr, "Hint: use -t to increase the timeout in seconds\n");
+ } else if (code == TSDB_CODE_WS_INTERNAL_ERRO || code == TSDB_CODE_WS_CLOSED) {
+ fprintf(stderr, "TDengine server is down, will try to reconnect\n");
+ shell.ws_conn = NULL;
+ }
+ ws_free_result(res);
+ return;
}
double execute_time = ws_take_timing(res)/1E6;
@@ -233,36 +233,36 @@ void shellRunSingleCommandWebsocketImp(char *command) {
if (shellRegexMatch(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) {
fprintf(stdout, "Database changed.\r\n\r\n");
fflush(stdout);
- ws_free_result(res);
+ ws_free_result(res);
return;
}
int numOfRows = 0;
if (ws_is_update_query(res)) {
- numOfRows = ws_affected_rows(res);
- et = taosGetTimestampUs();
+ numOfRows = ws_affected_rows(res);
+ et = taosGetTimestampUs();
double total_time = (et - st)/1E3;
double net_time = total_time - (double)execute_time;
- printf("Query Ok, %d of %d row(s) in database\n", numOfRows, numOfRows);
+ printf("Query Ok, %d of %d row(s) in database\n", numOfRows, numOfRows);
printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time);
} else {
- int error_no = 0;
- numOfRows = shellDumpWebsocket(res, fname, &error_no, printMode, &execute_time);
- if (numOfRows < 0) {
- ws_free_result(res);
- return;
- }
- et = taosGetTimestampUs();
+ int error_no = 0;
+ numOfRows = shellDumpWebsocket(res, fname, &error_no, printMode, &execute_time);
+ if (numOfRows < 0) {
+ ws_free_result(res);
+ return;
+ }
+ et = taosGetTimestampUs();
double total_time = (et - st) / 1E3;
double net_time = total_time - execute_time;
- if (error_no == 0 && !shell.stop_query) {
- printf("Query OK, %d row(s) in set\n", numOfRows);
- printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time);
- } else {
- printf("Query interrupted, %d row(s) in set (%.6fs)\n", numOfRows,
- (et - st)/1E6);
- printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time);
- }
+ if (error_no == 0 && !shell.stop_query) {
+ printf("Query OK, %d row(s) in set\n", numOfRows);
+ printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time);
+ } else {
+ printf("Query interrupted, %d row(s) in set (%.6fs)\n", numOfRows,
+ (et - st)/1E6);
+ printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time);
+ }
}
printf("\n");
ws_free_result(res);